Example #1
def create_data(problems,methods,cfl_nums,lim_type):
    import pickle
    import ssp_lmm_vss
    try:
        import pkgutil
        pkgutil.find_loader("clawpack.petclaw")
        use_petsc = True
    except ImportError:
        use_petsc = False
        print('Unable to import petclaw, is PETSc/petsc4py installed?')
        raise

    for problem in problems:
        paramtrs = ssp_lmm_vss.default_parameters(problem)
        paramtrs.num_output_times = 1 
        paramtrs.max_steps = 200000
        paramtrs.use_petsc = use_petsc
        paramtrs.N = [640,160]

        for method in methods:
            data = ssp_lmm_vss.run(problem, N=paramtrs.N, method=method,
                                   cfl_nums=cfl_nums[method], lim_type=lim_type[method],
                                   limiter=4, paramtrs=paramtrs, iplot=False)
            if data is not None:
                solution_data = {}
                solution_data[method] = data
                output = open(problem+'_'+method+'.pkl', "wb")
                pickle.dump(solution_data, output, -1)
                output.close()
Example #2
def available_access_systems():
    """Return the list of available data access systems."""
    asList = []
    if find_loader('imdb.parser.http') is not None:
        asList.append('http')
    if find_loader('imdb.parser.sql') is not None:
        asList.append('sql')
    return asList
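
For comparison, the same check can be written with importlib.util.find_spec, the documented replacement for pkgutil.find_loader (deprecated since Python 3.12); this is a sketch, and the imdb module names are taken from the example above:

import importlib.util

def available_access_systems():
    """Return the list of available data access systems (find_spec variant)."""
    as_list = []
    for mod_name, system in (('imdb.parser.http', 'http'),
                             ('imdb.parser.sql', 'sql')):
        try:
            if importlib.util.find_spec(mod_name) is not None:
                as_list.append(system)
        except ImportError:
            pass  # the parent 'imdb' package itself is missing
    return as_list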
Example #3
File: IWindow.py  Project: alien3211/lom
import os
import pkgutil

def getWindow(configData):
    log.LOG("IN GETWINDOW")
    if pkgutil.find_loader('gtk') is not None:
        log.LOG("import GTK")
        import WindowGTK
        configData['user'] = os.environ['USER']
        configData['lomrc'] = os.environ['HOME'] + "/.lomrc"
        return WindowGTK.Window(configData)
    elif pkgutil.find_loader('PyQt') is not None:
        log.LOG("import QT")
        import WindowQT
        return WindowQT.Window()
Example #4
File: plugins.py  Project: exaile/exaile
    def is_potentially_broken(self, info):
        '''
            Returns True if one of the modules that the plugin requires is
            not detected as available.

            :param info: The data returned from get_plugin_info()
        '''
        import pkgutil
        from gi.repository import GIRepository

        gir = GIRepository.Repository.get_default()

        modules = info.get('RequiredModules', [])

        for module in modules:
            pair = module.split(':', 1)
            if len(pair) > 1:
                prefix, module = pair
                if prefix == 'gi':
                    if not gir.enumerate_versions(module):
                        return True
            else:
                if not pkgutil.find_loader(module):
                    return True

        return False
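
A hypothetical call to exercise this check; the info dict keys mirror what get_plugin_info() is expected to return, and the module names are illustrative:

# plugin_manager is a hypothetical instance of the class above
info = {'RequiredModules': ['gi:Gtk', 'mutagen']}
broken = plugin_manager.is_potentially_broken(info)
# True if either the Gtk introspection data or the mutagen module is missing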
Example #5
import os
import pkgutil

def list_services():
    """
    列出所有接口模块信息
    """
    services = []
    for loader, module_name, is_pkg in \
            pkgutil.walk_packages([os.path.dirname(__file__)]):
        if not is_pkg:
            continue

        mod = loader.find_module(module_name).load_module(module_name)
        try:
            if mod.DICT_INFO and mod.DICT_INFO['enable']:
                info = dict(mod.DICT_INFO)
                info['module_name'] = module_name

                def set_default(d, name, default):
                    if name not in d:
                        d[name] = default

                set_default(info, 'description', 'no description')

                config_name = '%s.config' % module_name
                config = pkgutil.find_loader(
                    config_name).load_module(config_name)
                info['options'] = config.Config.get_options()
                info['current'] = config.Config.get_option()
                services.append(info)
        except Exception:
            pass
    return services
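
A minimal caller, assuming the package layout this function expects (sub-packages exposing a DICT_INFO dict and a config submodule):

for service in list_services():
    print(service['module_name'], '-', service['description'])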
Example #6
def scan_ast(root, ignore_paths):
    for node in ast.walk(root):
        if isinstance(node, ast.Import):
            modules = [n.name.split('.')[0] for n in node.names]
        elif isinstance(node, ast.ImportFrom):
            if not node.module:
                continue
            modules = [node.module.split('.')[0]]
        else:
            continue

        for module_name in modules:
            try:
                package = pkgutil.find_loader(module_name)
                if not package or not hasattr(package, 'filename'):
                    continue

                filename = package.filename

                c1 = not filename
                c2 = any(filename.startswith(i) for i in ignore_paths)
                c3 = module_name == filename
                if c1 or c2 or c3:
                    continue

                yield module_name
            except ImportError:
                pass
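
A quick invocation sketch; note that loader.filename is a Python 2-era attribute, so on Python 3 the hasattr guard filters out every module and the generator yields nothing:

import ast

tree = ast.parse("import os\nfrom collections import OrderedDict\n")
print(list(scan_ast(tree, ignore_paths=[])))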
Example #7
from sys import version_info
from pkgutil import find_loader

def version_update(cfg, *args):

    if version_info[0] == 3:
        if find_loader('web') is None:
            cfg['modules'][-1] = {'module': 'git+https://github.com/webpy/webpy.git@py3#egg=webpy'}
        else:
            del cfg['modules'][-1]
Example #8
 def load_modules(self, package, required='ClassName'):
     self.debug('** Checking modules in the {0} package.'.format(package.__name__))
     modules = {}
     walker = pkgutil.walk_packages(package.__path__, package.__name__ + '.')
     for tup in walker:
         name = tup[1]
         
         self.debug('** Found module \'{0}\'.'.format(name))
             
         if name in self.modules.keys():
             self.debug('** Previously loaded module. Reloading!')
             imp.reload(self.modules[name])
             modules[name] = self.modules[name]
             continue
             
         loader = pkgutil.find_loader(name)
         mod = loader.load_module(name)
         
         if not hasattr(mod, required):
             self.debug('>> Ignoring module {0}.'.format(name))
             self.debug('>> Module contains no {0} class.'.format(required))
             continue
         
         modules[name] = mod
     self.modules = modules
Example #9
def get_modpath_from_modname(modname, prefer_pkg=False, prefer_main=False):
    """
    Same as get_modpath but doesn't import directly

    SeeAlso:
        get_modpath
    """
    from os.path import dirname, basename, join, exists
    initname = '__init__.py'
    mainname = '__main__.py'
    if modname in sys.modules:
        modpath = sys.modules[modname].__file__.replace('.pyc', '.py')
    else:
        import pkgutil
        loader = pkgutil.find_loader(modname)
        modpath = loader.filename.replace('.pyc', '.py')
        if '.' not in basename(modpath):
            modpath = join(modpath, initname)
    if prefer_pkg:
        if modpath.endswith(initname) or modpath.endswith(mainname):
            modpath = dirname(modpath)
    if prefer_main:
        if modpath.endswith(initname):
            main_modpath = modpath[:-len(initname)] + mainname
            if exists(main_modpath):
                modpath = main_modpath
    return modpath
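
A quick check against a module that is guaranteed to be in sys.modules already, so the Python 2-only loader.filename branch is skipped:

print(get_modpath_from_modname('os'))                   # e.g. .../lib/python/os.py
print(get_modpath_from_modname('os', prefer_pkg=True))  # unchanged: os is not a package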
Example #10
def insert_cuts():
    def my_import(name):
        mod = __import__(name)
        components = name.split(".")
        for comp in components[1:]:
            mod = getattr(mod, comp)
        return mod

    function_list = []

    # Which database are we using?
    # Grab all the modules from the update directory
    current_db_name = "%s.cuts" % get_current_db_module()
    update_module = my_import(current_db_name)
    imported_list = []
    for loader, name, ispkg in pkgutil.iter_modules([update_module.__path__[0]]):
        if ispkg:
            continue
        load = pkgutil.find_loader("%s.%s" % (current_db_name, name))
        mod = load.load_module("%s.%s" % (current_db_name, name))
        imported_list.append(mod)

    cut_list = []
    for mdl in imported_list:
        try:
            print "Trying to load: %s " % mdl
            cut_list.append(mdl.get_cut_class())
        except AttributeError:
            print("Module: %s does not have get_cut_class attribute, skipping." % mdl)

    for cut in cut_list:
        insert_cut_into_database(cut)
Example #11
File: app.py  Project: opendatateam/udata
def create_app(config='udata.settings.Defaults', override=None,
               init_logging=init_logging):
    '''Factory for a minimal application'''
    app = UDataApp(APP_NAME)
    app.config.from_object(config)

    settings = os.environ.get('UDATA_SETTINGS', join(os.getcwd(), 'udata.cfg'))
    if exists(settings):
        app.settings_file = settings  # Keep track of loaded settings for diagnostic
        app.config.from_pyfile(settings)

    if override:
        app.config.from_object(override)

    # Loads defaults from plugins
    for pkg in entrypoints.get_roots(app):
        if pkg == 'udata':
            continue  # Defaults are already loaded
        module = '{}.settings'.format(pkg)
        if pkgutil.find_loader(module):
            import importlib
            settings = importlib.import_module(module)  # load the module itself, not just its loader
            for key, default in settings.__dict__.items():
                app.config.setdefault(key, default)

    app.json_encoder = UDataJsonEncoder

    app.debug = app.config['DEBUG'] and not app.config['TESTING']

    app.wsgi_app = ProxyFix(app.wsgi_app)

    init_logging(app)
    register_extensions(app)

    return app
Example #12
 def _find_module_filename(self, modulename):
     """finds the filename of the module with the given name (supports submodules)"""
     loader = pkgutil.find_loader(modulename)
     if loader is None:
         raise ImportError(modulename)
     search_path = loader.get_filename(modulename)
     return search_path
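
Usage is straightforward once the method's host object exists; the class name here is hypothetical:

# finder = ModuleScanner()                      # hypothetical host class
# finder._find_module_filename('json.decoder')  # -> .../json/decoder.py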
Example #13
def get_module_file_attribute(package):
    """
    Get the absolute path of the module with the passed name.

    Since modules *cannot* be directly imported during analysis, this function
    spawns a subprocess importing this module and returning the value of this
    module's `__file__` attribute.

    Parameters
    ----------
    package : str
        Fully-qualified name of this module.

    Returns
    ----------
    str
        Absolute path of this module.
    """
    # First try to use 'pkgutil'. - fastest but doesn't work on
    # certain modules in pywin32, which replace all module attributes
    # with those of the .dll
    try:
        loader = pkgutil.find_loader(package)
        attr = loader.get_filename(package)
    # Second try to import module in a subprocess. Might raise ImportError.
    except (AttributeError, ImportError):
        # Statement to return __file__ attribute of a package.
        __file__statement = """
            import %s as p
            print(p.__file__)
        """
        attr = exec_statement(__file__statement % package)
        if not attr.strip():
            raise ImportError
    return attr
Example #14
    def startRunnable(self, importmodule, runnable, kwargs=None, hostlist=None):
        """Instruct the given hosts to run a certain Runnable.
        :param importmodule: The name of the module where the subclass of RandomTrafficReceiver.Runnable is defined.
        :type importmodule: str
        :param runnable: The name of a subclass of RandomTrafficReceiver.Runnable that does the work that the hosts shall be doing.
        :type runnable: str
        :param kwargs: The constructor parameter for the runnable
        :type kwargs: dict
        :param hostlist: A list of hosts that will execute the runnable. Defaults to all currently known hosts.
        :type hostlist: list"""
        if kwargs is None: kwargs = dict()

        assert isinstance(importmodule, str)
        assert isinstance(runnable, str)
        assert isinstance(kwargs, dict)

        logging.info("start runnable %s on hosts %s"%(runnable, hostlist))

        # Test if importmodule exists and has a runnable with the given name
        try:
            runnableLoader = pkgutil.find_loader('actors.%s'%importmodule)
            assert runnableLoader is not None, "The runnable %s does not exist in module %s"%(runnable, importmodule)
        except ImportError as ex:
            logging.error("The module actors.%s does not exist"%importmodule)

        # By default, start runnable on all known hosts
        if hostlist is None:
            hostlist = list(self.knownHosts.keys())
        assert isinstance(hostlist, list) or isinstance(hostlist, set) or isinstance(hostlist, frozenset)

        for hostid in hostlist:
            logging.debug("Starting runnable %s of host %s"%(runnable, hostid))
            startOperation = lambda connector: connector.client.startRunnable(importmodule, runnable, json.dumps(kwargs))
            self._executeOperationOnHost(hostid, startOperation)
Example #15
    def __find_and_load(self, fullname, name, parent=None, local=None):
        # The 'imp' module doesn't support PEP 302 extensions like
        # sys.path_hooks (used for zipped eggs), so we use (undocumented)
        # functionality from pkgutil instead.
        if parent is None:
            assert local is None
            try:
                loader = self.__find_loader_in_path(fullname, self.__path)
                local = True
            except ImportError:
                loader = pkgutil.find_loader(fullname)
                if loader is None:
                    raise ImportError("no module named " + fullname)
                local = False
        else:
            assert local is not None
            if hasattr(parent, '__path__'):
                loader = self.__find_loader_in_path(fullname, parent.__path__)
            else:
                raise ImportError("no module named " + fullname)

        if local:
            module = self.__load_local_module(fullname, loader)
            self.__modules[name] = module
        else:
            module = loader.load_module(fullname)

        if parent is not None:
            parent.__dict__[name] = module

        return module, local
Example #16
def _load_settings():
    global _settings
    loader = pkgutil.find_loader(__name__ + '.settings')
    if loader is None:
        _settings = {}
    else:
        _settings = loader.load_module(__name__ + '.settings').settings
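
Calling it once at import time populates the module-level cache; a sketch, assuming no sibling settings module is present:

_load_settings()
print(_settings)   # {} unless a '<package>.settings' module exists next to this one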
Example #17
def write_to_file(path_to_filename, data, append_to_file=True, create_file=False):
    filename, filename_extension = os.path.splitext(path_to_filename)
    if (filename_extension == ".html"):
        import pkgutil
        if (not pkgutil.find_loader("bs4") == None):
            from bs4 import BeautifulSoup, Tag
            soup = BeautifulSoup(data, 'html.parser')
            data = soup.prettify(formatter='html')

    [parent_dir, filename] = os.path.split(path_to_filename)
    if (mkdirs(parent_dir)):
        if (os.path.isfile(path_to_filename) or create_file):
            try:
                file_mode = "w"
                if (append_to_file):
                    file_mode = "a"
                fout = open(path_to_filename, file_mode)
                for line in data:
                    fout.write(line)
                fout.close()
                message = "The file was successfully written to: %s" %(path_to_filename)
                logging.getLogger(glocktop_analyze.MAIN_LOGGER_NAME).debug(message)
                return True
            except UnicodeEncodeError:
                message = "There was a unicode encode error writing to the file: %s." %(path_to_filename)
                logging.getLogger(glocktop_analyze.MAIN_LOGGER_NAME).error(message)
            except IOError:
                message = "There was an error writing to the file: %s." %(path_to_filename)
                logging.getLogger(glocktop_analyze.MAIN_LOGGER_NAME).error(message)
Example #18
File: __init__.py  Project: t0mk/arapi
def loadSubApp(main_app, subapp_modname):
    logging.info("Loading python module for sub-application: %s" %
        subapp_modname)

    # this is lame but until I find a nicer way to load a module..
    loader = pkgutil.find_loader('arapi.subapps.' + subapp_modname)
    subapp_mod = loader.load_module(subapp_modname)

    # mount sub-application
    logging.info("Mounting sub-application: %s to %s" %
        (subapp_modname, subapp_mod.url_base))
    main_app.mount(subapp_mod.url_base, subapp_mod.app, skip=None)

    # create a doc resource for a sub-app, and update doc of mother app
    subapp_mod.app.config.doc = getHelpDoc(subapp_mod.app,
                                           subapp_mod.url_base)
    main_app.config.doc = getHelpDoc(main_app, "/")

    # inherit plugins from the main app
    # (Is this cool? maybe each sub-app should have its own plugin instance)
    for plugin in main_app.plugins:
        if type(plugin) in INHERIT_PLUGINS:
            logging.info("Mounting plugin %s to %s" %
                (plugin, subapp_modname))
            subapp_mod.app.install(plugin)
Example #19
File: dbo.py  Project: hydratk/hydratk
    def __init__(self, dsn, username=None, password=None, options={}, autoconnect=True):
        """ Class constructor

        Called when object is initialized

        Args: 
           dsn (str): format: dbdriver:db_string
           username (str): username
           password (str): password 
           options (dict): driver specific options

        Returns:
           DBO: object on success 

        Raises:
           exception: DBOException

        """

        driver_name = self._get_driver_from_dsn(dsn)
        if driver_name in dbo_drivers:
            if (driver_name != 'sqlite' and find_loader('hydratk.lib.network') is None):
                raise DBOException('Library hydratk-lib-network not installed')
            self._driver_name = driver_name
            dbo_driver_mod_str = '{0}.driver'.format(dbo_drivers[driver_name])
            dbo_driver_mod = self._import_dbo_driver(dbo_driver_mod_str)

        else:
            raise DBOException('Not existing driver: {0}'.format(driver_name))

        try:
            self._dbo_driver = dbo_driver_mod.DBODriver(
                dsn, username, password, options, autoconnect)
        except Exception as e:
            print(e)
Example #20
 def load_modules(self):
     self.dlog('** Checking modules in extensions folder.')
     modules = {}
     walker = pkgutil.walk_packages(extensions.__path__, extensions.__name__ + '.')
     for tup in walker:
         name = tup[1]
         
         self.dlog('** Found module \'{0}\'.'.format(name))
             
         if name in self.modules.keys():
             self.dlog('** Previously loaded module. Reloading!')
             imp.reload(self.modules[name])
             modules[name] = self.modules[name]
             continue
             
         loader = pkgutil.find_loader(name)
         mod = loader.load_module(name)
         
         if not hasattr(mod, 'Extension'):
             self.dlog('>> Ignoring extension module {0}.'.format(name))
             self.dlog('>> Module contains no Extension class.')
             continue
         
         modules[name] = mod
     self.modules = modules
Example #21
def check_module_installed(modname):
    """
    Check if a python module is installed without attempting to import it.
    Note that if ``modname`` indicates a child module, the parent module is
    always loaded.

    Args:
        modname (str):  module name

    Returns:
        bool: found

    References:
        http://stackoverflow.com/questions/14050281/module-exists-without-importing

    CommandLine:
        python -m utool.util_import check_module_installed --show --verbimp --modname=this
        python -m utool.util_import check_module_installed --show --verbimp --modname=guitool
        python -m utool.util_import check_module_installed --show --verbimp --modname=guitool.__PYQT__

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_import import *  # NOQA
        >>> import utool as ut
        >>> modname = ut.get_argval('--modname', default='this')
        >>> is_installed = check_module_installed(modname)
        >>> is_imported = modname in sys.modules
        >>> print('module(%r).is_installed = %r' % (modname, is_installed))
        >>> print('module(%r).is_imported = %r' % (modname, is_imported))
        >>> assert 'this' not in sys.modules, 'module(this) should not have ever been imported'
    """
    import pkgutil
    loader = pkgutil.find_loader(modname)
    is_installed = loader is not None
    return is_installed
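
On Python 3.12+, where pkgutil.find_loader is deprecated, an equivalent check (a sketch) uses importlib.util.find_spec:

import importlib.util

def check_module_installed(modname):
    """find_spec-based variant for Python 3.4+."""
    try:
        return importlib.util.find_spec(modname) is not None
    except (ImportError, ValueError):
        # a parent package is missing, or a loaded module has __spec__ = None
        return False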
Example #22
def insert_views():
    def my_import(name):
        mod = __import__(name)
        components = name.split('.')
        for comp in components[1:]:
            mod = getattr(mod, comp)
        return mod

    function_list = []
    # Which database are we using?
    # Grab all the modules from the update directory
    current_db_name = '%s.views' % get_current_db_module()
    update_module = my_import(current_db_name)
    imported_list = []
    for loader, name, ispkg in pkgutil.iter_modules(update_module.__path__):
        if ispkg:
            continue
        load = pkgutil.find_loader('%s.%s' % (current_db_name, name))
        mod = load.load_module("%s.%s" % (current_db_name,name))
        imported_list.append(mod)

    view_list = []
    for mdl in imported_list:
        try:
            view_list.append(mdl.get_view_class())
        except AttributeError:
            pass

    for view in view_list:
        insert_view_into_database(view)
Example #23
def checkConfiguration():
    if not os.path.isdir('/etc/knockknock.d/'):
        print("/etc/knockknock.d/ does not exist.  You need to set up your profiles first...")
        sys.exit(3)

    if not os.path.isdir('/etc/knockknock.d/profiles/'):
        print("/etc/knockknock.d/profiles/ does not exist.  You need to set up your profiles first...")
        sys.exit(3)

    # Retrieve the system init type from /proc
    with open('/proc/1/status', 'r') as f:
        global initprocname
        initprocname = f.readline().split()[1]

    # Verify whether or not the python-systemd dependency is required as well
    # as whether or not it is fulfilled (optimistically written with python3
    # support)
    if sys.version_info > (3, 0):
        import importlib
        if initprocname == "systemd" and importlib.util.find_spec("systemd") is None:
            print "Your init system was detected as systemd but the python systemd module is not installed. You need to install it first..."
            sys.exit(3)
    else:
        import pkgutil
        if initprocname == "systemd" and pkgutil.find_loader("systemd") is None:
            print "Your init system was detected as systemd but the python systemd module is not installed. You need to install it first..."
            sys.exit(3)
Example #24
def build_config(env_var=None):
    """Construct a new application configuration outside of a web app.

    The configuration will be loaded from the same sources as it would
    be when loaded from a Flask web app (defaults and site specific).

    This method should only be used when not loading a web app, such as
    when loading configuration for a CLI script. Web apps should use the
    :func:`bootstrap` method instead.

    :param str env_var: Name of the ENV var to load a site specific
        configuration file from. If the var is not specified or does
        not point to a valid configuration file it will be silently
        ignored.
    :return: Loaded configuration
    :rtype: Config
    """
    loader = pkgutil.find_loader("avalon")
    if loader is None:
        raise RuntimeError("Could not find package loader for 'avalon' module")

    path = loader.get_filename()
    if path is None:
        raise RuntimeError("Could not find full filename for 'avalon' module")

    conf = Config(path)
    _load_configuration(conf, env_var)
    return conf
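
A CLI script might then bootstrap its configuration like this (the environment variable name is an assumption):

conf = build_config(env_var='AVALON_SETTINGS')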
Example #25
def custom_import(*names):
    """ Imports SeisFlows module and extracts class of same name. For example,

            custom_import('workflow', 'inversion') 

        imports 'seisflows.workflow.inversion' and, from this module, extracts
        class 'inversion'.
    """
    # parse input arguments
    if len(names) == 0:
        raise Exception(ImportError1)
    if names[0] not in SeisflowsObjects.names:
        raise Exception(ImportError2)
    if len(names) == 1:
        names += (_val(names[0]),)
    if not names[1]:
        return Null

    # generate package list
    packages = ['seisflows']
    if os.getenv('SEISFLOWS_PACKAGES'):
        for package in os.getenv('SEISFLOWS_PACKAGES').split(','):
            if package in packages:
                continue
            if find_loader(package):
                packages += [package]

    # does module exist?
    _exists = False
    for package in packages:
        full_dotted_name = package+'.'+names[0]+'.'+names[1]
        if find_loader(full_dotted_name):
            _exists = True
            break
    if not _exists:
        raise Exception(ImportError3 % 
            (names[0], names[1], names[0].upper()))

    # import module
    module = import_module(full_dotted_name)

    # extract class
    if hasattr(module, names[1]):
        return getattr(module, names[1])
    else:
        raise Exception(ImportError4 % 
            (names[0], names[1], names[1]))
Example #26
def update_calculations_on_database():
    server = SoudanServer()
    
    def my_import(name):
        mod = __import__(name)
        components = name.split('.')
        for comp in components[1:]:
            mod = getattr(mod, comp)
        return mod

    # Which database are we using?
    # Grab all the modules from the update directory
    module_list = []
    current_db_name = '%s.update' % get_current_db_module()
    update_module = my_import(current_db_name)
    for loader, name, ispkg in pkgutil.iter_modules([update_module.__path__[0]]):
        if ispkg:
            continue
        load = pkgutil.find_loader('%s.%s' % (current_db_name, name))
        mod = load.load_module("%s.%s" % (current_db_name,name))
        module_list.append(mod)

    num_cpus = detectCPUs() 
    print "Performing the following updates: " 
    must_cycle = False
    for amod in module_list: 
        # Get the view to use
        view = amod.get_view()
        list_of_docs = view(server.get_database())
        print "  %s" % amod.__name__

        total_list = numpy.array([doc.id for doc in list_of_docs])
        # Now split up the list for number of cpus
        if len(total_list) > 0: must_cycle = True

        all_lists = [total_list[i::num_cpus] for i in range(num_cpus)]

        # ship out to threads
        # We use a fork because the amount of work done
        # by the child should be fairly significant, making
        # the amount of time spent to fork negligible.  FixME?
        thread_list = []
        for alist in all_lists: 
            if len(alist) == 0: continue # Don't send out for 0 length lists
            pid = os.fork()
            if pid: # parent
                thread_list.append(pid)
            else: # child process
                del server
                run_update_calc(SoudanServer, alist, amod.update_rundoc)
                sys.exit(0)
                # stop here for the child process

        # Wait for worker children to complete
        for thread in thread_list:
            os.waitpid(-1, 0)

    if must_cycle:
        print "Some documents were updated, cycling again to resolve all updates"
        update_calculations_on_database()
Example #27
def check_module_installed(modname):
    """
    Check if a python module is installed without attempting to import it.
    Note that if ``modname`` indicates a child module, the parent module is
    always loaded.

    Args:
        modname (str):  module name

    Returns:
        bool: found

    References:
        http://stackoverflow.com/questions/14050281/module-exists-without-importing

    CommandLine:
        python -m utool.util_import check_module_installed --show --verbimp --modname=this
        python -m utool.util_import check_module_installed --show --verbimp --modname=guitool
        python -m utool.util_import check_module_installed --show --verbimp --modname=guitool.__PYQT__
        python -m utool.util_import check_module_installed --show --verbimp --modname=ibeis.scripts.iccv

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_import import *  # NOQA
        >>> import utool as ut
        >>> modname = ut.get_argval('--modname', default='this')
        >>> is_installed = check_module_installed(modname)
        >>> is_imported = modname in sys.modules
        >>> print('module(%r).is_installed = %r' % (modname, is_installed))
        >>> print('module(%r).is_imported = %r' % (modname, is_imported))
        >>> assert 'this' not in sys.modules, 'module(this) should not have ever been imported'
    """
    import pkgutil
    if '.' in modname:
        # Prevent explicit import if possible
        parts = modname.split('.')
        base = parts[0]
        submods = parts[1:]
        loader = pkgutil.find_loader(base)
        if loader is not None:
            # TODO: check to see if the path to the submodule exists;
            # for now assume the child is present when the base package is.
            return True
    loader = pkgutil.find_loader(modname)
    is_installed = loader is not None
    return is_installed
Example #28
def inference (inputs, num_classes):
    full = 'tensorflow.contrib.slim.python.slim.nets.' + FLAGS.net
    # e.g. full == 'tensorflow.contrib.slim.python.slim.nets.vgg.vgg_a'
    fs = full.split('.')
    loader = pkgutil.find_loader('.'.join(fs[:-1]))
    module = loader.load_module('.'.join(fs[:-1]))
    net = getattr(module, fs[-1])
    return net(inputs, num_classes)
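
A sketch of how this would be called; FLAGS.net and the input tensor are assumptions about the surrounding training script:

# FLAGS.net = 'vgg.vgg_a'   # set elsewhere via tf.app.flags
# logits = inference(images, num_classes=10)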
Example #29
def _loader_hook(modpath):
	modname = ".".join(modpath)
	loader = pkgutil.find_loader(modname)
	if not loader:
		raise ImportError("No loader found for %s" % modname)
	if not loader.is_package(modname):
		raise ImportError("Is not a package")
	return loader
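
A minimal call, using a standard-library package so the is_package check passes:

loader = _loader_hook(['email', 'mime'])
print(loader.is_package('email.mime'))   # True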
Example #30
File: runner.py  Project: lukhar/mojo
def create(tool):
    if not pkgutil.find_loader(tool if tool != 'py.test' else 'pytest'):
        sys.exit('{} is not installed on your system.'.format(tool))
    elif tool == 'py.test':
        return PyTestRunner()
    elif tool == 'nose':
        return NoseTestRunner()
    else:
        raise Exception('{} is not yet supported.'.format(tool))
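
PyTestRunner and NoseTestRunner are defined elsewhere in the project; a sketch of the call site:

runner = create('nose')   # sys.exit(...) fires first if nose is not importable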
Example #31
'''
    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).

    PM4Py is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    PM4Py is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
'''

from pm4py.algo.evaluation import precision, replay_fitness, simplicity, generalization, algorithm
import pkgutil

if pkgutil.find_loader("pyemd"):
    # import the EMD only if the pyemd package is installed
    from pm4py.algo.evaluation import earth_mover_distance
Example #32
'''
    This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).

    PM4Py is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    PM4Py is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with PM4Py.  If not, see <https://www.gnu.org/licenses/>.
'''
from pm4py.simulation import playout, montecarlo, tree_playout
import pkgutil

# tree generation is possible only with scipy installed
if pkgutil.find_loader("scipy"):
    from pm4py.simulation import tree_generator
Example #33
import pkgutil
from enum import Enum

from pm4py.objects.log.importer.xes.variants import iterparse, line_by_line, iterparse_mem_compressed, iterparse_20


class Variants(Enum):
    ITERPARSE = iterparse
    LINE_BY_LINE = line_by_line
    ITERPARSE_MEM_COMPRESSED = iterparse_mem_compressed
    ITERPARSE_20 = iterparse_20


if pkgutil.find_loader("lxml"):
    DEFAULT_VARIANT = Variants.ITERPARSE
else:
    DEFAULT_VARIANT = Variants.LINE_BY_LINE


def apply(path, parameters=None, variant=DEFAULT_VARIANT):
    """
    Import a XES log into a EventLog object

    Parameters
    -----------
    path
        Log path
    parameters
        Parameters of the algorithm, including
Example #34
#we do it this way as we can use the same script for all releases and people who
#dont want V2 can still use this script
_fall17V2PhoMVAIDModules = [
    'RecoEgamma.PhotonIdentification.Identification.mvaPhotonID_Fall17_94X_V2_cff'
    ]
_fall17V2PhoCutIDModules = [
    'RecoEgamma.PhotonIdentification.Identification.cutBasedPhotonID_Fall17_94X_V2_cff'
    ]
_fall17V2EleIDModules = [
    'RecoEgamma.ElectronIdentification.Identification.cutBasedElectronID_Fall17_94X_V2_cff',
    'RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Fall17_noIso_V2_cff',
    'RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Fall17_iso_V2_cff'
    ]

import pkgutil
if pkgutil.find_loader(_fall17V2EleIDModules[0]) is not None:
    _defaultEleIDModules.extend(_fall17V2EleIDModules)
else:
    print("EgammaPostRecoTools: Fall17V2 electron modules not found, running ID without them. If you want Fall17V2 IDs, please merge the appropriate PR\n  94X:  git cms-merge-topic cms-egamma/EgammaID_949")

if pkgutil.find_loader(_fall17V2PhoMVAIDModules[0]) is not None:
    _defaultPhoIDModules.extend(_fall17V2PhoMVAIDModules)
else:
    print("EgammaPostRecoTools: Fall17V2 MVA photon modules not found, running ID without them. If you want Fall17V2 MVA Photon IDs, please merge the appropriate PR\n  94X:  git cms-merge-topic cms-egamma/EgammaID_949\n  102X: git cms-merge-topic cms-egamma/EgammaID_1023")

if pkgutil.find_loader(_fall17V2PhoCutIDModules[0]) is not None:
    _defaultPhoIDModules.extend(_fall17V2PhoCutIDModules)
else:
    print("EgammaPostRecoTools: Fall17V2 cut based Photon ID modules not found, running ID without them. If you want Fall17V2 CutBased Photon IDs, please merge the appropriate PR\n  94X:  git cms-merge-topic cms-egamma/EgammaID_949\n  102X: git cms-merge-topic cms-egamma/EgammaID_1023")

def _getEnergyCorrectionFile(era):
Example #35
import logging
import numpy as np
import itertools
import pkgutil

# Logsumexp options
from scipy.misc import logsumexp as logsumexp_scipy  # always import the SciPy version as a safe (but slow) fallback

if pkgutil.find_loader('sselogsumexp') is not None:
    logging.info("Using fast logsumexp")
    from sselogsumexp import logsumexp

else:
    logging.info("Using scipy (slower) logsumexp")
    from scipy.misc import logsumexp

from Node import Node

logger = logging.getLogger(__name__)

CLONAL_CLUSTER = 1


class Tree:
    def __init__(self, nodes=None, edges=None, root=None):
        # dictionary of node id: node instance
        self._nodes = nodes if nodes else {}
        # list of tuples (parent_id, child_id)
        self._edges = edges if edges else []
        # pointer to the root of the tree
        self._root = root
Example #36
    try:
        import otp_yubikey
        INSTALLED_APPS = INSTALLED_APPS + ('otp_yubikey', )
    except ImportError:
        pass

apps_file = os.path.join(BASE_DIR, 'fir', 'config', 'installed_apps.txt')
if os.path.exists(apps_file):
    apps = list(INSTALLED_APPS)
    with open(apps_file) as f:
        for line in f.readlines():
            line = line.strip()
            if line != "":
                apps.append(line)
                settings = '{}.settings'.format(line)
                if find_loader(settings):
                    globals().update(import_module(settings).__dict__)

    INSTALLED_APPS = tuple(apps)

TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'OPTIONS': {
        'context_processors':
        ("django.contrib.auth.context_processors.auth",
         "django.template.context_processors.debug",
         "django.template.context_processors.i18n",
         "django.template.context_processors.media",
         "django.template.context_processors.static",
         "django.template.context_processors.request",
         "django.contrib.messages.context_processors.messages")
Example #37
    def checkRunnableBase(self, options):
        """
        Method to check for caveats that would prevent this tester from
        executing correctly (or not at all).

        DO NOT override this method. Instead, see .checkRunnable()
        """
        reasons = {}
        checks = options._checks

        tag_match = False
        for t in self.tags:
            if t in options.runtags:
                tag_match = True
                break
        if len(options.runtags) > 0 and not tag_match:
            self.setStatus(self.silent)
            return False

        # If something has already deemed this test a failure
        if self.isFail():
            return False

        # Check if we only want to run syntax tests
        if options.check_input and not self.specs['check_input']:
            self.setStatus(self.silent)
            return False

        # Check if we want to exclude syntax tests
        if options.no_check_input and self.specs['check_input']:
            self.setStatus(self.silent)
            return False

        # Are we running only tests in a specific group?
        if options.group != 'ALL' and options.group not in self.specs['group']:
            self.setStatus(self.silent)
            return False
        if options.not_group != '' and options.not_group in self.specs['group']:
            self.setStatus(self.silent)
            return False

        # Store regexp for matching tests if --re is used
        if options.reg_exp:
            match_regexp = re.compile(options.reg_exp)

        # If --re then only test matching regexp. Needs to run before other SKIP methods
        # This also needs to be in its own bucket group. We normally print skipped messages.
        # But we do not want to print tests that didn't match regex.
        if options.reg_exp and not match_regexp.search(self.specs['test_name']):
            self.setStatus(self.silent)
            return False

        # Short circuit method and run this test if we are ignoring all caveats
        if options.ignored_caveats == 'all':
            # Still, we should abide by the derived classes
            return self.checkRunnable(options)

        # Check for deleted tests
        if self.specs.isValid('deleted'):
            reasons['deleted'] = str(self.specs['deleted'])

        # Skipped by external means (example: TestHarness part2 with --check-input)
        if self.isSkip() and self.getStatusMessage():
            reasons['skip'] = self.getStatusMessage()
        # Test is skipped
        elif self.specs.type('skip') is bool and self.specs['skip']:
            # Backwards compatible (no reason)
            reasons['skip'] = 'no reason'
        elif self.specs.type('skip') is not bool and self.specs.isValid('skip'):
            reasons['skip'] = self.specs['skip']
        # If we're testing for SCALE_REFINE, then only run tests with a SCALE_REFINE set
        elif (options.scaling) and self.specs['scale_refine'] == 0:
            self.setStatus(self.silent)
            return False
        # If we're testing with valgrind, then skip tests that require parallel or threads or don't meet the valgrind setting
        elif options.valgrind_mode != '':
            tmp_reason = ''
            if self.specs['valgrind'].upper() == 'NONE':
                tmp_reason = 'Valgrind==NONE'
            elif self.specs['valgrind'].upper() == 'HEAVY' and options.valgrind_mode.upper() == 'NORMAL':
                tmp_reason = 'Valgrind==HEAVY'
            elif int(self.specs['min_threads']) > 1:
                tmp_reason = 'Valgrind requires non-threaded'
            elif self.specs["check_input"]:
                tmp_reason = 'check_input==True'
            if tmp_reason != '':
                reasons['valgrind'] = tmp_reason
        # If we're running in recover mode skip tests that have recover = false
        elif options.enable_recover and not self.specs['recover']:
            reasons['recover'] = 'NO RECOVER'

        # Check for PETSc versions
        (petsc_status, petsc_version) = util.checkPetscVersion(checks, self.specs)
        if not petsc_status:
            reasons['petsc_version'] = 'using PETSc ' + str(checks['petsc_version']) + ' REQ: ' + petsc_version

        # Check for SLEPc versions
        (slepc_status, slepc_version) = util.checkSlepcVersion(checks, self.specs)
        if not slepc_status and len(self.specs['slepc_version']) != 0:
            if slepc_version is not None:
                reasons['slepc_version'] = 'using SLEPc ' + str(checks['slepc_version']) + ' REQ: ' + slepc_version
            else:
                reasons['slepc_version'] = 'SLEPc is not installed'

        # PETSc and SLEPc is being explicitly checked above
        local_checks = ['platform', 'compiler', 'mesh_mode', 'ad_mode', 'method', 'library_mode', 'dtk', 'unique_ids', 'vtk', 'tecplot',
                        'petsc_debug', 'curl', 'superlu', 'cxx11', 'asio', 'unique_id', 'slepc', 'petsc_version_release', 'boost', 'fparser_jit',
                        'parmetis', 'chaco', 'party', 'ptscotch', 'threading']
        for check in local_checks:
            test_platforms = set()
            operator_display = '!='
            inverse_set = False
            for x in self.specs[check]:
                if x[0] == '!':
                    if inverse_set:
                        reasons[check] = 'Multiple Negation Unsupported'
                    inverse_set = True
                    operator_display = '=='
                    x = x[1:] # Strip off the !
                x_upper = x.upper()
                if x_upper in test_platforms:
                    reasons[x_upper] = 'Duplicate Entry or Negative of Existing Entry'
                test_platforms.add(x.upper())

            match_found = len(test_platforms.intersection(checks[check])) > 0
            # Either we didn't find the match when we were using normal "include" logic
            # or we did find the match when we wanted to exclude it
            if inverse_set == match_found:
                reasons[check] = re.sub(r'\[|\]', '', check).upper() + operator_display + ', '.join(test_platforms)

        # Check for heavy tests
        if options.all_tests or options.heavy_tests:
            if not self.specs['heavy'] and options.heavy_tests:
                reasons['heavy'] = 'NOT HEAVY'
        elif self.specs['heavy']:
            reasons['heavy'] = 'HEAVY'

        # There should only be one entry in self.specs['dof_id_bytes']
        for x in self.specs['dof_id_bytes']:
            if x != 'ALL' and x not in checks['dof_id_bytes']:
                reasons['dof_id_bytes'] = '--with-dof-id-bytes!=' + x

        # Check to make sure depend files exist
        for file in self.specs['depend_files']:
            if not os.path.isfile(os.path.join(self.specs['base_dir'], file)):
                reasons['depend_files'] = 'DEPEND FILES'

        # We calculate the exe_objects only if we need them
        if self.specs["required_objects"] and checks["exe_objects"] is None:
            checks["exe_objects"] = util.getExeObjects(self.specs["executable"])

        # Check to see if we have the required object names
        for var in self.specs['required_objects']:
            if var not in checks["exe_objects"]:
                reasons['required_objects'] = '%s not found in executable' % var
                break

        # We extract the registered apps only if we need them
        if self.specs["required_applications"] and checks["registered_apps"] is None:
            checks["registered_apps"] = util.getExeRegisteredApps(self.specs["executable"])

        # Check to see if we have the required application names
        for var in self.specs['required_applications']:
            if var not in checks["registered_apps"]:
                reasons['required_applications'] = 'App %s not registered in executable' % var
                break

        # Check to make sure required submodules are initialized
        for var in self.specs['required_submodule']:
            if var not in checks["submodules"]:
                reasons['required_submodule'] = '%s submodule not initialized' % var

        # Check to make sure environment variable exists
        for var in self.specs['env_vars']:
            if var not in os.environ:
                reasons['env_vars'] = 'ENV VAR NOT SET'

        # Check for display
        if self.specs['display_required'] and not os.getenv('DISPLAY', False):
            reasons['display_required'] = 'NO DISPLAY'

        # Check for sympy
        if self.specs['sympy'] and pkgutil.find_loader('sympy') is None:
            reasons['python_package_required'] = 'NO SYMPY'

        # Remove any matching user supplied caveats from accumulated checkRunnable caveats that
        # would normally produce a skipped test.
        caveat_list = set()
        if options.ignored_caveats:
            caveat_list = set([x.lower() for x in options.ignored_caveats.split()])

        if len(set(reasons.keys()) - caveat_list) > 0:
            tmp_reason = []
            for key, value in reasons.items():
                if key.lower() not in caveat_list:
                    tmp_reason.append(value)

            flat_reason = ', '.join(tmp_reason)

            # If the test is deleted we still need to treat this differently
            self.addCaveats(flat_reason)
            if 'deleted' in reasons.keys():
                if options.extra_info:
                    self.setStatus(self.deleted)
                else:
                    self.setStatus(self.silent)
            else:
                self.setStatus(self.skip)
            return False

        # Check the return values of the derived classes
        self._runnable = self.checkRunnable(options)
        return self._runnable
Example #38
def apply(log, parameters=None):
    """
    Apply the IMDF algorithm to a log obtaining a Petri net along with an initial and final marking

    Parameters
    -----------
    log
        Log
    parameters
        Parameters of the algorithm, including:
            Parameters.ACTIVITY_KEY -> attribute of the log to use as activity name
            (default concept:name)

    Returns
    -----------
    net
        Petri net
    initial_marking
        Initial marking
    final_marking
        Final marking
    """
    if parameters is None:
        parameters = {}
    case_id_glue = exec_utils.get_param_value(
        Parameters.CASE_ID_KEY, parameters, pmutil.constants.CASE_CONCEPT_NAME)
    activity_key = exec_utils.get_param_value(
        Parameters.ACTIVITY_KEY, parameters,
        pmutil.xes_constants.DEFAULT_NAME_KEY)
    start_timestamp_key = exec_utils.get_param_value(
        Parameters.START_TIMESTAMP_KEY, parameters, None)
    timestamp_key = exec_utils.get_param_value(
        Parameters.TIMESTAMP_KEY, parameters,
        pmutil.xes_constants.DEFAULT_TIMESTAMP_KEY)
    if pkgutil.find_loader("pandas"):
        import pandas
        from pm4py.statistics.attributes.pandas import get as pd_attributes_stats
        from pm4py.statistics.end_activities.pandas import get as pd_end_act_stats
        from pm4py.statistics.start_activities.pandas import get as pd_start_act_stats
        if isinstance(log, pandas.core.frame.DataFrame):
            dfg = df_statistics.get_dfg_graph(
                log,
                case_id_glue=case_id_glue,
                activity_key=activity_key,
                timestamp_key=timestamp_key,
                start_timestamp_key=start_timestamp_key)
            start_activities = pd_start_act_stats.get_start_activities(
                log, parameters=parameters)
            end_activities = pd_end_act_stats.get_end_activities(
                log, parameters=parameters)
            activities = pd_attributes_stats.get_attribute_values(
                log, activity_key, parameters=parameters)
            return apply_dfg(dfg,
                             activities=activities,
                             start_activities=start_activities,
                             end_activities=end_activities,
                             parameters=parameters)
    log = log_conversion.apply(log, parameters, log_conversion.TO_EVENT_LOG)
    tree = apply_tree(log, parameters=parameters)
    net, initial_marking, final_marking = tree_to_petri.apply(tree)
    return net, initial_marking, final_marking
Example #39
# This file is part of the django-environ.
#
# Copyright (c) 2021, Serghei Iakovlev <*****@*****.**>
# Copyright (c) 2013-2021, Daniele Faraglia <*****@*****.**>
#
# For the full copyright and license information, please view
# the LICENSE.txt file that was distributed with this source code.
"""This module handles import compatibility issues."""

from pkgutil import find_loader

if find_loader('simplejson'):
    import simplejson as json
else:
    import json

if find_loader('django'):
    from django import VERSION as DJANGO_VERSION
    from django.core.exceptions import ImproperlyConfigured
else:
    DJANGO_VERSION = None

    class ImproperlyConfigured(Exception):
        pass


# backwards compatibility with the django postgresql package
if DJANGO_VERSION is not None and DJANGO_VERSION < (2, 0):
    DJANGO_POSTGRES = 'django.db.backends.postgresql_psycopg2'
else:
    # https://docs.djangoproject.com/en/2.0/releases/2.0/#id1
Example #40
def import_from_string(log_string, parameters=None):
    """
    Deserialize a text/binary string representing a XES log

    Parameters
    -----------
    log_string
        String that contains the XES
    parameters
        Parameters of the algorithm, including
            Parameters.TIMESTAMP_SORT -> Specify if we should sort log by timestamp
            Parameters.TIMESTAMP_KEY -> If sort is enabled, then sort the log by using this key
            Parameters.REVERSE_SORT -> Specify in which direction the log should be sorted
            Parameters.INSERT_TRACE_INDICES -> Specify if trace indexes should be added as event attribute for each event
            Parameters.MAX_TRACES -> Specify the maximum number of traces to import from the log (read in order in the XML file)
            Parameters.SHOW_PROGRESS_BAR -> Enables/disables the progress bar (default: True)
            Parameters.ENCODING -> regulates the encoding (default: utf-8)

    Returns
    -----------
    log
        Trace log object
    """
    from lxml import etree

    if parameters is None:
        parameters = {}

    encoding = exec_utils.get_param_value(Parameters.ENCODING, parameters,
                                          constants.DEFAULT_ENCODING)
    show_progress_bar = exec_utils.get_param_value(
        Parameters.SHOW_PROGRESS_BAR, parameters, True)
    decompress_serialization = exec_utils.get_param_value(
        Parameters.DECOMPRESS_SERIALIZATION, parameters, False)

    if type(log_string) is str:
        log_string = log_string.encode(constants.DEFAULT_ENCODING)

    if pkgutil.find_loader("tqdm") and show_progress_bar:
        # first iteration: count the number of traces
        b = BytesIO(log_string)
        if decompress_serialization:
            s = gzip.GzipFile(fileobj=b, mode="rb")
        else:
            s = b
        context = etree.iterparse(s,
                                  events=[_EVENT_START, _EVENT_END],
                                  encoding=encoding)
        num_traces = count_traces(context)
    else:
        # avoid iterating over the log to count traces if "tqdm" is not used
        num_traces = 0

    # second iteration: actually read the content
    b = BytesIO(log_string)
    if decompress_serialization:
        s = gzip.GzipFile(fileobj=b, mode="rb")
    else:
        s = b
    context = etree.iterparse(s,
                              events=[_EVENT_START, _EVENT_END],
                              encoding=encoding)

    log = EventLog()
    return import_from_context(context, num_traces, log, parameters=parameters)
Example #41
from os.path import normcase
from os.path import normpath
from os.path import realpath
from pkgutil import find_loader
from tempfile import NamedTemporaryFile
from threading import Timer
from time import sleep

try:
    from PIL import Image
except ImportError:
    import Image

tesseract_cmd = r'D:\Program Files\Tesseract-OCR\tesseract.exe'

numpy_installed = find_loader('numpy') is not None
if numpy_installed:
    from numpy import ndarray

pandas_installed = find_loader('pandas') is not None
if pandas_installed:
    import pandas as pd

DEFAULT_ENCODING = 'utf-8'
LANG_PATTERN = re.compile('^[a-z_]+$')
RGB_MODE = 'RGB'
SUPPORTED_FORMATS = {
    'JPEG',
    'PNG',
    'PBM',
    'PGM',
Example #42
def import_from_context(context, num_traces, log, parameters=None):
    """
    Import a XES log from an iterparse context

    Parameters
    --------------
    context
        Iterparse context
    num_traces
        Number of traces of the XES log
    log
        Event log (empty)
    parameters
        Parameters of the algorithm

    Returns
    --------------
    log
        Event log (filled with the contents of the XES log)
    """
    if parameters is None:
        parameters = {}

    max_no_traces_to_import = exec_utils.get_param_value(
        Parameters.MAX_TRACES, parameters, sys.maxsize)
    timestamp_sort = exec_utils.get_param_value(Parameters.TIMESTAMP_SORT,
                                                parameters, False)
    timestamp_key = exec_utils.get_param_value(
        Parameters.TIMESTAMP_KEY, parameters,
        xes_constants.DEFAULT_TIMESTAMP_KEY)
    reverse_sort = exec_utils.get_param_value(Parameters.REVERSE_SORT,
                                              parameters, False)
    show_progress_bar = exec_utils.get_param_value(
        Parameters.SHOW_PROGRESS_BAR, parameters, True)

    date_parser = dt_parser.get()
    progress = None
    if pkgutil.find_loader("tqdm") and show_progress_bar:
        from tqdm.auto import tqdm
        progress = tqdm(total=num_traces,
                        desc="parsing log, completed traces :: ")

    trace = None
    event = None

    tree = {}
    compression_dictio = {}

    for tree_event, elem in context:
        if tree_event == _EVENT_START:  # starting to read
            parent = tree[
                elem.getparent()] if elem.getparent() in tree else None

            if elem.tag.endswith(xes_constants.TAG_STRING):
                if parent is not None:
                    tree = __parse_attribute(elem, parent,
                                             elem.get(xes_constants.KEY_KEY),
                                             elem.get(xes_constants.KEY_VALUE),
                                             tree, compression_dictio)
                continue

            elif elem.tag.endswith(xes_constants.TAG_DATE):
                try:
                    dt = date_parser.apply(elem.get(xes_constants.KEY_VALUE))
                    tree = __parse_attribute(elem, parent,
                                             elem.get(xes_constants.KEY_KEY),
                                             dt, tree, compression_dictio)
                except TypeError:
                    logging.info("failed to parse date: " +
                                 str(elem.get(xes_constants.KEY_VALUE)))
                except ValueError:
                    logging.info("failed to parse date: " +
                                 str(elem.get(xes_constants.KEY_VALUE)))
                continue

            elif elem.tag.endswith(xes_constants.TAG_EVENT):
                if event is not None:
                    raise SyntaxError(
                        'file contains <event> in another <event> tag')
                event = Event()
                tree[elem] = event
                continue

            elif elem.tag.endswith(xes_constants.TAG_TRACE):
                if len(log) >= max_no_traces_to_import:
                    break
                if trace is not None:
                    raise SyntaxError(
                        'file contains <trace> in another <trace> tag')
                trace = Trace()
                tree[elem] = trace.attributes
                continue

            elif elem.tag.endswith(xes_constants.TAG_FLOAT):
                if parent is not None:
                    try:
                        val = float(elem.get(xes_constants.KEY_VALUE))
                        tree = __parse_attribute(
                            elem, parent, elem.get(xes_constants.KEY_KEY), val,
                            tree, compression_dictio)
                    except ValueError:
                        logging.info("failed to parse float: " +
                                     str(elem.get(xes_constants.KEY_VALUE)))
                continue

            elif elem.tag.endswith(xes_constants.TAG_INT):
                if parent is not None:
                    try:
                        val = int(elem.get(xes_constants.KEY_VALUE))
                        tree = __parse_attribute(
                            elem, parent, elem.get(xes_constants.KEY_KEY), val,
                            tree, compression_dictio)
                    except ValueError:
                        logging.info("failed to parse int: " +
                                     str(elem.get(xes_constants.KEY_VALUE)))
                continue

            elif elem.tag.endswith(xes_constants.TAG_BOOLEAN):
                if parent is not None:
                    # "true" (case-insensitive) maps to True, anything else to False;
                    # the comparison cannot raise, so no try/except is needed
                    val = str(elem.get(xes_constants.KEY_VALUE)).lower() == "true"
                    tree = __parse_attribute(
                        elem, parent, elem.get(xes_constants.KEY_KEY), val,
                        tree, compression_dictio)
                continue

            elif elem.tag.endswith(xes_constants.TAG_LIST):
                if parent is not None:
                    # lists have no value, hence we put None as a value
                    tree = __parse_attribute(elem, parent,
                                             elem.get(xes_constants.KEY_KEY),
                                             None, tree, compression_dictio)
                continue

            elif elem.tag.endswith(xes_constants.TAG_ID):
                if parent is not None:
                    tree = __parse_attribute(elem, parent,
                                             elem.get(xes_constants.KEY_KEY),
                                             elem.get(xes_constants.KEY_VALUE),
                                             tree, compression_dictio)
                continue

            elif elem.tag.endswith(xes_constants.TAG_EXTENSION):
                if elem.get(xes_constants.KEY_NAME) is not None and elem.get(
                        xes_constants.KEY_PREFIX) is not None and elem.get(
                            xes_constants.KEY_URI) is not None:
                    log.extensions[elem.get(xes_constants.KEY_NAME)] = {
                        xes_constants.KEY_PREFIX:
                        elem.get(xes_constants.KEY_PREFIX),
                        xes_constants.KEY_URI:
                        elem.get(xes_constants.KEY_URI)
                    }
                continue

            elif elem.tag.endswith(xes_constants.TAG_GLOBAL):
                if elem.get(xes_constants.KEY_SCOPE) is not None:
                    log.omni_present[elem.get(xes_constants.KEY_SCOPE)] = {}
                    tree[elem] = log.omni_present[elem.get(
                        xes_constants.KEY_SCOPE)]
                continue

            elif elem.tag.endswith(xes_constants.TAG_CLASSIFIER):
                if elem.get(xes_constants.KEY_KEYS) is not None:
                    classifier_value = elem.get(xes_constants.KEY_KEYS)
                    if "'" in classifier_value:
                        log.classifiers[elem.get(xes_constants.KEY_NAME)] = [
                            x for x in classifier_value.split("'")
                            if x.strip()
                        ]
                    else:
                        log.classifiers[elem.get(xes_constants.KEY_NAME
                                                 )] = classifier_value.split()
                continue

            elif elem.tag.endswith(xes_constants.TAG_LOG):
                tree[elem] = log.attributes
                continue

        elif tree_event == _EVENT_END:
            if elem in tree:
                del tree[elem]
            elem.clear()
            if elem.getprevious() is not None:
                try:
                    del elem.getparent()[0]
                except TypeError:
                    pass

            if elem.tag.endswith(xes_constants.TAG_EVENT):
                if trace is not None:
                    trace.append(event)
                    event = None
                continue

            elif elem.tag.endswith(xes_constants.TAG_TRACE):
                log.append(trace)

                if progress is not None:
                    progress.update()

                trace = None
                continue

            elif elem.tag.endswith(xes_constants.TAG_LOG):
                continue

    # gracefully close progress bar
    if progress is not None:
        progress.close()
    del context, progress

    if timestamp_sort:
        log = sorting.sort_timestamp(log,
                                     timestamp_key=timestamp_key,
                                     reverse_sort=reverse_sort)

    return log
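A note on the tqdm guard used above: the progress bar is created only when the package is importable and the caller asked for one, and every update sits behind a None check, so the importer degrades gracefully without tqdm. A minimal standalone sketch of the same pattern (the loop and total are illustrative):

import pkgutil

progress = None
if pkgutil.find_loader("tqdm"):
    from tqdm.auto import tqdm
    progress = tqdm(total=100, desc="processing :: ")

for _ in range(100):
    # ... one unit of work ...
    if progress is not None:
        progress.update()

if progress is not None:
    progress.close()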
Example #43
#!/usr/bin/env python
import os
import pkgutil
import sys
from glob import glob
from setuptools import setup, find_packages, Extension
from subprocess import check_call, CalledProcessError


if not pkgutil.find_loader('relic'):
    relic_local = os.path.exists('relic')
    relic_submodule = (relic_local and
                       os.path.exists('.gitmodules') and
                       not os.listdir('relic'))
    try:
        if relic_submodule:
            check_call(['git', 'submodule', 'update', '--init', '--recursive'])
        elif not relic_local:
            check_call(['git', 'clone', 'https://github.com/spacetelescope/relic.git'])

        sys.path.insert(1, 'relic')
    except CalledProcessError as e:
        print(e)
        sys.exit(1)

import relic.release

version = relic.release.get_info()
relic.release.write_template(version, 'stistools')

setup(
Example #44
def export_log_line_by_line(log, fp_obj, encoding, parameters=None):
    """
    Exports the contents of the log line-by-line
    to a file object

    Parameters
    --------------
    log
        Event log
    fp_obj
        File object
    encoding
        Encoding
    parameters
        Parameters of the algorithm
    """
    if parameters is None:
        parameters = {}

    show_progress_bar = exec_utils.get_param_value(Parameters.SHOW_PROGRESS_BAR, parameters, True)

    progress = None
    if pkgutil.find_loader("tqdm") and show_progress_bar:
        from tqdm.auto import tqdm
        progress = tqdm(total=len(log), desc="exporting log, completed traces :: ")

    fp_obj.write(("<?xml version=\"1.0\" encoding=\"" + encoding + "\" ?>\n").encode(
        encoding))
    fp_obj.write(("<log "+xes_util.TAG_VERSION+"=\""+xes_util.VALUE_XES_VERSION+"\" "+xes_util.TAG_FEATURES+"=\""+xes_util.VALUE_XES_FEATURES+"\" "+xes_util.TAG_XMLNS+"=\""+xes_util.VALUE_XMLNS+"\">\n").encode(encoding))
    for ext_name, ext_value in log.extensions.items():
        fp_obj.write((get_tab_indent(1) + "<extension name=\"%s\" prefix=\"%s\" uri=\"%s\" />\n" % (
            ext_name, ext_value[xes_util.KEY_PREFIX], ext_value[xes_util.KEY_URI])).encode(encoding))
    for clas_name, clas_attributes in log.classifiers.items():
        fp_obj.write((get_tab_indent(1) + "<classifier name=\"%s\" keys=\"%s\" />\n" % (
            clas_name, " ".join(clas_attributes))).encode(encoding))
    for attr_name, attr_value in log.attributes.items():
        fp_obj.write(export_attribute(attr_name, attr_value, 1).encode(encoding))
    for scope in log.omni_present:
        fp_obj.write((get_tab_indent(1) + "<global scope=\"%s\">\n" % (scope)).encode(encoding))
        for attr_name, attr_value in log.omni_present[scope].items():
            fp_obj.write(export_attribute(attr_name, attr_value, 2).encode(encoding))
        fp_obj.write((get_tab_indent(1) + "</global>\n").encode(encoding))
    for trace in log:
        fp_obj.write((get_tab_indent(1) + "<trace>\n").encode(encoding))
        for attr_name, attr_value in trace.attributes.items():
            fp_obj.write(export_attribute(attr_name, attr_value, 2).encode(encoding))
        for event in trace:
            fp_obj.write((get_tab_indent(2) + "<event>\n").encode(encoding))
            for attr_name, attr_value in event.items():
                fp_obj.write(export_attribute(attr_name, attr_value, 3).encode(encoding))
            fp_obj.write((get_tab_indent(2) + "</event>\n").encode(encoding))
        fp_obj.write((get_tab_indent(1) + "</trace>\n").encode(encoding))
        if progress is not None:
            progress.update()

    # gracefully close progress bar
    if progress is not None:
        progress.close()
    del progress

    fp_obj.write("</log>\n".encode(encoding))
Example #45
try:
    import Image
except ImportError:
    from PIL import Image

import os
import sys
import subprocess
from pkgutil import find_loader
import tempfile
import shlex
from glob import iglob
from distutils.version import LooseVersion

numpy_installed = find_loader('numpy') is not None
if numpy_installed:
    from numpy import ndarray

# CHANGE THIS IF TESSERACT IS NOT IN YOUR PATH, OR IS NAMED DIFFERENTLY
tesseract_cmd = r'D:\ruanjian\tesserct\Tesseract-OCR\tesseract.exe'
RGB_MODE = 'RGB'
OSD_KEYS = {
    'Page number': ('page_num', int),
    'Orientation in degrees': ('orientation', int),
    'Rotate': ('rotate', int),
    'Orientation confidence': ('orientation_conf', float),
    'Script': ('script', str),
    'Script confidence': ('script_conf', float)
}
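The numpy_installed flag prepared above lets later code accept either a PIL Image or a numpy array without making numpy a hard dependency. A sketch of how such a guard is typically consumed (prepare_image is a hypothetical helper, not part of the snippet):

def prepare_image(image):
    # hypothetical helper: normalize input to a PIL Image;
    # short-circuiting keeps ndarray unreferenced when numpy is absent
    if numpy_installed and isinstance(image, ndarray):
        return Image.fromarray(image)
    return image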
Example #46
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import qctoolkit as qtk
from numbers import Number
import pickle
import gzip
import copy
import numpy as np
from numpy.polynomial.hermite_e import hermeval
import matplotlib.pyplot as plt
import pkgutil
from scipy.special import binom
fft_eggs_loader = pkgutil.find_loader('pyfftw')
fft_found = fft_eggs_loader is not None
if fft_found:
    import pyfftw.interfaces.numpy_fft as fft
skl_eggs_loader = pkgutil.find_loader('sklearn')
skl_found = skl_eggs_loader is not None
if skl_found:
    from sklearn.cross_validation import ShuffleSplit
    from sklearn.cross_validation import cross_val_score
    from sklearn.linear_model import Ridge
    from sklearn.kernel_ridge import KernelRidge
    from sklearn.externals.joblib import Parallel, delayed


def make_dirac_densities(x,
                         z,
                         grid_step=.01,
                         left_bound=None,
                         right_bound=None,
Example #47
#!/usr/bin/env python2

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import ctypes
import torch
import random
import numpy
import os

import pkgutil

if pkgutil.find_loader("adversarial") is not None:
    # If adversarial module is created by pip install
    QUILTING_LIB = ctypes.cdll.LoadLibrary(
        os.path.join(os.path.dirname(__file__), "libquilting.so"))
else:
    try:
        QUILTING_LIB = ctypes.cdll.LoadLibrary("libquilting.so")
    except OSError:
        # ctypes raises OSError (not ImportError) when the library cannot be loaded
        raise ImportError("libquilting.so not found. Check build script")


def generate_patches(img, patch_size, overlap):
    assert torch.is_tensor(img) and img.dim() == 3
    assert type(patch_size) == int and patch_size > 0
    assert type(overlap) == int and overlap > 0
    assert patch_size > overlap
Example #48
    :return: Normalized XHTML content.
    """
    out = StringIO()
    normalizer = HTMLNormalizer(omit_tags=omit_tags, omit_attrs=omit_attrs)
    with redirect_stdout(out):
        normalizer.feed(document)
    return out.getvalue()


###########################################################
# DATA EXTRACTION OPERATIONS
###########################################################

# sigalias: XPathResult = Union[Sequence[str], Sequence[Element]]

_USE_LXML = find_loader('lxml') is not None
if _USE_LXML:
    _logger.info('using lxml')
    from lxml import etree as ElementTree
    from lxml.etree import Element

    XPath = ElementTree.XPath
    xpath = ElementTree._Element.xpath
else:
    from xml.etree import ElementTree
    from xml.etree.ElementTree import Element

    class XPath:
        """An XPath expression evaluator.

        This class is mainly needed to compensate for the lack of ``text()``
Example #49
    with tokenizer_context(BertTokenizer.__name__):
        doc = Doc(text)

        assert doc[0].tokens == ['Merhaba', 'dünya', '.']
        assert doc[1].tokens == ['Barış', 'için', 'geldik', '.']


def test_singleton_tokenizer():
    st1 = WordTokenizer.factory('simple')
    st2 = WordTokenizer.factory('simple-tokenizer')
    st3 = WordTokenizer.factory('SimpleTokenizer')

    assert st1 == st2 == st3


if pkgutil.find_loader("transformers") is not None:
    bt1 = WordTokenizer.factory('bert')
    bt2 = WordTokenizer.factory('bert-tokenizer')
    bt3 = WordTokenizer.factory('BERTTokenizer')

    assert bt1 == bt2 == bt3

bt1 = WordTokenizer.factory('icu')
bt2 = WordTokenizer.factory('icu-tokenizer')
bt3 = WordTokenizer.factory('ICUTokenizer')

assert bt1 == bt2 == bt3


@pytest.mark.parametrize("toker", ["bert", "simple", "icu"])
def test_word_counting(toker):
Example #50
def config_postprocessing():
    section_data = {}

    section_data['naming'] = {}
    section_data['naming']['pattern'] = app.NAMING_PATTERN
    section_data['naming']['multiEp'] = int(app.NAMING_MULTI_EP)
    section_data['naming']['patternAirByDate'] = app.NAMING_ABD_PATTERN
    section_data['naming']['patternSports'] = app.NAMING_SPORTS_PATTERN
    section_data['naming']['patternAnime'] = app.NAMING_ANIME_PATTERN
    section_data['naming']['enableCustomNamingAirByDate'] = bool(
        app.NAMING_CUSTOM_ABD)
    section_data['naming']['enableCustomNamingSports'] = bool(
        app.NAMING_CUSTOM_SPORTS)
    section_data['naming']['enableCustomNamingAnime'] = bool(
        app.NAMING_CUSTOM_ANIME)
    section_data['naming']['animeMultiEp'] = int(app.NAMING_ANIME_MULTI_EP)
    section_data['naming']['animeNamingType'] = int_default(
        app.NAMING_ANIME, 3)
    section_data['naming']['stripYear'] = bool(app.NAMING_STRIP_YEAR)
    section_data['showDownloadDir'] = app.TV_DOWNLOAD_DIR
    section_data['processAutomatically'] = bool(app.PROCESS_AUTOMATICALLY)
    section_data['postponeIfSyncFiles'] = bool(app.POSTPONE_IF_SYNC_FILES)
    section_data['postponeIfNoSubs'] = bool(app.POSTPONE_IF_NO_SUBS)
    section_data['renameEpisodes'] = bool(app.RENAME_EPISODES)
    section_data['createMissingShowDirs'] = bool(app.CREATE_MISSING_SHOW_DIRS)
    section_data['addShowsWithoutDir'] = bool(app.ADD_SHOWS_WO_DIR)
    section_data['moveAssociatedFiles'] = bool(app.MOVE_ASSOCIATED_FILES)
    section_data['nfoRename'] = bool(app.NFO_RENAME)
    section_data['airdateEpisodes'] = bool(app.AIRDATE_EPISODES)
    section_data['unpack'] = bool(app.UNPACK)
    section_data['deleteRarContent'] = bool(app.DELRARCONTENTS)
    section_data['noDelete'] = bool(app.NO_DELETE)
    section_data['processMethod'] = app.PROCESS_METHOD
    section_data['reflinkAvailable'] = bool(pkgutil.find_loader('reflink'))
    section_data['autoPostprocessorFrequency'] = int(
        app.AUTOPOSTPROCESSOR_FREQUENCY)
    section_data['syncFiles'] = app.SYNC_FILES
    section_data['fileTimestampTimezone'] = app.FILE_TIMESTAMP_TIMEZONE
    section_data['allowedExtensions'] = app.ALLOWED_EXTENSIONS
    section_data['extraScripts'] = app.EXTRA_SCRIPTS
    section_data['extraScriptsUrl'] = app.EXTRA_SCRIPTS_URL
    section_data['multiEpStrings'] = {
        str(k): v
        for k, v in iteritems(common.MULTI_EP_STRINGS)
    }

    section_data['downloadHandler'] = {}
    section_data['downloadHandler']['enabled'] = bool(app.USE_DOWNLOAD_HANDLER)
    section_data['downloadHandler']['frequency'] = int_default(
        app.DOWNLOAD_HANDLER_FREQUENCY, app.DEFAULT_DOWNLOAD_HANDLER_FREQUENCY)
    section_data['downloadHandler']['minFrequency'] = int(
        app.MIN_DOWNLOAD_HANDLER_FREQUENCY)
    section_data['downloadHandler']['torrentSeedRatio'] = float(
        app.TORRENT_SEED_RATIO) if app.TORRENT_SEED_RATIO is not None else -1
    section_data['downloadHandler'][
        'torrentSeedAction'] = app.TORRENT_SEED_ACTION

    section_data['ffmpeg'] = {}
    section_data['ffmpeg']['checkStreams'] = bool(app.FFMPEG_CHECK_STREAMS)
    section_data['ffmpeg']['path'] = app.FFMPEG_PATH

    return section_data
Example #51
    def detect_cut(self, second_iteration=False):
        """
        Detect generally a cut in the graph (applying all the algorithms)
        """
        if pkgutil.find_loader("networkx"):
            import networkx as nx
        else:
            msg = "networkx is not available. inductive miner cannot be used!"
            logging.error(msg)
            raise Exception(msg)

        if self.dfg:

            this_nx_graph = transform_dfg_to_directed_nx_graph(
                self.dfg, activities=self.activities)
            conn_components = detection_utils.get_connected_components(
                self.ingoing, self.outgoing, self.activities)
            strongly_connected_components = [
                list(x)
                for x in nx.strongly_connected_components(this_nx_graph)
            ]

            xor_cut = self.detect_xor_cut(conn_components, this_nx_graph,
                                          strongly_connected_components)

            if xor_cut[0]:
                for comp in xor_cut[1]:
                    new_dfg = filter_dfg_on_act(self.dfg, comp)
                    self.detected_cut = "xor"
                    self.children.append(
                        SubtreeDFGBased(new_dfg,
                                        self.master_dfg,
                                        self.initial_dfg,
                                        comp,
                                        self.counts,
                                        self.rec_depth + 1,
                                        noise_threshold=self.noise_threshold,
                                        initial_start_activities=self.
                                        initial_start_activities,
                                        initial_end_activities=self.
                                        initial_end_activities))
            else:
                seq_cut = self.detect_sequential_cut(
                    conn_components, this_nx_graph,
                    strongly_connected_components)
                if seq_cut[0]:
                    self.detected_cut = "sequential"
                    for child in seq_cut[1]:
                        dfg_child = filter_dfg_on_act(self.dfg, child)
                        self.children.append(
                            SubtreeDFGBased(
                                dfg_child,
                                self.master_dfg,
                                self.initial_dfg,
                                child,
                                self.counts,
                                self.rec_depth + 1,
                                noise_threshold=self.noise_threshold,
                                initial_start_activities=self.
                                initial_start_activities,
                                initial_end_activities=self.
                                initial_end_activities))
                    self.put_skips_in_seq_cut()
                else:
                    par_cut = self.detect_parallel_cut(
                        conn_components, this_nx_graph,
                        strongly_connected_components)
                    if par_cut[0]:
                        self.detected_cut = "parallel"
                        for comp in par_cut[1]:
                            new_dfg = filter_dfg_on_act(self.dfg, comp)
                            self.children.append(
                                SubtreeDFGBased(
                                    new_dfg,
                                    self.master_dfg,
                                    new_dfg,
                                    comp,
                                    self.counts,
                                    self.rec_depth + 1,
                                    noise_threshold=self.noise_threshold,
                                    initial_start_activities=self.
                                    initial_start_activities,
                                    initial_end_activities=self.
                                    initial_end_activities))
                    else:
                        loop_cut = self.detect_loop_cut(
                            conn_components, this_nx_graph,
                            strongly_connected_components)
                        if loop_cut[0]:
                            if loop_cut[2]:
                                self.detected_cut = "loopCut"
                                for index_enum, child in enumerate(
                                        loop_cut[1]):
                                    dfg_child = filter_dfg_on_act(
                                        self.dfg, child)
                                    next_subtree = SubtreeDFGBased(
                                        dfg_child,
                                        self.master_dfg,
                                        self.initial_dfg,
                                        child,
                                        self.counts,
                                        self.rec_depth + 1,
                                        noise_threshold=self.noise_threshold,
                                        initial_start_activities=self.
                                        initial_start_activities,
                                        initial_end_activities=self.
                                        initial_end_activities)
                                    if loop_cut[3]:
                                        next_subtree.must_insert_skip = True
                                    self.children.append(next_subtree)
                            else:
                                self.detected_cut = "sequential"
                                self.need_loop_on_subtree = True
                                for index_enum, child in enumerate(
                                        loop_cut[1]):
                                    dfg_child = filter_dfg_on_act(
                                        self.dfg, child)
                                    next_subtree = SubtreeDFGBased(
                                        dfg_child,
                                        self.master_dfg,
                                        self.initial_dfg,
                                        child,
                                        self.counts,
                                        self.rec_depth + 1,
                                        noise_threshold=self.noise_threshold,
                                        initial_start_activities=self.
                                        initial_start_activities,
                                        initial_end_activities=self.
                                        initial_end_activities)
                                    self.children.append(next_subtree)
                                    next_subtree.must_insert_skip = True
                        else:
                            if self.noise_threshold > 0:
                                if not second_iteration:
                                    self.initialize_tree(self.dfg,
                                                         self.initial_dfg,
                                                         None,
                                                         second_iteration=True)
                                else:
                                    self.detected_cut = "flower"
                            else:
                                self.detected_cut = "flower"

        else:
            self.detected_cut = "base_xor"
Example #52
def pip_exists():
    '''
    check if pip is existed
    '''
    import pkgutil
    return bool(pkgutil.find_loader("pip"))
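Note that pkgutil.find_loader is deprecated in newer Python releases (3.12 and later); importlib.util.find_spec is the forward-compatible equivalent. A minimal sketch of the same check:

from importlib.util import find_spec

def pip_exists():
    """Check whether pip is importable without importing it."""
    return find_spec("pip") is not None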
Example #53
def apply(log: Union[EventLog, EventStream, pd.DataFrame],
          parameters: Optional[Dict[Any, Any]] = None,
          variant=DEFAULT_VARIANT) -> Dict[Tuple[str, str], float]:
    """
    Calculates DFG graph (frequency or performance) starting from a log

    Parameters
    ----------
    log
        Log
    parameters
        Possible parameters passed to the algorithms:
            Parameters.AGGREGATION_MEASURE -> performance aggregation measure (min, max, mean, median)
            Parameters.ACTIVITY_KEY -> Attribute to use as activity
            Parameters.TIMESTAMP_KEY -> Attribute to use as timestamp
    variant
        Variant of the algorithm to use, possible values:
            - Variants.NATIVE
            - Variants.FREQUENCY
            - Variants.FREQUENCY_GREEDY
            - Variants.PERFORMANCE
            - Variants.PERFORMANCE_GREEDY
            - Variants.FREQ_TRIPLES

    Returns
    -------
    dfg
        DFG graph
    """
    if parameters is None:
        parameters = {}
    activity_key = exec_utils.get_param_value(Parameters.ACTIVITY_KEY,
                                              parameters,
                                              xes_util.DEFAULT_NAME_KEY)
    start_timestamp_key = exec_utils.get_param_value(
        Parameters.START_TIMESTAMP_KEY, parameters, None)
    timestamp_key = exec_utils.get_param_value(Parameters.TIMESTAMP_KEY,
                                               parameters,
                                               xes_util.DEFAULT_TIMESTAMP_KEY)
    case_id_glue = exec_utils.get_param_value(
        Parameters.CASE_ID_KEY, parameters, pmutil.constants.CASE_CONCEPT_NAME)

    if pkgutil.find_loader("pandas"):
        import pandas
        from pm4py.algo.discovery.dfg.adapters.pandas import df_statistics
        from pm4py.objects.log.util import dataframe_utils

        if isinstance(log, pandas.core.frame.DataFrame
                      ) and not variant == Variants.FREQ_TRIPLES:
            log = dataframe_utils.convert_timestamp_columns_in_df(
                log, timest_columns=[timestamp_key])
            dfg_frequency, dfg_performance = df_statistics.get_dfg_graph(
                log,
                measure="both",
                activity_key=activity_key,
                timestamp_key=timestamp_key,
                case_id_glue=case_id_glue,
                start_timestamp_key=start_timestamp_key)
            if variant in [Variants.PERFORMANCE, Variants.PERFORMANCE_GREEDY]:
                return dfg_performance
            else:
                return dfg_frequency

    return exec_utils.get_variant(variant).apply(log_conversion.apply(
        log, parameters, log_conversion.TO_EVENT_LOG),
                                                 parameters=parameters)
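A hedged usage sketch for the entry point above, assuming an EventLog named log is in scope; the activity key shown is the XES default mentioned in the docstring:

# usage sketch: frequency DFG with an explicit activity key
dfg = apply(log, parameters={Parameters.ACTIVITY_KEY: "concept:name"})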
Example #54
def get_default_plot_backend():
    backends = {"PyQt5": "Qt5Agg", "PyQt4": "Qt4Agg"}
    for pkg in backends:
        if pkgutil.find_loader(pkg) is not None:
            return backends[pkg]
    return "TkAgg"
Example #55
File: utils.py Project: phainom/sacred
def module_exists(modname):
    """Checks if a module exists without actually importing it."""
    return pkgutil.find_loader(modname) is not None
Example #56
from pkgutil import find_loader

from django.conf.urls import include, url
from django.contrib import admin

from fir.config.base import INSTALLED_APPS
from incidents import views

# urls for core FIR components
urlpatterns = [
    url(r'^incidents/', include('incidents.urls', namespace='incidents')),
    url(r'^search/$', views.search, name='search'),
    url(r'^events/', include('incidents.custom_urls.events',
                             namespace='events')),
    url(r'^login/', views.user_login, name='login'),  # have a "main module"
    url(r'^logout/', views.user_logout, name='logout'),  # main module
    url(r'^stats/', include('incidents.custom_urls.stats', namespace='stats')),
    url(r'^ajax/', include('incidents.custom_urls.ajax', namespace='ajax')),
    url(r'^user/', include('incidents.custom_urls.user', namespace='user')),
    url(r'^dashboard/',
        include('incidents.custom_urls.dashboard', namespace='dashboard')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^$', views.dashboard_main),
]

for app in INSTALLED_APPS:
    if app.startswith('fir_'):
        app_name = app[4:]
        app_urls = '{}.urls'.format(app)
        if find_loader(app_urls):
            urlpatterns.append(
                url('^{}/'.format(app_name),
                    include(app_urls, namespace=app_name)))
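One subtlety in the loop above: find_loader('pkg.urls') imports the parent package in order to locate the submodule, and raises ImportError if the parent itself is missing; here that is safe because every probed app is already in INSTALLED_APPS. A runnable sketch of the submodule probe against the standard library:

from pkgutil import find_loader

# the parent package (json) is imported; the submodule is only located
print(find_loader('json.tool') is not None)     # True: json.tool exists
print(find_loader('json.missing') is not None)  # False: no such submodule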
Example #57
File: setup.py Project: daviddavis/bson
import pkgutil

def check_pymongo():
    if pkgutil.find_loader('pymongo'):
        return True
    return False
Example #58
import time, pkgutil, logging, sys

time.clock = time.process_time

try:
    import pm4pycvxopt
except ImportError:  # pm4pycvxopt is an optional dependency
    pass

from pm4py import util, objects, statistics, algo, visualization, evaluation, simulation

if pkgutil.find_loader("scipy"):
    pass
else:
    logging.error(
        "scipy is not available. This can lead some features of PM4Py to not work correctly!"
    )

if pkgutil.find_loader("sklearn"):
    pass
else:
    logging.error(
        "scikit-learn is not available. This can lead some features of PM4Py to not work correctly!"
    )

if pkgutil.find_loader("networkx"):
    pass
else:
    logging.error(
        "networkx is not available. This can lead some features of PM4Py to not work correctly!"
    )
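The three checks above are identical in shape; a behavior-preserving sketch collapses them into a loop over the soft dependencies:

import logging
import pkgutil

for dep, label in [("scipy", "scipy"), ("sklearn", "scikit-learn"),
                   ("networkx", "networkx")]:
    if not pkgutil.find_loader(dep):
        logging.error(
            "%s is not available. This can lead some features of PM4Py "
            "to not work correctly!" % label)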
Example #59
        if compiler.compiler_type != 'unix':
            return
        compiler.compiler[1:] = \
            filter_unsupported_flags(compiler.compiler[0],
                                     compiler.compiler[1:])
        compiler.compiler_so[1:] = \
            filter_unsupported_flags(compiler.compiler_so[0],
                                     compiler.compiler_so[1:])

    return patched


distutils.sysconfig.customize_compiler = \
    monkey_with_compiler(distutils.sysconfig.customize_compiler)

if not pkgutil.find_loader('setuptools'):
    from distutils.core import setup
    from distutils.extension import Extension
else:
    from setuptools import setup
    from setuptools.extension import Extension

# PEP 440 versioning of the RBD package on PyPI
# Bump this version, after every changeset

__version__ = '2.0.0'


def get_python_flags(libs):
    py_libs = sum(
        (libs.split()
Example #60
html_context = {
  "github_url":github_repo,
  "github_branch":git_branch_name
}

diag_fontpath = '/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf'
diag_html_image_format = 'SVG'
diag_latex_image_format = 'PNG'
diag_antialias = False

blockdiag_fontpath = nwdiag_fontpath = diag_fontpath
blockdiag_html_image_format = nwdiag_html_image_format = diag_html_image_format
blockdiag_latex_image_format = nwdiag_latex_image_format = diag_latex_image_format
blockdiag_antialias = nwdiag_antialias = diag_antialias

eggs_loader = pkgutil.find_loader('sphinxcontrib.spelling')
found = eggs_loader is not None

if found:
  extensions += ['sphinxcontrib.spelling']
  spelling_lang='en_US'
  spelling_word_list_filename='../wordlist'
  spelling_show_suggestions=True
  spelling_ignore_pypi_package_names=False
  spelling_ignore_wiki_words=True
  spelling_ignore_acronyms=True
  spelling_ignore_python_builtins=True
  spelling_ignore_importable_modules=True
  spelling_filters=[]

source_parsers = {