Ejemplo n.º 1
0
def set_default_mpi_parameters(parameters):
    """Fill in MPI-related defaults and record this process's rank/size.

    Every relaxation/fitness module gets ``use_mpi4py`` (default False) and
    ``MPMD`` (default 0).  If any module requests mpi4py, the installation
    is sanity-checked (importable, compiler wrappers colocated with
    ``mpiexec``, Open MPI vendor, matching version) and rank/ncores are
    read from MPI.COMM_WORLD; otherwise serial defaults (rank 0, 1 core)
    are stored.

    Raises ImportError when mpi4py is required but missing or misconfigured.
    Returns the (mutated) parameters object.
    """
    # BUG FIX: start from False.  This was initialized to True, which made
    # the serial fallback branch unreachable and forced the mpi4py checks
    # even when no module requested MPI.
    use_mpi4py = False
    if 'relaxations' in parameters:
        for module in parameters.relaxations:
            parameters.relaxations[module].setdefault('use_mpi4py', False)
            parameters.relaxations[module].setdefault('MPMD', 0)
            if parameters.relaxations[module].use_mpi4py:
                use_mpi4py = True
    if 'fitnesses' in parameters:
        for module in parameters.fitnesses:
            parameters.fitnesses[module].setdefault('use_mpi4py', False)
            parameters.fitnesses[module].setdefault('MPMD', 0)
            if parameters.fitnesses[module].use_mpi4py:
                use_mpi4py = True

    parameters.setdefault('mpi', {})
    if use_mpi4py:
        try:
            import mpi4py
        except ImportError:
            raise ImportError("mpi4py must be installed to use StructOpt.")
        # The wrappers recorded by mpi4py must live in the same prefix as
        # the mpiexec we will launch with.
        mpiexec_path, _ = os.path.split(
            distutils.spawn.find_executable("mpiexec"))
        for executable, path in mpi4py.get_config().items():
            if executable not in [
                    'mpicc', 'mpicxx', 'mpif77', 'mpif90', 'mpifort'
            ]:
                continue
            if mpiexec_path not in path:
                raise ImportError(
                    "mpi4py may not be configured against the same version of 'mpiexec' that you are using. The 'mpiexec' path is {mpiexec_path} and mpi4py.get_config() returns:\n{mpi4py_config}\n"
                    .format(mpiexec_path=mpiexec_path,
                            mpi4py_config=mpi4py.get_config()))
        from mpi4py import MPI
        if 'Open MPI' not in MPI.get_vendor():
            raise ImportError(
                "mpi4py must have been installed against Open MPI in order for StructOpt to function correctly."
            )
        vendor_number = ".".join([str(x) for x in MPI.get_vendor()[1]])
        if vendor_number not in mpiexec_path:
            raise ImportError(
                "The MPI version that mpi4py was compiled against does not match the version of 'mpiexec'. mpi4py's version number is {}, and mpiexec's path is {}"
                .format(MPI.get_vendor(), mpiexec_path))

        parameters.mpi.rank = MPI.COMM_WORLD.Get_rank()
        parameters.mpi.ncores = MPI.COMM_WORLD.Get_size()
    else:
        parameters.mpi.rank = 0
        parameters.mpi.ncores = 1

    return parameters
Ejemplo n.º 2
0
 def lock_error(self):
     """Record a lock failure and raise a LoggedError about the held lock.

     If this process does not hold the lock, touch ``self.lock_error_file``
     so the process holding the lock can later detect that another process
     had an error.  Always raises LoggedError pointing at likely
     MPI/mpi4py misconfiguration.
     """
     if not self.has_lock():
         assert self.lock_error_file
         try:
             # make lock_err so process holding lock can check
             # another process had an error
             with open(self.lock_error_file, 'wb'):
                 pass
         except OSError:
             # Best-effort marker only; ignore filesystem errors.
             pass
     # Include mpi4py's build configuration in the message when available.
     if mpi.get_mpi():
         import mpi4py
     else:
         mpi4py = None
     if mpi.is_main_process() and use_portalocker() is None:
         self.log.warning('install "portalocker" for better file lock control.')
     raise LoggedError(self.log,
                       "File %s is locked.\nYou may be running multiple jobs with "
                       "the same output when you intended to run with MPI. "
                       "Check that mpi4py is correctly installed and "
                       "configured (using the same mpi as mpirun/mpiexec); "
                       "e.g. try the test at\n"
                       "https://cobaya.readthedocs.io/en/latest/installation."
                       "html#mpi-parallelization-optional-but-encouraged\n"
                       + ("Your current mpi4py config is:"
                          "\n %s" % mpi4py.get_config()
                          if mpi4py is not None else
                          "mpi4py is NOT currently installed."), self.lock_file)
Ejemplo n.º 3
0
def bootstrap():
    """Export PETSC_DIR/PETSC_ARCH, generate config/pypi/__init__.py, and
    record an mpi4py requirement when an MPI compiler is present without
    mpi4py installed."""
    # Publish PETSc location/arch for the configure machinery.
    petsc_dir = os.path.abspath(os.getcwd())
    petsc_arch = get_platform() + '-python'
    os.environ['PETSC_DIR'] = petsc_dir
    os.environ['PETSC_ARCH'] = petsc_arch
    sys.path.insert(0, os.path.join(petsc_dir, 'config'))
    # Generate the package __init__.py file.
    from distutils.dir_util import mkpath
    package_dir = os.path.join('config', 'pypi')
    if not os.path.exists(package_dir):
        mkpath(package_dir)
    with open(os.path.join(package_dir, '__init__.py'), 'wt') as handle:
        handle.write(init_py)
    # Simple-minded lookup for MPI and mpi4py.
    mpi4py = mpicc = None
    try:
        import mpi4py
        mpicc = mpi4py.get_config().get('mpicc')
    except ImportError:  # mpi4py is not installed
        mpi4py = None
        mpicc = os.environ.get('MPICC') or find_executable('mpicc')
    except AttributeError:  # mpi4py is too old
        pass
    if 'setuptools' in sys.modules:
        metadata['zip_safe'] = False
        if not mpi4py and mpicc:
            metadata['install_requires'] = ['mpi4py>=1.2.2']
Ejemplo n.º 4
0
def bootstrap():
    """Set up PETSC_DIR/PETSC_ARCH, emit config/pypi/__init__.py, fold in
    $PETSC_CONFIGURE_OPTIONS, and record an mpi4py requirement when MPI is
    present but mpi4py is not."""
    # Publish PETSc location and arch for the configure machinery.
    petsc_dir = os.path.abspath(os.getcwd())
    petsc_arch = 'arch-python-' + get_platform()
    os.environ['PETSC_DIR'] = petsc_dir
    os.environ['PETSC_ARCH'] = petsc_arch
    sys.path.insert(0, os.path.join(petsc_dir, 'config'))
    sys.path.insert(0, os.path.join(petsc_dir, 'lib', 'petsc', 'conf'))
    # Generate the package __init__.py file.
    from distutils.dir_util import mkpath
    package_dir = os.path.join('config', 'pypi')
    if not os.path.exists(package_dir):
        mkpath(package_dir)
    with open(os.path.join(package_dir, '__init__.py'), 'w') as handle:
        handle.write(init_py)
    # Fold user-supplied configure options in from the environment.
    CONFIGURE_OPTIONS.extend(
        split_quoted(os.environ.get('PETSC_CONFIGURE_OPTIONS', '')))
    if '--with-mpi=0' in CONFIGURE_OPTIONS:
        return
    # Simple-minded lookup for MPI and mpi4py.
    mpi4py = mpicc = None
    try:
        import mpi4py
        mpicc = mpi4py.get_config().get('mpicc')
    except ImportError:  # mpi4py is not installed
        mpi4py = None
        mpicc = os.environ.get('MPICC') or find_executable('mpicc')
    except AttributeError:  # mpi4py is too old
        pass
    if not mpi4py and mpicc:
        metadata['install_requires'] = ['mpi4py>=1.2.2']
Ejemplo n.º 5
0
def bootstrap():
    """Prepare the in-tree PETSc build: export PETSC_DIR/PETSC_ARCH, write
    config/pypi/__init__.py, and declare an mpi4py install requirement when
    an MPI compiler exists without mpi4py."""
    # Environment for PETSc's configure machinery.
    root = os.path.abspath(os.getcwd())
    arch = get_platform() + '-python'
    os.environ['PETSC_DIR'] = root
    os.environ['PETSC_ARCH'] = arch
    sys.path.insert(0, os.path.join(root, 'config'))
    # Generate the package __init__.py file.
    from distutils.dir_util import mkpath
    pypi_dir = os.path.join('config', 'pypi')
    if not os.path.exists(pypi_dir):
        mkpath(pypi_dir)
    with open(os.path.join(pypi_dir, '__init__.py'), 'wt') as stream:
        stream.write(init_py)
    # Simple-minded lookup for MPI and mpi4py.
    mpi4py = mpicc = None
    try:
        import mpi4py
        mpicc = mpi4py.get_config().get('mpicc')
    except ImportError:  # mpi4py is not installed
        mpi4py = None
        mpicc = os.environ.get('MPICC') or find_executable('mpicc')
    except AttributeError:  # mpi4py is too old
        pass
    if 'setuptools' in sys.modules:
        metadata['zip_safe'] = False
        if not mpi4py and mpicc:
            metadata['install_requires'] = ['mpi4py>=1.2.2']
Ejemplo n.º 6
0
    def pytest_terminal_summary(self, terminalreporter, exitstatus, *args):
        """
        Hook for printing MPI info at the end of the run
        """
        # pylint: disable=unused-argument
        if not self._is_testing_mpi:
            return
        terminalreporter.section("MPI Information")
        try:
            from mpi4py import MPI, rc, get_config
        except ImportError:
            terminalreporter.write("Unable to import mpi4py")
            return

        write = terminalreporter.write
        comm = MPI.COMM_WORLD
        write("rank: {}\n".format(comm.rank))
        write("size: {}\n".format(comm.size))

        write("MPI version: {}\n".format(
            '.'.join([str(v) for v in MPI.Get_version()])
        ))
        write("MPI library version: {}\n".format(
            MPI.Get_library_version()
        ))

        vendor, vendor_version = MPI.get_vendor()
        write("MPI vendor: {} {}\n".format(
            vendor, '.'.join([str(v) for v in vendor_version])
        ))

        write("mpi4py rc: \n")
        for name, value in vars(rc).items():
            write(" {}: {}\n".format(name, value))

        write("mpi4py config:\n")
        for name, value in get_config().items():
            write(" {}: {}\n".format(name, value))
Ejemplo n.º 7
0
def mpicc_show():
    """Use ``mpicc --show`` to retrieve the mpicc arguments.

    Works with both openmpi and mpich.
    Returns a dictionary that can be passed to Extension().
    """
    import mpi4py
    import subprocess
    mpicc = mpi4py.get_config()['mpicc']
    mpicc_show = subprocess.check_output([mpicc, '-show']).decode().strip()
    # Strip the first token (the underlying compiler name) from the line.
    # FIX: use raw strings for all regex patterns -- '\S+\s' in a plain
    # string is an invalid escape sequence and raises a
    # DeprecationWarning/SyntaxWarning on modern interpreters.
    mpicc_show = re.sub(r'\S+\s', '', mpicc_show, count=1)

    def my_filter(regex, iterable, group=0):
        """Split *iterable* into (matched group values, non-matching items)."""
        matching = []
        non_matching = []
        for item in iterable:
            m = re.search(regex, item)
            if m is not None:
                matching.append(m.group(group))
            else:
                non_matching.append(item)
        return matching, non_matching

    # Classify flags: -I -> include dirs, -L -> library dirs,
    # -l/-Wl -> link flags; whatever is left is treated as compile flags
    # (and, as before, also appended to the link flags).
    cflags = split_quoted(mpicc_show)
    incdirs, cflags = my_filter(r'^-I(.*)', cflags, 1)
    libdirs, cflags = my_filter(r'^-L(.*)', cflags, 1)
    ldflags, cflags = my_filter(r'^-W?l.*', cflags)
    ldflags += cflags
    incdirs.append(mpi4py.get_include())

    return {'include_dirs': incdirs,
            'library_dirs': libdirs,
            'extra_compile_args': cflags,
            'extra_link_args': ldflags}
Ejemplo n.º 8
0
def config(dry_run=False):
    """Run PETSc's config/configure.py with the MPI C compiler autodetected
    from mpi4py, $MPICC, or the PATH (C++/Fortran disabled)."""
    log.info('PETSc: configure')
    if dry_run:
        return
    options = ['PETSC_ARCH=' + os.environ['PETSC_ARCH'],
               '--with-debugging=0',
               '--with-shared',
               '--with-cmake=0']  # not needed
    # MPI
    try:
        import mpi4py
        mpicc = mpi4py.get_config().get('mpicc')
    except (ImportError, AttributeError):
        mpicc = os.environ.get('MPICC') or find_executable('mpicc')
    options.append(('--with-cc=' + mpicc) if mpicc else '--with-mpi=0')
    options.append('--with-cxx=0')  # XXX mpicxx?
    options.append('--with-fc=0')   # XXX mpif90?
    # Run PETSc configure
    command = '%s %s %s' % (find_executable('python'),
                            os.path.join('config', 'configure.py'),
                            ' '.join(options))
    status = os.system(command)
    if status != 0:
        raise RuntimeError(status)
Ejemplo n.º 9
0
def bootstrap():
    """Bootstrap the PETSc python build: export PETSC_DIR/PETSC_ARCH,
    write config/pypi/__init__.py, merge $PETSC_CONFIGURE_OPTIONS, and
    require mpi4py when MPI is usable but mpi4py is absent."""
    # Location/arch for the configure machinery.
    root = os.path.abspath(os.getcwd())
    arch = 'arch-python-' + get_platform()
    os.environ['PETSC_DIR'] = root
    os.environ['PETSC_ARCH'] = arch
    sys.path.insert(0, os.path.join(root, 'config'))
    sys.path.insert(0, os.path.join(root, 'lib', 'petsc', 'conf'))
    # Generate the package __init__.py file.
    from distutils.dir_util import mkpath
    pypi_dir = os.path.join('config', 'pypi')
    if not os.path.exists(pypi_dir):
        mkpath(pypi_dir)
    with open(os.path.join(pypi_dir, '__init__.py'), 'w') as stream:
        stream.write(init_py)
    # Merge configure options from the environment.
    CONFIGURE_OPTIONS.extend(
        split_quoted(os.environ.get('PETSC_CONFIGURE_OPTIONS', '')))
    if '--with-mpi=0' in CONFIGURE_OPTIONS:
        return
    # Simple-minded lookup for MPI and mpi4py.
    mpi4py = mpicc = None
    try:
        import mpi4py
        mpicc = mpi4py.get_config().get('mpicc')
    except ImportError:  # mpi4py is not installed
        mpi4py = None
        mpicc = os.environ.get('MPICC') or find_executable('mpicc')
    except AttributeError:  # mpi4py is too old
        pass
    if not mpi4py and mpicc:
        metadata['install_requires'] = ['mpi4py>=1.2.2']
Ejemplo n.º 10
0
def config(dry_run=False):
    """Invoke PETSc's config/configure.py, preferring the mpicc recorded by
    mpi4py and falling back to $MPICC / the PATH; C++ and Fortran are off."""
    log.info('PETSc: configure')
    if dry_run:
        return
    options = [
        'PETSC_ARCH=' + os.environ['PETSC_ARCH'],
        '--with-debugging=0',
        '--with-shared',
        '--with-cmake=0',  # not needed
    ]
    # MPI
    try:
        import mpi4py
        mpicc = mpi4py.get_config().get('mpicc')
    except (ImportError, AttributeError):
        mpicc = os.environ.get('MPICC') or find_executable('mpicc')
    if mpicc:
        options.append('--with-cc=' + mpicc)
    else:
        options.append('--with-mpi=0')
    options.append('--with-cxx=0')  # XXX mpicxx?
    options.append('--with-fc=0')  # XXX mpif90?
    # Run PETSc configure
    interpreter = find_executable('python')
    script = os.path.join('config', 'configure.py')
    status = os.system(' '.join([interpreter, script, ' '.join(options)]))
    if status != 0:
        raise RuntimeError(status)
Ejemplo n.º 11
0
    def initialize_options(self):
        """Initialize build options, resolving the MPI C compiler wrapper.

        Resolution order: $MPICC, then the mpicc recorded by mpi4py's
        build configuration, then plain "mpicc".
        """
        try:
            compiler = str(mpi4py.get_config()['mpicc'])
        # FIX: narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt.  Exception still covers the real
        # failure modes here (mpi4py missing/old, no 'mpicc' key).
        except Exception:
            compiler = "mpicc"

        self.mpicc = os.environ.get('MPICC', compiler)

        build_ext.initialize_options(self)
Ejemplo n.º 12
0
    def initialize_options(self):
        """Initialize build options, resolving the MPI C compiler wrapper.

        Resolution order: $MPICC, then the mpicc recorded by mpi4py's
        build configuration, then plain "mpicc".
        """
        try:
            compiler = str(mpi4py.get_config()['mpicc'])
        # FIX: narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt.  Exception still covers the real
        # failure modes here (mpi4py missing/old, no 'mpicc' key).
        except Exception:
            compiler = "mpicc"

        self.mpicc = os.environ.get('MPICC', compiler)

        build_ext.initialize_options(self)
Ejemplo n.º 13
0
def config(prefix, dry_run=False):
    """Run PETSc's ./configure into *prefix*, wiring in MPI compilers.

    Compiler wrappers are taken from mpi4py's build configuration when
    available, otherwise from $MPICC/$MPICXX/$MPIF90 or the PATH.  User
    options come from the module-level CONFIGURE_OPTIONS list and are
    appended last.  With dry_run=True only the options are logged.
    Raises RuntimeError(status) when the configure subprocess fails.
    """
    log.info('PETSc: configure')
    options = [
        '--prefix=' + prefix,
        'PETSC_ARCH='+os.environ['PETSC_ARCH'],
        '--with-shared-libraries=1',
        '--with-debugging=0',
        '--with-c2html=0', # not needed
        ]
    if '--with-fc=0' in CONFIGURE_OPTIONS:
        # No Fortran compiler requested: sowing (used for Fortran stubs)
        # is unnecessary.
        options.append('--with-sowing=0')
    if '--with-mpi=0' not in CONFIGURE_OPTIONS:
        try:
            import mpi4py
            conf = mpi4py.get_config()
            mpicc  = conf.get('mpicc')
            mpicxx = conf.get('mpicxx')
            mpif90 = conf.get('mpif90')
        except (ImportError, AttributeError):
            # mpi4py missing or too old to expose get_config(): fall back
            # to the environment, then the PATH.
            mpicc  = os.environ.get('MPICC')  or find_executable('mpicc')
            mpicxx = os.environ.get('MPICXX') or find_executable('mpicxx')
            mpif90 = os.environ.get('MPIF90') or find_executable('mpif90')
        if mpicc:
            options.append('--with-cc='+mpicc)
            if '--with-cxx=0' not in CONFIGURE_OPTIONS:
                if mpicxx:
                    options.append('--with-cxx='+mpicxx)
                else:
                    options.append('--with-cxx=0')
            if '--with-fc=0' not in CONFIGURE_OPTIONS:
                if mpif90:
                    options.append('--with-fc='+mpif90)
                else:
                    options.append('--with-fc=0')
                    options.append('--with-sowing=0')
        else:
            # No MPI C wrapper found at all: build without MPI.
            options.append('--with-mpi=0')
    options.extend(CONFIGURE_OPTIONS)
    #
    log.info('configure options:')
    for opt in options:
        log.info(' '*4 + opt)
    # Run PETSc configure
    if dry_run: return
    use_config_py = False
    if use_config_py:
        # In-process configure path (currently disabled); the logger
        # singleton is reset afterwards so later stages start clean.
        import configure
        configure.petsc_configure(options)
        import logger
        logger.Logger.defaultLog = None
    else:
        python = find_executable('python2') or find_executable('python')
        command = [python, './configure'] + options
        status = os.system(" ".join(command))
        if status != 0: raise RuntimeError(status)
Ejemplo n.º 14
0
def check_mpi():
    """Sanity-check that mpi4py's recorded wrappers and vendor agree with
    the 'mpiexec' found on PATH (Open MPI required); raise ImportError
    describing any mismatch."""
    mpiexec_path, _ = os.path.split(distutils.spawn.find_executable("mpiexec"))
    wrappers = ('mpicc', 'mpicxx', 'mpif77', 'mpif90', 'mpifort')
    for tool, tool_path in mpi4py.get_config().items():
        if tool not in wrappers:
            continue
        if mpiexec_path not in tool_path:
            raise ImportError("mpi4py may not be configured against the same version of 'mpiexec' that you are using. The 'mpiexec' path is {mpiexec_path} and mpi4py.get_config() returns:\n{mpi4py_config}\n".format(mpiexec_path=mpiexec_path, mpi4py_config=mpi4py.get_config()))
    if 'Open MPI' not in MPI.get_vendor():
        raise ImportError("mpi4py must have been installed against Open MPI in order for StructOpt to function correctly.")
    vendor_number = ".".join(str(x) for x in MPI.get_vendor()[1])
    if vendor_number not in mpiexec_path:
        raise ImportError("The MPI version that mpi4py was compiled against does not match the version of 'mpiexec'. mpi4py's version number is {}, and mpiexec's path is {}".format(MPI.get_vendor(), mpiexec_path))
Ejemplo n.º 15
0
def check_mpi():
    """Verify that mpi4py's compiler wrappers live alongside the 'mpiexec'
    on PATH and that the MPI vendor is a matching Open MPI; raise
    ImportError on any mismatch."""
    mpiexec_path, _ = os.path.split(distutils.spawn.find_executable("mpiexec"))
    interesting = {'mpicc', 'mpicxx', 'mpif77', 'mpif90', 'mpifort'}
    for tool, tool_path in mpi4py.get_config().items():
        if tool in interesting and mpiexec_path not in tool_path:
            raise ImportError(
                "mpi4py may not be configured against the same version of 'mpiexec' that you are using. The 'mpiexec' path is {mpiexec_path} and mpi4py.get_config() returns:\n{mpi4py_config}\n"
                .format(mpiexec_path=mpiexec_path,
                        mpi4py_config=mpi4py.get_config()))
    if 'Open MPI' not in MPI.get_vendor():
        raise ImportError(
            "mpi4py must have been installed against Open MPI in order for StructOpt to function correctly."
        )
    vendor_number = ".".join(map(str, MPI.get_vendor()[1]))
    if vendor_number not in mpiexec_path:
        raise ImportError(
            "The MPI version that mpi4py was compiled against does not match the version of 'mpiexec'. mpi4py's version number is {}, and mpiexec's path is {}"
            .format(MPI.get_vendor(), mpiexec_path))
Ejemplo n.º 16
0
def mpi_info(cmd):
    """Query Open MPI's ``mpicc --showme:<cmd>`` and return its tokens.

    cmd: a --showme topic such as "incdirs", "libdirs", or "compile".
    Tokens beginning with -I/-L/-l have that prefix stripped; others are
    returned as-is.
    """
    import mpi4py

    config = mpi4py.get_config()
    cmd_compile = " ".join([config["mpicc"], "--showme:%s" % cmd])
    # FIX: use the popen handle as a context manager so the pipe is closed
    # deterministically (the original leaked the stream).
    with os.popen(cmd_compile) as out_stream:
        flags = out_stream.read().strip()

    out = [
        p[2:] if p.startswith(('-I', '-L', '-l')) else p
        for p in flags.split()
    ]
    return out
Ejemplo n.º 17
0
def main():
    """Build and install the pythonJDFTx Cython extension modules.

    Passing ``--GPU`` on the command line additionally builds the
    GPU-enabled extension; the flag is removed from sys.argv before
    setup() parses the arguments.  The MPI compiler wrappers recorded by
    mpi4py are exported via $CC/$CXX so the extensions are compiled with
    the same MPI installation.
    """
    enableGPU = (len(sys.argv) >= 2 and "--GPU" in sys.argv)
    # Strip our custom flag so distutils does not reject it.
    if enableGPU: sys.argv.pop(sys.argv.index("--GPU"))

    extensions = []
    if enableGPU:
        writeTargetToSourceCode(target='GPU')
        extensions.append(
            make_extension(
                "JDFTxCalcGPU",
                ["jdftx_gpu"],
                enableGPU=True,
            ), )

    writeTargetToSourceCode(target='CPU')
    extensions.append(make_extension("JDFTxCalcCPU", ["jdftx"]), )

    # Apply the same Cython compiler directives to every extension.
    for e in extensions:
        e.cython_directives = {
            "boundscheck": False,
            "wraparound": False,
            "infer_types": True
        }

    # Compile with the MPI wrappers mpi4py was built against.
    mpiCompilers = mpi4py.get_config()
    os.environ['CC'] = mpiCompilers['mpicc']
    os.environ['CXX'] = mpiCompilers['mpicxx']

    pyVersion = sys.version_info[0]

    extensions = cythonize(extensions,
                           nthreads=nthreads,
                           compiler_directives={'language_level': pyVersion})

    setup(
        **{
            "name": "pythonJDFTx",
            # "packages": [
            #     "core",
            #     "electronic",
            #     "includes",
            #     "fluid",
            # ],
            "py_modules": ["ElectronicMinimize"],
            "ext_modules": extensions,
            "cmdclass": {
                'build_ext': build_ext
            },
        })
Ejemplo n.º 18
0
def find_mpi4py_mpif90_compiler():
    """Return the Fortran-90 MPI wrapper recorded by mpi4py, or None.

    mpi4py's config does not always record a Fortran wrapper (e.g. when
    installed via `env MPICC=... pip install mpi4py` only mpicc is
    present), so as a last resort derive a candidate path from mpicc by
    swapping the 'cc' suffix for 'f90'.
    """
    conf = mpi4py.get_config()
    for key in ('mpif90', 'mpifort'):
        if key in conf:
            return conf[key]
    # Last effort: try to build the possible location from mpicc.
    if 'mpicc' in conf:
        candidate = conf['mpicc'][:-2] + 'f90'
        if os.path.exists(candidate):
            return candidate
    return None
Ejemplo n.º 19
0
def config(dry_run=False):
    """Run PETSc's in-process configure with MPI compilers autodetected.

    Wrappers come from mpi4py's recorded build configuration when present,
    otherwise from $MPICC/$MPICXX/$MPIF90 or the PATH.  Extra options are
    read from $PETSC_CONFIGURE_OPTIONS.  With dry_run=True the options
    are only logged.
    """
    log.info('PETSc: configure')
    options = [
        'PETSC_ARCH='+os.environ['PETSC_ARCH'],
        '--with-shared-libraries=1',
        '--with-debugging=0',
        '--with-c2html=0', # not needed
        ]
    # MPI
    try:
        import mpi4py
        conf = mpi4py.get_config()
        mpicc  = conf.get('mpicc')
        mpicxx = conf.get('mpicxx')
        mpif90 = conf.get('mpif90')
    except (ImportError, AttributeError):
        # mpi4py missing or too old: fall back to the environment, then PATH.
        mpicc  = os.environ.get('MPICC')  or find_executable('mpicc')
        mpicxx = os.environ.get('MPICXX') or find_executable('mpicxx')
        mpif90 = os.environ.get('MPIF90') or find_executable('mpif90')
    if mpicc:
        options.append('--with-cc='+mpicc)
        if mpicxx:
            options.append('--with-cxx='+mpicxx)
        else:
            options.append('--with-cxx=0')
        if mpif90:
            options.append('--with-fc='+mpif90)
        else:
            # No Fortran wrapper: disable Fortran and sowing (needs Fortran).
            options.append('--with-fc=0')
            options.append('--with-sowing=0')
    else:
        options.append('--with-mpi=0')
    # Extra configure options
    config_opts = os.environ.get('PETSC_CONFIGURE_OPTIONS', '')
    config_opts = split_quoted(config_opts)
    options.extend(config_opts)
    log.info('configure options:')
    for opt in options:
        log.info(' '*4 + opt)
    # Run PETSc configure
    if dry_run: return
    import configure
    configure.petsc_configure(options)
    # Reset the configure logger singleton so later build stages start clean.
    import logger
    logger.Logger.defaultLog = None
Ejemplo n.º 20
0
def main():
    """Build and install the pythonJDFTx Cython extension modules.

    A ``--GPU`` command-line flag (stripped before setup() runs) enables
    the additional GPU extension.  $CC/$CXX are pointed at the MPI
    wrappers recorded by mpi4py so compilation uses the same MPI.
    """
    enableGPU = (len(sys.argv) >= 2 and "--GPU" in sys.argv)
    if enableGPU:
        sys.argv.pop(sys.argv.index("--GPU"))

    extensions = []
    if enableGPU:
        writeTargetToSourceCode(target='GPU')
        extensions.append(
            make_extension("JDFTxCalcGPU", ["jdftx_gpu"], enableGPU=True))

    writeTargetToSourceCode(target='CPU')
    extensions.append(make_extension("JDFTxCalcCPU", ["jdftx"]))

    # Same Cython directives for every extension.
    for ext in extensions:
        ext.cython_directives = {"boundscheck": False,
                                 "wraparound": False,
                                 "infer_types": True}

    # Compile with the MPI wrappers mpi4py was built against.
    compilers = mpi4py.get_config()
    os.environ['CC'] = compilers['mpicc']
    os.environ['CXX'] = compilers['mpicxx']

    extensions = cythonize(
        extensions,
        nthreads=nthreads,
        compiler_directives={'language_level': sys.version_info[0]})

    setup(name="pythonJDFTx",
          py_modules=["ElectronicMinimize"],
          ext_modules=extensions,
          cmdclass={'build_ext': build_ext})
Ejemplo n.º 21
0
def set_default_mpi_parameters(parameters):
    """Fill in MPI-related defaults and record this process's rank/size.

    Every relaxation/fitness module gets ``use_mpi4py`` (default False)
    and ``MPMD`` (default 0).  If any module requests mpi4py, the
    installation is sanity-checked (importable, wrappers match 'mpiexec',
    Open MPI vendor, matching version) and rank/ncores are read from
    MPI.COMM_WORLD; otherwise serial defaults (rank 0, 1 core) are used.

    Raises ImportError when mpi4py is required but missing/misconfigured.
    Returns the (mutated) parameters object.
    """
    # BUG FIX: start from False.  This was initialized to True, which made
    # the serial fallback branch unreachable and forced the mpi4py checks
    # even when no module requested MPI.
    use_mpi4py = False
    if 'relaxations' in parameters:
        for module in parameters.relaxations:
            parameters.relaxations[module].setdefault('use_mpi4py', False)
            parameters.relaxations[module].setdefault('MPMD', 0)
            if parameters.relaxations[module].use_mpi4py:
                use_mpi4py = True
    if 'fitnesses' in parameters:
        for module in parameters.fitnesses:
            parameters.fitnesses[module].setdefault('use_mpi4py', False)
            parameters.fitnesses[module].setdefault('MPMD', 0)
            if parameters.fitnesses[module].use_mpi4py:
                use_mpi4py = True

    parameters.setdefault('mpi', {})
    if use_mpi4py:
        try:
            import mpi4py
        except ImportError:
            raise ImportError("mpi4py must be installed to use StructOpt.")
        # The mpi4py wrappers must live next to the mpiexec we launch with.
        mpiexec_path, _ = os.path.split(distutils.spawn.find_executable("mpiexec"))
        for executable, path in mpi4py.get_config().items():
            if executable not in ['mpicc', 'mpicxx', 'mpif77', 'mpif90', 'mpifort']:
                continue
            if mpiexec_path not in path:
                raise ImportError("mpi4py may not be configured against the same version of 'mpiexec' that you are using. The 'mpiexec' path is {mpiexec_path} and mpi4py.get_config() returns:\n{mpi4py_config}\n".format(mpiexec_path=mpiexec_path, mpi4py_config=mpi4py.get_config()))
        from mpi4py import MPI
        if 'Open MPI' not in MPI.get_vendor():
            raise ImportError("mpi4py must have been installed against Open MPI in order for StructOpt to function correctly.")
        vendor_number = ".".join([str(x) for x in MPI.get_vendor()[1]])
        if vendor_number not in mpiexec_path:
            raise ImportError("The MPI version that mpi4py was compiled against does not match the version of 'mpiexec'. mpi4py's version number is {}, and mpiexec's path is {}".format(MPI.get_vendor(), mpiexec_path))

        parameters.mpi.rank = MPI.COMM_WORLD.Get_rank()
        parameters.mpi.ncores = MPI.COMM_WORLD.Get_size()
    else:
        parameters.mpi.rank = 0
        parameters.mpi.ncores = 1

    return parameters
Ejemplo n.º 22
0
def config(dry_run=False):
    """Run PETSc's in-process configure with MPI compilers autodetected.

    Wrappers come from mpi4py's recorded build configuration when present,
    otherwise from $MPICC/$MPICXX/$MPIF90 or the PATH.  Unlike sibling
    variants, missing mpicxx/mpif90 simply omit the corresponding option
    instead of disabling the language.  Extra options come from
    $PETSC_CONFIGURE_OPTIONS.  With dry_run=True the options are only
    logged.
    """
    log.info('PETSc: configure')
    options = [
        'PETSC_ARCH=' + os.environ['PETSC_ARCH'],
        '--with-shared-libraries=1',
        '--with-debugging=0',
        '--with-c2html=0',  # not needed
        #'--with-sowing=0',
        #'--with-cmake=0',
    ]
    # MPI
    try:
        import mpi4py
        conf = mpi4py.get_config()
        mpicc = conf.get('mpicc')
        mpicxx = conf.get('mpicxx')
        mpif90 = conf.get('mpif90')
    except (ImportError, AttributeError):
        # mpi4py missing or too old: fall back to the environment, then PATH.
        mpicc = os.environ.get('MPICC') or find_executable('mpicc')
        mpicxx = os.environ.get('MPICXX') or find_executable('mpicxx')
        mpif90 = os.environ.get('MPIF90') or find_executable('mpif90')
    if mpicc:
        options.append('--with-cc=' + mpicc)
        if mpicxx:
            options.append('--with-cxx=' + mpicxx)
        if mpif90:
            options.append('--with-fc=' + mpif90)
    else:
        options.append('--with-mpi=0')
    # Extra configure options
    config_opts = os.environ.get('PETSC_CONFIGURE_OPTIONS', '')
    config_opts = split_quoted(config_opts)
    options.extend(config_opts)
    log.info('configure options:')
    for opt in options:
        log.info(' ' * 4 + opt)
    # Run PETSc configure
    if dry_run: return
    import configure
    configure.petsc_configure(options)
    # Reset the configure logger singleton so later build stages start clean.
    import logger
    logger.Logger.defaultLog = None
Ejemplo n.º 23
0
def mpi_info(cmd):
    """Extract flag values from ``mpicc -show`` output.

    cmd: "compile" (-I include dirs), "libdirs" (-L library dirs) or
    "libs" (-l libraries).  Returns the matching tokens with the two-char
    prefix stripped.

    Raises ValueError for an unknown *cmd* (previously this fell through
    and crashed with NameError after already running mpicc).
    """
    prefixes = {"compile": "-I", "libdirs": "-L", "libs": "-l"}
    try:
        startwith = prefixes[cmd]
    except KeyError:
        raise ValueError("unknown cmd: {!r}".format(cmd))

    config = mpi4py.get_config()
    cmd_compile = " ".join([config["mpicc"], "-show"])
    # Close the pipe deterministically (the original leaked the handle).
    with os.popen(cmd_compile) as out_stream:
        flags = out_stream.read().strip()
    flags = flags.replace(",", " ").split()

    return [flag[2:] for flag in flags if flag.startswith(startwith)]
Ejemplo n.º 24
0
def mpicc_showme():
    """Use ``mpicc --showme`` to retrieve the mpicc arguments.

    Works with openmpi, not mpich.
    Returns a dictionary that can be passed to Extension().
    """

    import mpi4py
    from subprocess import check_output
    mpicc = mpi4py.get_config()['mpicc']

    def showme(topic):
        # Each --showme:<topic> query prints whitespace-separated tokens.
        raw = check_output([mpicc, '--showme:' + topic])
        return raw.decode('ascii').split()

    include_dirs = showme('incdirs')
    include_dirs.append(mpi4py.get_include())

    return {'include_dirs': include_dirs,
            'library_dirs': showme('libdirs'),
            'extra_compile_args': showme('compile'),
            'extra_link_args': showme('link')}
Ejemplo n.º 25
0
def config(prefix, dry_run=False):
    """Run Underworld's ./configure.py into *prefix* with MPI autodetected.

    prefix: installation prefix, passed as --prefix.
    dry_run: when True, log the options but do not run configure.
    Raises RuntimeError(status) if configure exits non-zero.
    """
    log.info('UW: configure')
    options = [
        '--prefix=' + prefix,
        '--with-debugging=0',
    ]
    # Prefer the MPI wrappers recorded by mpi4py; fall back to the
    # environment / PATH.  FIX: also catch ImportError -- previously only
    # AttributeError was handled, so a missing mpi4py crashed configure
    # instead of falling back (sibling config() helpers catch both).
    try:
        import mpi4py
        conf = mpi4py.get_config()
        mpicc = conf.get('mpicc')
        mpicxx = conf.get('mpicxx')
    except (ImportError, AttributeError):
        mpicc = os.environ.get('MPICC') or find_executable('mpicc')
        mpicxx = os.environ.get('MPICXX') or find_executable('mpicxx')
    if mpicc:
        options.append('--cc=' + mpicc)
    if mpicxx:
        options.append('--cxx=' + mpicxx)
    options.extend(split_quoted(os.environ.get('UW_CONFIGURE_OPTIONS', '')))

    if 'PETSC_DIR' in os.environ:
        options.append('--petsc-dir=' + os.environ['PETSC_DIR'])
    else:
        try:
            import petsc
            options.append('--petsc-dir=' + petsc.get_config()['PETSC_DIR'])
        # FIX: narrowed from a bare `except:`; petsc is optional, so a
        # best-effort lookup, but SystemExit/KeyboardInterrupt should not
        # be swallowed.
        except Exception:
            pass

    log.info('configure options:')
    for opt in options:
        log.info(' ' * 4 + opt)
    # Run UW configure
    if dry_run: return
    python = find_executable('python3')
    command = [python, './configure.py'] + options
    status = os.system(" ".join(command))
    if status != 0: raise RuntimeError(status)
Ejemplo n.º 26
0
def find_compilers():
    """Return (cc, cxx): the serial compilers behind mpi4py's MPI wrappers.

    Asks each wrapper via `-show` for the command line it would run and
    takes the first token.  Returns (None, None) when mpi4py is missing
    or either wrapper cannot be executed.
    """
    cc = None
    cxx = None
    try:
        from mpi4py import MPI
        import mpi4py

        conf = mpi4py.get_config()

        def first_token(wrapper):
            # `<wrapper> -show` prints the underlying compiler invocation;
            # a wrapper that cannot run aborts the whole detection.
            try:
                shown = subprocess.check_output(
                    "{} -show".format(wrapper), shell=True, universal_newlines=True
                )
            except CalledProcessError:
                raise ImportError
            return shown.split()[0]

        serial_cc = first_token(conf["mpicc"])
        serial_cxx = first_token(conf["mpicxx"])
        # Assign only after both succeeded, matching the all-or-nothing
        # behavior of the original.
        cc, cxx = serial_cc, serial_cxx

    except ImportError:
        pass

    return (cc, cxx)
Ejemplo n.º 27
0
#!/usr/bin/env python
# Build script for the SWIG-wrapped `_compute_pi` extension, linking it
# against the same MPICH installation that mpi4py was built with.

import os
from distutils.core import setup, Extension

import mpi4py

# mpi4py's own headers (needed by the SWIG interface).
mpi4py_inc = mpi4py.get_include()

# Derive the MPI installation root from the mpicc recorded by mpi4py:
# <mpi_dir>/bin/mpicc -> <mpi_dir>/{include,lib}.
mpi_bin_dir = os.path.dirname( mpi4py.get_config()['mpicc'] )
mpi_dir = os.path.realpath( os.path.join(mpi_bin_dir,'..') )
mpi_inc_dir = os.path.join(mpi_dir, 'include')
mpi_lib_dir = os.path.join(mpi_dir, 'lib')

compute_pi = Extension('_compute_pi',
   sources = ['compute_pi.i', 'compute_pi.c'],
   #libraries = ['mpich','opa','mpl','rt','pthread'],
   libraries = ['mpich',],
   include_dirs = [mpi_inc_dir, mpi4py_inc],
   library_dirs = [mpi_lib_dir],
   runtime_library_dirs = [mpi_lib_dir],
   swig_opts=['-I' + mpi4py_inc],
   )

setup (name = 'compute_pi',
       version = '0.1',
       ext_modules = [compute_pi],
       py_modules = ["compute_pi"],
       )
Ejemplo n.º 28
0
#!/usr/bin/env python

import os
from distutils.core import setup, Extension

import mpi4py


def include_flags(dirs):
    """Turn each directory in *dirs* into a ``-I<dir>`` compiler flag."""
    flags = []
    for directory in dirs:
        flags.append('-I' + directory)
    return flags


# mpi4py's bundled headers, needed to compile against its Cython API.
mpi4py_inc = mpi4py.get_include()

# Derive the MPI root from the mpicc that built mpi4py.
mpi_bin_dir = os.path.dirname(mpi4py.get_config()['mpicc'])
mpi_dir = os.path.realpath(os.path.join(mpi_bin_dir, '..'))
# NOTE(review): the derived mpi_dir is immediately overwritten by this
# hard-coded Open MPI build path, making the lookup above dead code —
# presumably a machine-specific override; confirm before shipping.
mpi_dir = '/home/wechsung/bin/openmpi-2.1.1/build/'
mpi_inc_dir = os.path.join(mpi_dir, 'include')
mpi_lib_dir = os.path.join(mpi_dir, 'lib')

#TODO
# Hard-coded PETSc external-package checkouts for the ParMETIS headers/libs.
parmetis_dir = '/home/wechsung/bin/firedrake-dev-20181123-mkl/src/petsc/linux-gnu-c-opt/externalpackages/git.parmetis/'
parmetis_inc_dir = os.path.join(parmetis_dir, 'include')
parmetis_lib_dir = os.path.join(parmetis_dir, 'petsc-build/libparmetis')

# Same for the METIS headers.
metis_dir = '/home/wechsung/bin/firedrake-dev-20181123-mkl/src/petsc/linux-gnu-c-opt/externalpackages/git.metis/petsc-build/'
metis_inc_dir = os.path.join(metis_dir, 'include')
pyparmetis = Extension(
    '_pyparmetis',
    sources=[
Ejemplo n.º 29
0
#: - If the environment variable :data:`.LITEBIRD_SIM_MPI` is set to
#:   `1`, use MPI and fail if `mpi4py` cannot be imported;
#:
#: - If the environment variable :data:`.LITEBIRD_SIM_MPI` is set to
#:   `0`, avoid using MPI even if `mpi4py` is present;
#:
#: - If the environment variable :data:`.LITEBIRD_SIM_MPI` is *not* set,
#:   try to use MPI and gracefully revert to a serial mode of execution
#:   if `mpi4py` cannot be imported.
MPI_ENABLED = False

#: If :data:`.MPI_ENABLED` is `True`, this is a dictionary containing
#: information about the MPI configuration. Otherwise, it is an empty
#: dictionary
MPI_CONFIGURATION = {}

# Per the policy documented above: True/False when LITEBIRD_SIM_MPI is set,
# None when the variable is absent (try MPI, fall back to serial).
_enable_mpi = _check_if_enable_mpi()
if _enable_mpi in [True, None]:
    try:
        import mpi4py
        from mpi4py import MPI

        # MPI is available: expose the world communicator and its config.
        MPI_COMM_WORLD = MPI.COMM_WORLD
        MPI_ENABLED = True
        MPI_CONFIGURATION = mpi4py.get_config()
    except ImportError:
        if _enable_mpi:
            raise  # If MPI was explicitly requested, re-raise the exception
        else:
            pass  # Ignore the error
Ejemplo n.º 30
0
# ---

import mpi4py
# Smoke-test the introspection helpers; failures are deliberately ignored
# (bare except) so the remaining checks in this script still run.
try: mpi4py.get_include()
except: pass
try: mpi4py.get_config()
except: pass

# ---

def test_mpi4py_rc():
    """mpi4py.rc() accepts all documented keywords and rejects unknown ones."""
    import mpi4py.rc
    mpi4py.rc(
        initialize=True,
        threads=True,
        thread_level='multiple',
        finalize=None,
        fast_reduce=True,
        recv_mprobe=True,
        errors='exception',
    )
    # An unknown keyword must raise TypeError; anything else is a failure.
    try:
        mpi4py.rc(qwerty=False)
    except TypeError:
        pass
    else:
        raise RuntimeError

test_mpi4py_rc()

# ---

def test_mpi4py_profile():
    import mpi4py
Ejemplo n.º 31
0
import mpi4py
from mpi4py import MPI
from neuron import h

# Show how this mpi4py was built before exercising NEURON's parallel API.
print(mpi4py.get_config())
print(mpi4py.get_include())

h.load_file("stdlib.hoc")
h.load_file("stdrun.hoc")
root = 0
pc = h.ParallelContext()
my_rank = int(pc.id())
n_hosts = int(pc.nhost())
print("I am %i of %i" % (my_rank, n_hosts))
# Broadcast a single value from the root rank and print it everywhere.
vec = h.Vector(1)
if my_rank == root:
    vec.x[0] = 17
pc.broadcast(vec, root)
print(vec.x[0])
Ejemplo n.º 32
0
def build_overlap_matrix_parallel(rows, filename='./overlap_matrix_parallel_block.csv'):
    """Master/worker MPI computation of the pairwise ellipse-overlap matrix.

    Rank 0 loads the tweet data, farms one (i, j) index pair per message out
    to the worker ranks, assembles the symmetric overlap matrix, then runs a
    second master/worker phase computing per-component distance matrices
    (APD_recursive on the workers) and writes the result to CSV.

    rows: number of data rows to process; 0 means use all rows.
    filename: output CSV path; the distance matrix is saved alongside it
    with a '_distance' suffix.
    """
    start_time = timeit.default_timer()
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    rank = comm.Get_rank()
    status = MPI.Status()
    data = []
    n_indices = 0
    # print('Total CPUs: {:4d}, Rank {:4d}, rows: {:5d}, rows_per_cpu: {:5d}, row_start: {:5d}, cols_per_cpu: {:5d}, col_start: {:5d}'.format(size, rank, rows, rows_local, row_start, cols_local, col_start))

    if rank == 0:
        # Master: load and preprocess the tweet data sets.
        file_open = fu.FileOpen("data", "tweet-data.csv")
        tweet_spatial_analysis_config = cu.TweetSpatialAnalysisConfig("conf/tweet_spatial_analysis.ini")
        tdp = TweetDataPreProcessing(file_open, tweet_spatial_analysis_config)
        tdp.read_from_json("data/tweet_mean_all.json",
                           "data/tweets_median_working.json",
                           "data/tweets_median_non_working.json")
        df = tdp.tweet_data_working.df
        if rows == 0:
            rows, _ = df.shape
        mpi4py.get_config()
        np.__config__.show()
        tweet_data_working_overlap = np.zeros((rows, rows))
        # Strict lower-triangle pairs only; the matrix is symmetric.
        indices = [(i, j) for i in range(rows) for j in range(rows) if j < i]
        n_indices = len(indices)
        n_indices = comm.bcast(n_indices, root=0)
        n_sent = 0
        # Prime every worker with one pair; the tag encodes the pair index.
        for k in range(min(size - 1, n_indices)):
            i, j = indices[n_sent]
            data =  i, j, \
                    df['x'][i], df['y'][i], df['a'][i], df['b'][i], df['angle'][i], \
                    df['x'][j], df['y'][j], df['a'][j], df['b'][j], df['angle'][j]
            comm.send(data, dest=n_sent + 1, tag=n_sent)
            n_sent += 1

        # Collect one result per pair, immediately refilling idle workers.
        for k in range(n_indices):
            overlap = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
            src = status.Get_source()
            tag = status.Get_tag()

            i, j = indices[tag]
            tweet_data_working_overlap[i, j] = overlap
            if n_sent < n_indices:

                i, j = indices[n_sent]
                data =  i, j, \
                        df['x'][i], df['y'][i], df['a'][i], df['b'][i], df['angle'][i], \
                        df['x'][j], df['y'][j], df['a'][j], df['b'][j], df['angle'][j]
                comm.send(data, dest=src, tag=n_sent)
                n_sent += 1
            else:
                # Tag 60000 is the sentinel telling a worker to stop.
                comm.send(indices[0], dest=src, tag=60000)
            # print('CPU {:04d}: Received data from CPU {:04d} (Time: {:.2f} seconds)'.format(rank,  src, timeit.default_timer() - start_time))

            # if ((k + 1) / n_indices * 100) % 10 < 0.01:
            if (k + 1) % 1000000 == 0:
                print('CPU {:04d}: {:6.2f}% accomplished (Time: {:.2f} seconds)'.format(rank,  (k + 1) / n_indices * 100, timeit.default_timer() - start_time))

        # Mirror the lower triangle to obtain the full symmetric matrix.
        tweet_data_working_overlap = tweet_data_working_overlap + tweet_data_working_overlap.T
        print('CPU {:04d}: Merged received data to global numpy array (Time: {:.2f} seconds)'.format(rank, timeit.default_timer() - start_time))
        tweet_data_working_overlap = pd.DataFrame(data=tweet_data_working_overlap, columns=df['id'][0:rows], index=df['id'][0:rows])
        print('CPU {:04d}: Converted numpy array to pandas DataFrame (Time: {:.2f} seconds)'.format(rank, timeit.default_timer() - start_time))

        # Phase 2: distribute connected components for distance computation.
        data = find_components(tweet_data_working_overlap, filename, start_time)
        df, components, idx, col, idx_list, col_list = data
        number_of_components = len(components)
        number_of_components = comm.bcast(number_of_components, root=0)
        print('CPU {:04d}: Found {:d} components (Time: {:.2f} seconds)'.format(rank, number_of_components, timeit.default_timer() - start_time))

        nsent = 0
        for i in range(min(size - 1, number_of_components)):
            comm.send(df.loc[idx_list[i],col_list[i]], dest=i + 1, tag=i)
            nsent += 1

        df_dist_list = []
        for i in range(number_of_components):
            D = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
            src = status.Get_source()
            tag = status.Get_tag()

            # print('CPU {:04d}: Received data from CPU {:04d} (Time: {:.2f} seconds)'.format(rank, src, timeit.default_timer() - start_time))
            df_dist_list.append(pd.DataFrame(data=D, columns=col_list[tag], index=idx_list[tag]))

            if nsent < number_of_components:
                comm.send(df.loc[idx_list[nsent],col_list[nsent]], dest=src, tag=nsent)
            else:
                # Sentinel tag: no more components for this worker.
                comm.send([], dest=src, tag=number_of_components + 10)

            nsent += 1

        print('CPU {:04d}: Constructing distance pandas DataFrame (Time: {:.2f} seconds)'.format(0, timeit.default_timer() - start_time))
        # -1 marks pairs that ended up in different components.
        df_dist = np.ones(df.shape) * -1
        df_dist = pd.DataFrame(data=df_dist, columns=col, index=idx)
        # print(len(idx_list), len(df_dist_list))

        for i in range(len(idx_list)):
            idx_local = df_dist_list[i].index
            col_local = df_dist_list[i].columns
            df_dist.loc[idx_local, col_local] = df_dist_list[i]

        filename_dis = filename.replace('.csv', '_distance.csv')

        print('CPU {:04d}: Now saving pandas DataFrame to {:s} (Time: {:.2f} seconds)'.format(0, filename_dis, timeit.default_timer() - start_time))
        df_dist = df_dist.astype(np.int8)
        df_dist.to_csv(filename_dis, sep=',', header=True, index=True)

        elapsed_time = timeit.default_timer() - start_time
        hour = math.floor(elapsed_time / 3600)
        minute = math.floor((elapsed_time - hour * 3600) / 60)
        second = elapsed_time - 3600 * hour - 60 * minute
        print('Time elapsed: {:d} hours {:d} minutes {:.2f} seconds'.format(hour, minute, second))

    # Workers: phase 1 computes ellipse overlaps, phase 2 distance matrices.
    if rank > 0:
        n_indices = comm.bcast(n_indices, root=0)
        if rank < n_indices:
            while True:
                data = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
                tag = status.Get_tag()
                if tag == 60000:
                    break
                i, j, x1, y1, a1, b1, t1, x2, y2, a2, b2, t2 = data
                # The master only sends j < i pairs, so `overlap` is always
                # assigned before the send below.
                if i == j:
                    overlap = 0
                elif j < i:
                    if a1 == 0 or b1 == 0 or a2 == 0 or b2 == 0:
                        overlap = -1
                    else:
                        overlap = au.are_two_ellipses_overlapping(x1, y1, a1, b1, t1, x2, y2, a2, b2, t2)
                comm.send(overlap, dest=0, tag=tag)
                # print('CPU {:04d}: Sent data to CPU {:04d}'.format(rank,  0))

        number_of_components = 0
        number_of_components = comm.bcast(number_of_components, root=0)
        if rank < number_of_components:
            while True:
                A = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
                tag = status.Get_tag()
                if tag == number_of_components + 10:
                    break
                m, n = A.shape
                if m > 1:
                    print('CPU {:04d}: Starting APD for {:d}-by-{:d} matrix (Time: {:.2f} seconds)'.format(rank, m, n, timeit.default_timer() - start_time))
                D = APD_recursive(np.array(A))
                comm.send(D, dest=0, tag=tag)
Ejemplo n.º 33
0
import mpi4py

# Introspection helpers must be callable.
mpi4py.get_include()
mpi4py.get_config()

import mpi4py.rc
# All documented rc keywords accepted in one call.
mpi4py.rc(
    initialize=True,
    threads=True,
    thread_level='multiple',
    finalize=None,
    fast_reduce=True,
    recv_mprobe=True,
    errors='exception',
)
# Unknown keyword must be rejected with TypeError.
try:
    mpi4py.rc(querty=False)
except TypeError:
    pass

import mpi4py
# Exercise the profiling hooks with both supported backends and every
# combination of path/logfile arguments (repeats are intentional).
mpi4py.profile()
mpi4py.profile('mpe')
mpi4py.profile('mpe', path="/usr/lib")
mpi4py.profile('mpe', path=["/usr/lib"])
mpi4py.profile('mpe', logfile="mpi4py")
mpi4py.profile('mpe', logfile="mpi4py")
mpi4py.profile('vt')
mpi4py.profile('vt', path="/usr/lib")
mpi4py.profile('vt', path=["/usr/lib"])
mpi4py.profile('vt', logfile="mpi4py")
mpi4py.profile('vt', logfile="mpi4py")
# An unknown profiler name must raise ValueError.
try:
    mpi4py.profile('@querty')
except ValueError:
    pass
Ejemplo n.º 34
0
        join("mgmetis", "src", "include"),
        join("mgmetis", "src", "libparmetis"),
        mpi4py.get_include(),
    ]
    _parmetis_src = glob.glob(join("mgmetis", "src", "libparmetis", "*.c"))
    # NOTE: library linking is resolved in build_ext
    exts += [
        Extension(
            "mgmetis._cython.parmetis",
            [join("mgmetis", "_cython", "parmetis.pyx")] + _parmetis_src,
            include_dirs=_parmetis_incs,
        ),
        Extension(
            "mgmetis._cython.parmetis64",
            [join("mgmetis", "_cython", "parmetis64.pyx")] + _parmetis_src,
            include_dirs=_parmetis_incs,
            define_macros=[("IDXTYPEWIDTH", "64")],
        ),
    ]
    # set compiler
    os.environ["CC"] = mpi4py.get_config()["mpicc"]
    # set linker to mpi
    os.environ["LDSHARED"] = " ".join(
        [mpi4py.get_config()["mpicc"]] +
        sysconfig.get_config_var("LDSHARED").split()[1:])

# Cython compiler directives; `debug` and `exts` are defined earlier in
# this setup script (outside this excerpt).
_opts = {"language_level": 3}
if not debug:
    # Release builds drop bounds/wraparound checks for speed.
    _opts.update({"wraparound": False, "boundscheck": False})
exts = cythonize(exts, compiler_directives=_opts)
Ejemplo n.º 35
0
# ---

import mpi4py
# Smoke-test the introspection helpers; the bare excepts are deliberate in
# this exercise script so subsequent checks still run on failure.
try:
    mpi4py.get_include()
except:
    pass
try:
    mpi4py.get_config()
except:
    pass

# ---


def test_mp4py_rc():
    import mpi4py.rc
    mpi4py.rc(
        initialize=True,
        threads=True,
        thread_level='multiple',
        finalize=None,
        fast_reduce=True,
        recv_mprobe=True,
        errors='exception',
    )
    try:
        mpi4py.rc(qwerty=False)
    except TypeError:
        pass
    else:
Ejemplo n.º 36
0
 def testGetConfig(self):
     """get_config() returns a dict; if 'mpicc' is set, the path exists."""
     conf = mpi4py.get_config()
     self.assertTrue(isinstance(conf, dict))
     # 'mpicc' may legitimately be absent from the build configuration.
     mpicc = conf.get('mpicc')
     if mpicc is not None:
         self.assertTrue(os.path.exists(mpicc))
Ejemplo n.º 37
0
 def find_executables(self):
     """Force the f90 compiler to the mpif90 wrapper mpi4py was built with,
     then defer to the original find_executables implementation."""
     self.executables['compiler_f90'][0] = mpi4py.get_config()['mpif90']
     return _find_executables(self)
Ejemplo n.º 38
0
class CPL:
    # Shared attribute containing the library
    CFD_REALM = 1
    MD_REALM = 2
    GATHER_SCATTER = 1
    SEND_RECEIVE = 2
    NULL_REALM = 0
    _libname = "libcpl"
    try:
        _lib_path = os.environ["CPL_LIBRARY_PATH"]
        if os.path.exists(_lib_path + "/"+ _libname + ".so"):
            _cpl_lib = load_library(_libname, _lib_path)
        else:
            raise CPLLibraryNotFound("Compiled CPL library libcpl.so not found at " + _lib_path + "/"+ _libname + ".so")
    except KeyError as e:
        print("CPL info: ", "CPL_LIBRARY_PATH not defined. Looking in system directories...")
        try:
            _cpl_lib = cdll.LoadLibrary(_libname + ".so")
            print("CPL info: ", "Success!")
        except OSError as e:
            raise CPLLibraryNotFound("Library libcpl.so not found!")
            #TODO: Check this
            #time.sleep(2)
            #MPI.COMM_WORLD.Abort(errorcode=1)

    # Check for JSON support by cheking if load_param_file symbol exists
    JSON_SUPPORT = True
    try:
        _cpl_lib.CPLC_load_param_file
    except:
        JSON_SUPPORT = False

    def __init__(self):
        """Create an uninitialised CPL wrapper; realm is set later by init()."""
        # Realm id (CFD_REALM or MD_REALM) assigned when init() is called.
        self.realm = None
        # Pointer type placeholder reused by get_file_var for array results.
        self._var = POINTER(POINTER(c_char_p))

    # py_test_python function
    py_test_python = _cpl_lib.CPLC_test_python
    py_test_python.argtypes = \
        [c_int,
         c_double,
         c_bool,
         ndpointer(np.int32, ndim=2, flags='aligned, f_contiguous'),
         ndpointer(np.float64, ndim=2,  flags='aligned, f_contiguous'),
         ndpointer(np.int32, shape=(2,), flags='aligned, f_contiguous'),
         ndpointer(np.int32, shape=(2,), flags='aligned, f_contiguous')]

    @abortMPI
    def test_python(self, int_p, doub_p, bool_p, int_pptr, doub_pptr):
        """Round-trip scalar and 2-D array arguments through CPLC_test_python."""
        shape_i = np.array(int_pptr.shape, order='F', dtype=np.int32)
        shape_d = np.array(doub_pptr.shape, order='F', dtype=np.int32)
        self.py_test_python(
            int_p, doub_p, bool_p, int_pptr, doub_pptr, shape_i, shape_d)

    #NOTE: Using CPLC_init_Fort and Comm.f2py() and Comm.py2f() we achieve integration
    #      with MPICH and OpenMPI seamlessly. mpi4py >= 2.0.0 is needed.
    if StrictVersion(mpi4py.__version__) < StrictVersion('2.0.0'):
        raise mpi4py_version_error("Comm.f2py() and Comm.py2f()" + 
                                   " require mpi4py >= 2.0.0")

    #Detect if OpenMPI or MPICH
    mpicc=str(mpi4py.get_config()['mpicc'])
    mpishow=sp.check_output(["mpicc","-show"])
    if ("open" in mpicc or "open" in mpishow):
        MPI_version = "OPENMPI"
        ompi_info = sp.check_output("ompi_info").split("\n")
        for m in ompi_info:
            if ("Open MPI:" in m):
                ompi_major_version_no = int(m.split(":")[-1].split(".")[0])            
    elif ("mpich" in mpicc or "open" in mpishow):
        MPI_version = "MPICH"      
    else:
        print("UNKNOWN MPI VERSION FROM ", mpicc)
        MPI_version = "UNKNOWN"

    _py_init = _cpl_lib.CPLC_init_Fort
    _py_init.argtypes = [c_int, POINTER(c_int)]

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    @abortMPI
    def init(self, calling_realm):
        """Initialise the coupler for *calling_realm*; return its communicator."""
        self.realm = calling_realm
        # The C layer returns a Fortran MPI handle; convert it via mpi4py.
        fort_comm = c_int()
        self._py_init(calling_realm, byref(fort_comm))
        self.COMM = MPI.Comm.f2py(fort_comm.value)
        return self.COMM


    if JSON_SUPPORT:
        _py_load_param_file = _cpl_lib.CPLC_load_param_file
        _py_load_param_file.argtypes = [c_char_p]

    @abortMPI
    def load_param_file(self, fname):
        """Load coupler parameter file *fname* (requires JSON_SUPPORT)."""
        # NOTE(review): argtypes above declares a single c_char_p but two
        # arguments are passed here; the extra length looks like a Fortran
        # hidden string-length argument — confirm against CPLC_load_param_file.
        self._py_load_param_file(c_char_p(fname), c_int(len(fname)))


    if JSON_SUPPORT:
        _py_close_param_file = _cpl_lib.CPLC_close_param_file

    @abortMPI
    def close_param_file(self):
        """Close the parameter file previously opened by load_param_file."""
        self._py_close_param_file()


    @abortMPI
    def get_file_var(self, section, var_name, var_type):
        """Read variable *var_name* from *section* of the loaded param file.

        *var_type* indexes _CPL_GET_FILE_VARS, selecting the C getter name
        and the ctypes result type.  Array getters return a Python list;
        scalar getters return the plain value.
        """
        try:
            # BUG FIX: the except branch used to reference fun_name, which is
            # unbound when this first lookup raises KeyError, so the intended
            # error message crashed with a NameError instead.
            fun_name = _CPL_GET_FILE_VARS[var_type][0]
            var_ctype = _CPL_GET_FILE_VARS[var_type][1]

            fun = getattr(self._cpl_lib, "CPLC_" + fun_name)
            fun.argtypes =  [c_char_p,
                             c_char_p,
                             POINTER(var_ctype)]

        except KeyError:
            print ("CPL-ERROR: CPL Library function for var_type '" +
                   str(var_type) + "' not found!")
            # Re-raise the original KeyError rather than a fresh, empty one.
            raise
        else:
            self._var = var_ctype()

            if ("array" in fun_name):
                # Array getters take an extra output argument for the length.
                var_len = c_int()
                fun.argtypes.append(POINTER(c_int))
                fun(c_char_p(section), c_char_p(var_name), byref(self._var), byref(var_len))
                # NOTE: xrange implies Python 2 — kept for compatibility with
                # the rest of this module.
                a = ([self._var[i] for i in xrange(var_len.value)])
                return a
            else:
                fun(c_char_p(section), c_char_p(var_name), byref(self._var))
                return self._var.value
    _py_finalize = _cpl_lib.CPLC_finalize

    @abortMPI
    def finalize(self):
        """Shut the coupler down via CPLC_finalize."""
        self._py_finalize()

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    py_setup_cfd = _cpl_lib.CPLC_setup_cfd_Fort

    py_setup_cfd.argtypes = \
        [c_int,
         ndpointer(np.float64, shape=(3,), flags='aligned, f_contiguous'),
         ndpointer(np.float64, shape=(3,), flags='aligned, f_contiguous'),
         ndpointer(np.int32, shape=(3,), flags='aligned, f_contiguous')]

    @abortMPI
    def setup_cfd(self, icomm_grid, xyzL, xyz_orig, ncxyz):
        """Set up the CFD side of the coupler.

        *icomm_grid* may be a 3-element list/array of processor counts (a
        Cartesian communicator is then created from self.COMM) or an
        already-created Cartesian communicator.  *xyzL*, *xyz_orig* and
        *ncxyz* are coerced to the Fortran-contiguous dtypes the C layer
        expects.
        """
        # BUG FIX: the original tested `type(icomm_grid) is np.array`, which
        # is always False (np.array is a function, not a type), and used
        # `is 3` (identity) instead of `== 3` for the length comparison.
        if ((isinstance(icomm_grid, list) and len(icomm_grid) == 3)
                or (isinstance(icomm_grid, np.ndarray)
                    and icomm_grid.shape[0] == 3)):
            icomm_grid = self.COMM.Create_cart([icomm_grid[0],
                                                icomm_grid[1],
                                                icomm_grid[2]])

        # Coerce each argument to a Fortran-ordered array of the right dtype.
        if ((type(xyzL) is list) or
                (xyzL.dtype != np.float64) or
                (not xyzL.flags["F_CONTIGUOUS"])):
            xyzL = np.array(xyzL, order='F', dtype=np.float64)

        if ((type(xyz_orig) is list) or
                (xyz_orig.dtype != np.float64) or
                (not xyz_orig.flags["F_CONTIGUOUS"])):
            xyz_orig = np.array(xyz_orig, order='F', dtype=np.float64)

        if ((type(ncxyz) is list) or
                (ncxyz.dtype != np.int32) or
                (not ncxyz.flags["F_CONTIGUOUS"])):
            ncxyz = np.array(ncxyz, order='F', dtype=np.int32)

        self.py_setup_cfd(icomm_grid.py2f(), xyzL, xyz_orig, ncxyz)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 


    py_setup_md = _cpl_lib.CPLC_setup_md_Fort

    py_setup_md.argtypes = \
        [c_int,
         ndpointer(np.float64, shape=(3,), flags='aligned, f_contiguous'),
         ndpointer(np.float64, shape=(3,), flags='aligned, f_contiguous')]

    @abortMPI
    def setup_md(self, icomm_grid, xyzL, xyz_orig):
        """Set up the MD side of the coupler.

        *icomm_grid* may be a 3-element list/array of processor counts (a
        Cartesian communicator is then created from self.COMM) or an
        already-created Cartesian communicator.  *xyzL* and *xyz_orig* are
        coerced to Fortran-contiguous float64 arrays.
        """
        # BUG FIX: `type(icomm_grid) is np.array` was always False (np.array
        # is a function, not a type) and `is 3` compared identity, not value.
        if ((isinstance(icomm_grid, list) and len(icomm_grid) == 3)
                or (isinstance(icomm_grid, np.ndarray)
                    and icomm_grid.shape[0] == 3)):
            icomm_grid = self.COMM.Create_cart([icomm_grid[0],
                                                icomm_grid[1],
                                                icomm_grid[2]])

        # Coerce each argument to a Fortran-ordered float64 array.
        if ((type(xyzL) is list) or
                (xyzL.dtype != np.float64) or
                (not xyzL.flags["F_CONTIGUOUS"])):
            xyzL = np.array(xyzL, order='F', dtype=np.float64)

        if ((type(xyz_orig) is list) or
                (xyz_orig.dtype != np.float64) or
                (not xyz_orig.flags["F_CONTIGUOUS"])):
            xyz_orig = np.array(xyz_orig, order='F', dtype=np.float64)

        self.py_setup_md(icomm_grid.py2f(), xyzL, xyz_orig)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    py_proc_extents = _cpl_lib.CPLC_proc_extents
    py_proc_extents.argtypes = \
        [ndpointer(np.int32, shape=(3,), flags='aligned, f_contiguous'),
         c_int,
         ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous')]

    @abortMPI
    def proc_extents(self, coord, realm):
        """Return the six cell extents of the processor at *coord* in *realm*."""
        topo_coord = self._type_check(coord)
        result = np.zeros(6, order='F', dtype=np.int32)
        self.py_proc_extents(topo_coord, realm, result)
        return result

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    py_my_proc_extents = _cpl_lib.CPLC_my_proc_extents
    py_my_proc_extents.argtypes = \
        [ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous')]

    @abortMPI
    def my_proc_extents(self):
        """Return the six cell extents of the calling processor."""
        result = np.zeros(6, order='F', dtype=np.int32)
        self.py_my_proc_extents(result)
        return result

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    py_proc_portion = _cpl_lib.CPLC_proc_portion
    py_proc_portion.argtypes = \
        [ndpointer(np.int32, shape=(3,), flags='aligned, f_contiguous'),
         c_int,
         ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous'),
         ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous')]

    @abortMPI
    def proc_portion(self, coord, realm, limits):
        """Return the part of *limits* owned by processor *coord* in *realm*."""
        topo_coord = self._type_check(coord)
        region = self._type_check(limits)
        result = np.zeros(6, order='F', dtype=np.int32)
        self.py_proc_portion(topo_coord, realm, region, result)
        return result

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    py_my_proc_portion = _cpl_lib.CPLC_my_proc_portion
    py_my_proc_portion.argtypes = \
        [ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous'),
         ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous')]

    @abortMPI
    def my_proc_portion(self, limits):
        """Return the part of *limits* owned by the calling processor."""
        region = self._type_check(limits)
        result = np.zeros(6, order='F', dtype=np.int32)
        self.py_my_proc_portion(region, result)
        return result

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    py_map_cfd2md_coord = _cpl_lib.CPLC_map_cfd2md_coord
    py_map_cfd2md_coord.argtypes = \
        [ndpointer(np.float64, shape=(3,), flags='aligned, f_contiguous'),
         ndpointer(np.float64, shape=(3,), flags='aligned, f_contiguous')]

    @abortMPI
    def map_cfd2md_coord(self, coord_cfd):
        """Map a CFD-domain coordinate into the MD domain."""
        cfd_point = self._type_check(coord_cfd)
        md_point = np.zeros(3, order='F', dtype=np.float64)
        self.py_map_cfd2md_coord(cfd_point, md_point)
        return md_point

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    py_map_md2cfd_coord = _cpl_lib.CPLC_map_md2cfd_coord
    py_map_md2cfd_coord.argtypes = \
        [ndpointer(np.float64, shape=(3,), flags='aligned, f_contiguous'),
         ndpointer(np.float64, shape=(3,), flags='aligned, f_contiguous')]

    @abortMPI
    def map_md2cfd_coord(self, coord_md):
        """Map an MD-domain coordinate into the CFD domain."""
        md_point = self._type_check(coord_md)
        cfd_point = np.zeros(3, order='F', dtype=np.float64)
        self.py_map_md2cfd_coord(md_point, cfd_point)
        return cfd_point

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    py_map_glob2loc_cell = _cpl_lib.CPLC_map_glob2loc_cell
    py_map_glob2loc_cell.argtypes = \
        [ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous'),
         ndpointer(np.int32, shape=(3,), flags='aligned, f_contiguous'),
         ndpointer(np.int32, shape=(3,), flags='aligned, f_contiguous')]

    @abortMPI
    def map_glob2loc_cell(self, limits, glob_cell):
        """Map global cell indices to this processor's local indices."""
        region = self._type_check(limits)
        gcell = self._type_check(glob_cell)
        lcell = np.zeros(3, order='F', dtype=np.int32)
        self.py_map_glob2loc_cell(region, gcell, lcell)
        return lcell

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    py_map_cell2coord = _cpl_lib.CPLC_map_cell2coord
    py_map_cell2coord.argtypes = \
        [c_int, c_int, c_int,
         ndpointer(np.float64, shape=(3,), flags='aligned, f_contiguous')]

    @abortMPI
    def map_cell2coord(self, i, j, k):
        """Return the physical coordinate of cell (i, j, k)."""
        point = np.zeros(3, order='F', dtype=np.float64)
        self.py_map_cell2coord(i, j, k, point)
        return point

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    py_map_coord2cell = _cpl_lib.CPLC_map_coord2cell
    py_map_coord2cell.argtypes = \
        [c_double, c_double, c_double,
         ndpointer(np.int32, shape=(3,), flags='aligned, f_contiguous')]

    @abortMPI
    def map_coord2cell(self, x, y, z):
        """Return the cell indices containing physical point (x, y, z)."""
        indices = np.zeros(3, order='F', dtype=np.int32)
        self.py_map_coord2cell(x, y, z, indices)
        return indices

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    py_get_no_cells = _cpl_lib.CPLC_get_no_cells
    py_get_no_cells.argtypes = \
        [ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous'),
         ndpointer(np.int32, shape=(3,), flags='aligned, f_contiguous')]

    @abortMPI
    def get_no_cells(self, limits):
        """Return the cell counts (nx, ny, nz) inside *limits*."""
        region = self._type_check(limits)
        counts = np.zeros(3, order='F', dtype=np.int32)
        self.py_get_no_cells(region, counts)
        return counts

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    #Limits of overlap region
    py_get_olap_limits = _cpl_lib.CPLC_get_olap_limits
    py_get_olap_limits.argtypes = \
        [ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous')]

    @abortMPI
    def get_olap_limits(self):
        """Return the six cell limits of the overlap region."""
        region = np.zeros(6, order='F', dtype=np.int32)
        self.py_get_olap_limits(region)
        return region

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    #Limits of contraint region
    py_get_cnst_limits = _cpl_lib.CPLC_get_cnst_limits
    py_get_cnst_limits.argtypes = \
        [ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous')]

    @abortMPI
    def get_cnst_limits(self):
        """Return the six cell limits of the constraint region."""
        region = np.zeros(6, order='F', dtype=np.int32)
        self.py_get_cnst_limits(region)
        return region

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    #Limits of boundary region
    py_get_bnry_limits = _cpl_lib.CPLC_get_bnry_limits
    py_get_bnry_limits.argtypes = \
        [ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous')]

    @abortMPI
    def get_bnry_limits(self):
        """Return the six cell limits of the boundary region."""
        region = np.zeros(6, order='F', dtype=np.int32)
        self.py_get_bnry_limits(region)
        return region

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    py_set_timing = _cpl_lib.CPLC_set_timing
    py_set_timing.argtypes = \
        [c_int, c_int, c_double]

    #Don't call abortMPI so it can be handled nicely in Python.
    #@abortMPI
    def set_timing(self, initialstep, nsteps, dt):
        """Deprecated: always raises; timing is no longer set through CPL."""
        class DepricatedException(Exception):
            """Raise Error as function should not be used"""
        raise DepricatedException("CPL set_timing is depricated and should not be used")
        # Unreachable: kept to show the original C call this method wrapped.
        self.py_set_timing(initialstep, nsteps, dt)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    py_send = _cpl_lib.CPLC_send
    py_send.argtypes = \
        [ndpointer(np.float64, flags='aligned, f_contiguous'),
         ndpointer(np.int32, ndim=1, flags='aligned, f_contiguous'),
         ndpointer(np.int32, ndim=1, flags='aligned, f_contiguous'), 
         POINTER(c_bool)]

    py_send_min = _cpl_lib.CPLC_send_min
    py_send_min.argtypes = \
        [ndpointer(np.float64, flags='aligned, f_contiguous'),
         ndpointer(np.int32, ndim=1, flags='aligned, f_contiguous'),
         POINTER(c_bool)]

    @abortMPI
    def send(self, asend, limits=None):
        """Send *asend* through the coupler and return the C layer's send flag.

        When *limits* is None the minimal-region variant (CPLC_send_min) is
        used; otherwise the explicit-limits variant (CPLC_send).
        """
        buf = self._type_check(asend)
        shape = np.array(buf.shape, order='F', dtype=np.int32)
        flag = c_bool()
        if limits is not None:
            self.py_send(buf, shape, limits, byref(flag))
        else:
            self.py_send_min(buf, shape, byref(flag))
        return flag.value

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    py_recv = _cpl_lib.CPLC_recv
    py_recv.argtypes = \
        [ndpointer(np.float64, flags='aligned, f_contiguous'),
         ndpointer(np.int32, ndim=1, flags='aligned, f_contiguous'),
         ndpointer(np.int32, ndim=1, flags='aligned, f_contiguous'), 
         POINTER(c_bool)]

    py_recv_min = _cpl_lib.CPLC_recv_min
    py_recv_min.argtypes = \
        [ndpointer(np.float64, flags='aligned, f_contiguous'),
         ndpointer(np.int32, ndim=1, flags='aligned, f_contiguous'),
         POINTER(c_bool)]


    @abortMPI
    def recv(self, arecv, limits=None):
        """Receive into *arecv* and return (array, recv_flag).

        When *limits* is None the minimal-region variant (CPLC_recv_min) is
        used; otherwise the explicit-limits variant (CPLC_recv).
        """
        buf = self._type_check(arecv)
        shape = np.array(buf.shape, order='F', dtype=np.int32)
        flag = c_bool()
        if limits is not None:
            self.py_recv(buf, shape, limits, byref(flag))
        else:
            self.py_recv_min(buf, shape, byref(flag))
        return buf, flag.value

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    py_gather = _cpl_lib.CPLC_gather
    py_gather.argtypes = \
        [ndpointer(np.float64, flags='aligned, f_contiguous'),
         ndpointer(np.int32, ndim=1, flags='aligned, f_contiguous'),
         ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous'),
         ndpointer(np.float64, flags='aligned, f_contiguous'),
         ndpointer(np.int32, ndim=1, flags='aligned, f_contiguous')]

    @abortMPI
    def gather(self, gather_array, limits, recv_array):
        """Gather *gather_array* over *limits* into *recv_array*; return it."""
        src = self._type_check(gather_array)
        dst = self._type_check(recv_array)
        src_shape = np.array(src.shape, order='F', dtype=np.int32)
        dst_shape = np.array(dst.shape, order='F', dtype=np.int32)
        self.py_gather(src, src_shape, limits, dst, dst_shape)
        return dst

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    py_scatter = _cpl_lib.CPLC_scatter
    py_scatter.argtypes = \
        [ndpointer(np.float64, flags='aligned, f_contiguous'),
         ndpointer(np.int32, ndim=1, flags='aligned, f_contiguous'),
         ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous'),
         ndpointer(np.float64, flags='aligned, f_contiguous'),
         ndpointer(np.int32, ndim=1, flags='aligned, f_contiguous')]

    @abortMPI
    def scatter(self, scatter_array, limits, recv_array):
        """Scatter *scatter_array* over *limits* into *recv_array*; return it."""
        src = self._type_check(scatter_array)
        dst = self._type_check(recv_array)
        src_shape = np.array(src.shape, order='F', dtype=np.int32)
        dst_shape = np.array(dst.shape, order='F', dtype=np.int32)
        self.py_scatter(src, src_shape, limits, dst, dst_shape)
        return dst

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    # ctypes binding for CPLC_swaphalos: (field f64, field shape i32[]);
    # the field is exchanged in place, so it must be writable as well as
    # aligned and Fortran-contiguous.
    py_swaphalos = _cpl_lib.CPLC_swaphalos
    py_swaphalos.argtypes = \
        [ndpointer(np.float64, flags='aligned, f_contiguous'),
         ndpointer(np.int32, ndim=1, flags='aligned, f_contiguous')]

    @abortMPI
    def swaphalos(self, A):
        """Swap halo cells of field *A* via CPLC_swaphalos and return it."""
        field = self._type_check(A)
        dims = np.array(field.shape, order='F', dtype=np.int32)
        self.py_swaphalos(field, dims)
        return field

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    # ctypes binding for CPLC_overlap: no arguments; restype (c_bool) is
    # assigned inside overlap() before each call.
    py_overlap = _cpl_lib.CPLC_overlap
    py_overlap.argtypes = []

    @abortMPI
    def overlap(self):
        """Return the boolean result of CPLC_overlap for this process."""
        checker = self.py_overlap
        checker.restype = c_bool
        return checker()

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    # ctypes binding for CPLC_is_proc_inside: takes a 6-element i32
    # region (aligned, Fortran-contiguous); restype (c_bool) is assigned
    # inside is_proc_inside() before each call.
    py_is_proc_inside = _cpl_lib.CPLC_is_proc_inside
    py_is_proc_inside.argtypes = \
        [ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous')]

    @abortMPI
    def is_proc_inside(self, region):
        """Return CPLC_is_proc_inside(region) as a bool for this process."""
        checker = self.py_is_proc_inside
        checker.restype = c_bool
        return checker(self._type_check(region))

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    @abortMPI
    def get(self, var_name):
        """Return the value of CPL library variable *var_name*.

        The ctypes return type is looked up in _CPL_GET_VARS and the
        corresponding CPLC_<var_name> accessor is called.

        Raises:
            KeyError: if *var_name* is not a known library variable
                (the available names are printed first).
        """
        try:
            var_type = _CPL_GET_VARS[var_name]
            fun = getattr(self._cpl_lib, "CPLC_" + var_name)
        except KeyError:
            print ("CPL-ERROR: CPL Library function '" +
                   str(var_name) + "' not found!")
            print ("Available options include: ")
            for var in _CPL_GET_VARS:
                print(var)
            # Re-raise with the offending name attached; the previous
            # bare `raise KeyError` discarded it, making the error
            # useless to callers that catch and report it.
            raise KeyError(var_name)
        else:
            fun.restype = var_type
            return fun()

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    @abortMPI
    def set(self, var_name, value):
        """Set CPL library variable *var_name* to *value*.

        The ctypes argument type is looked up in _CPL_SET_VARS and the
        CPLC_set_<var_name> setter is invoked with the converted value.

        Raises:
            KeyError: if *var_name* is not a settable library variable.
        """
        try:
            var_type = _CPL_SET_VARS[var_name]
            fun = getattr(self._cpl_lib, "CPLC_set_" + var_name)
        except KeyError:
            print ("CPL-ERROR: CPL Library function '" +
                   str(var_name) + "' not found!")
            # Include the offending name in the exception; the previous
            # bare `raise KeyError` discarded it.
            raise KeyError(var_name)
        else:
            fun.argtypes = [var_type]
            return fun(var_type(value))

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    @abortMPI
    def _type_check(self, A):
        """Coerce *A* to an aligned, Fortran-contiguous ndarray.

        Python lists are converted first, mapping float elements to
        float64 and int elements to int32 so the resulting buffer
        matches the fixed-width ctypes signatures of the CPL library.
        Arrays that already satisfy both flags are returned unchanged.

        Fixes over the previous version: an empty list no longer raises
        IndexError, and nested lists (2D/3D input) no longer produce an
        invalid ``dtype=list`` — the element type is taken from the
        first leaf value.
        """
        if isinstance(A, list):
            # Walk into nested lists to find the first scalar element;
            # an empty list leaves `elem` as the list itself and falls
            # through to numpy's default dtype.
            elem = A
            while isinstance(elem, list) and elem:
                elem = elem[0]
            # Exact type checks (not isinstance) preserve the original
            # behaviour of leaving e.g. bool/np-scalar lists to the
            # fallback branch.
            if type(elem) is float:
                A = np.asfortranarray(A, dtype=np.float64)
            elif type(elem) is int:
                A = np.asfortranarray(A, dtype=np.int32)
            else:
                A = np.asfortranarray(A)
        # Enforce both requirements in a single pass (the old code made
        # two separate np.require calls).
        if not (A.flags["F_CONTIGUOUS"] and A.flags["ALIGNED"]):
            A = np.require(A, requirements=['F', 'A'])
        return A

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    @abortMPI
    def get_arrays(self, recv_size, send_size):
        """Allocate recv/send buffers sized for this rank's portions.

        The receive buffer is shaped by this processor's portion of the
        boundary region and the send buffer by its portion of the
        constraint region; both are Fortran-ordered float64 arrays with
        the component count as the leading dimension.
        """
        # This processor's share of the constraint region
        cnst_portion = self.my_proc_portion(self.get_cnst_limits())
        nx_c, ny_c, nz_c = self.get_no_cells(cnst_portion)

        # This processor's share of the boundary (overlap) region
        bc_portion = self.my_proc_portion(self.get_bnry_limits())
        nx_b, ny_b, nz_b = self.get_no_cells(bc_portion)

        recv_array = np.zeros((recv_size, nx_b, ny_b, nz_b),
                              order='F', dtype=np.float64)
        send_array = np.zeros((send_size, nx_c, ny_c, nz_c),
                              order='F', dtype=np.float64)
        return recv_array, send_array

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 

    @abortMPI
    def dump_region(self, region, array, fname, comm, components=None,
                    coords="mine"):
        """Dump cell-centre coordinates and field values over *region*.

        Each rank formats its portion of *array* as text lines; the
        lines are gathered with *comm* and rank 0 writes them to
        *fname*.

        Parameters
        ----------
        region : 6-element cell limits of the region to dump.
        array : 4D field (ncomponents, ncx, ncy, ncz) covering this
            rank's portion of *region*.
        fname : output file name (written by rank 0 only).
        comm : mpi4py communicator used for the gather.
        components : optional. Either a dict mapping component index to
            a transform function (``None`` values mean identity), or a
            single callable applied to every component. Defaults to all
            components, untransformed.
        coords : "mine" keeps coordinates in this realm's frame;
            anything else maps them to the other realm's frame.

        Fixes over the previous version: the mutable ``components={}``
        default (which was mutated in place, leaking state across calls
        and into the caller's dict) is replaced by ``None``; passing a
        callable no longer crashes on item-assignment to a function;
        Python-2 ``xrange`` is replaced by ``range``; the inner
        component loop no longer shadows the z-cell index ``k``.
        """
        portion = self.my_proc_portion(region)
        dx = self.get("dx")
        dy = self.get("dy")
        dz = self.get("dz")

        identity = lambda x: x
        # Build a private component->function map without touching the
        # caller's dict.
        if callable(components):
            components_dic = {c: components for c in range(array.shape[0])}
        elif not components:
            components_dic = {c: identity for c in range(array.shape[0])}
        else:
            components_dic = {c: (f if f is not None else identity)
                              for c, f in components.items()}

        lines = ""
        if self.is_proc_inside(portion):
            ncx, ncy, ncz = self.get_no_cells(portion)
            if (ncx, ncy, ncz) != array.shape[1:]:
                print ("self-Error in dump_region(): array and processor portion of different size.")
                MPI.COMM_WORLD.Abort(errorcode=1)

            for i in range(portion[0], portion[1]+1):
                for j in range(portion[2], portion[3]+1):
                    for k in range(portion[4], portion[5]+1):
                        cell_coords = self.map_cell2coord(i, j, k)
                        if coords != "mine":
                            if self.realm == CPL.CFD_REALM:
                                cell_coords = self.map_cfd2md_coord(cell_coords)
                            else:
                                cell_coords = self.map_md2cfd_coord(cell_coords)
                        i_loc, j_loc, k_loc = self.map_glob2loc_cell(portion, [i, j, k])
                        # Shift by half a cell to report cell centres.
                        lines += str(cell_coords[0] + dx/2.0) + " "\
                               + str(cell_coords[1] + dy/2.0) + " "\
                               + str(cell_coords[2] + dz/2.0)
                        for comp, func in components_dic.items():
                            lines += " " + str(func(array[comp, i_loc, j_loc, k_loc]))
                        lines += "\n"

        # Gather every rank's text and let the root rank write the file.
        lines = comm.gather(lines, root=0)
        if comm.Get_rank() == 0:
            with open(fname, "w") as file_out:
                file_out.writelines(lines)
Ejemplo n.º 39
0
# ---

# Smoke-test the mpi4py introspection helpers; failures are tolerated
# because a build may lack either of them.  The bare `except:` clauses
# are narrowed to `except Exception` so SystemExit/KeyboardInterrupt
# are no longer swallowed.
import mpi4py
try:
    mpi4py.get_include()
except Exception:
    pass
try:
    mpi4py.get_config()
except Exception:
    pass

# ---

def test_mp4py_rc():
    """Exercise mpi4py.rc(): known options succeed, unknown ones raise."""
    import mpi4py.rc
    valid_options = dict(
        initialize=True,
        threads=True,
        thread_level='multiple',
        finalize=None,
        fast_reduce=True,
        recv_mprobe=True,
        errors='exception',
    )
    mpi4py.rc(**valid_options)
    # An unrecognised option name must raise TypeError.
    try:
        mpi4py.rc(qwerty=False)
    except TypeError:
        pass
    else:
        raise RuntimeError

test_mp4py_rc()

# ---

def test_mp4py_profile():
    import mpi4py
Ejemplo n.º 40
0
def info():
    """Print NetKet/MPI diagnostic information to stdout.

    When called via::

        # python3 -m netket.tools.check_mpi
        mpi4py_available     : True
        mpi4jax_available : True
        n_nodes           : 1

    this prints out basic environment information (Python build, host
    CPU, dependency versions, jax backends and MPI configuration) to
    allow users to check whether the environment has been set up
    correctly.
    """
    print("====================================================")
    print("==         NetKet Diagnostic Informations         ==")
    print("====================================================")

    # Read the version module directly to avoid importing netket itself
    # (which would pull in heavy dependencies before we can report).
    from .. import _version

    printfmt("NetKet version", _version.version)
    print()

    print("# Python")
    printfmt("implementation", platform.python_implementation(), indent=1)
    printfmt("version", platform.python_version(), indent=1)
    printfmt("distribution", platform.python_compiler(), indent=1)
    printfmt("path", sys.executable, indent=1)
    print()

    # OS / architecture of the host machine
    print("# Host informations")
    printfmt("System      ", platform.platform(), indent=1)
    printfmt("Architecture", platform.machine(), indent=1)

    # CPU feature flags and core count; the key naming for cores varies
    # between cpuinfo backends, hence the elif chain below.
    platform_info = cpu_info()

    printfmt("AVX", platform_info["supports_avx"], indent=1)
    printfmt("AVX2", platform_info["supports_avx2"], indent=1)
    if "cpu cores" in platform_info:
        printfmt("Cores", platform_info["cpu cores"], indent=1)
    elif "cpu_cores" in platform_info:
        printfmt("Cores", platform_info["cpu_cores"], indent=1)
    elif "core_count" in platform_info:
        printfmt("Cores", platform_info["core_count"], indent=1)
    print()

    # Installed versions of NetKet's dependency stack
    print("# NetKet dependencies")
    printfmt("numpy", version("numpy"), indent=1)
    printfmt("jaxlib", version("jaxlib"), indent=1)
    printfmt("jax", version("jax"), indent=1)
    printfmt("flax", version("flax"), indent=1)
    printfmt("optax", version("optax"), indent=1)
    printfmt("numba", version("numba"), indent=1)
    printfmt("mpi4py", version("mpi4py"), indent=1)
    printfmt("mpi4jax", version("mpi4jax"), indent=1)
    printfmt("netket", version("netket"), indent=1)
    print()

    # jax backends and their devices, if jax is importable
    if is_available("jax"):
        print("# Jax ")
        import jax

        backends = _jax_backends()
        printfmt("backends", backends, indent=1)
        for backend in backends:
            printfmt(
                f"{backend}",
                [_fmt_device(dev) for dev in jax.devices(backend)],
                indent=2,
            )
        print()

    if is_available("mpi4jax"):
        print("# MPI4JAX")
        import mpi4jax

        # Newer mpi4jax exposes has_cuda_support(); older releases keep
        # the flag on the private xla_bridge module, so fall back to it.
        if hasattr(mpi4jax, "has_cuda_support"):
            printfmt("HAS_GPU_EXT", mpi4jax.has_cuda_support(), indent=1)
        elif hasattr(mpi4jax, "_src"):
            if hasattr(mpi4jax._src, "xla_bridge"):
                if hasattr(mpi4jax._src.xla_bridge, "HAS_GPU_EXT"):
                    printfmt(
                        "HAS_GPU_EXT", mpi4jax._src.xla_bridge.HAS_GPU_EXT, indent=1
                    )
        print()

    if is_available("mpi4py"):
        print("# MPI ")
        import mpi4py
        from mpi4py import MPI

        # Compiler/link configuration mpi4py was built against ...
        printfmt("mpi4py", indent=1)
        printfmt("MPICC", mpi4py.get_config()["mpicc"], indent=1)
        printfmt(
            "MPI link flags",
            get_link_flags(exec_in_terminal([mpi4py.get_config()["mpicc"], "-show"])),
            indent=1,
        )
        printfmt("MPI version", MPI.Get_version(), indent=2)
        printfmt("MPI library_version", MPI.Get_library_version(), indent=2)

        # ... versus the MPI toolchain found on PATH, to spot mismatches
        global_info = get_global_mpi_info()
        printfmt("global", indent=1)
        printfmt("MPICC", global_info["mpicc"], indent=2)
        printfmt("MPI link flags", global_info["link_flags"], indent=2)
        print()
# Field names exchanged over the MUI interfaces.
# NOTE(review): push/fetch direction is inferred from the names
# ("coarseField" pushed, "fineField" fetched) — confirm against the
# peer domain's script.
name_push = "coarseField"
name_fetch = "fineField"

# Define MUI push/fetch data types (both fields are float64)
data_types_push = {name_push: mui4py.FLOAT64}
data_types_fetch = {name_fetch: mui4py.FLOAT64}

# MUI interface creation: two 2D interfaces under the "coarseDomain"
# name, one used for fetching and one for pushing.
domain = "coarseDomain"
config2d = mui4py.Config(dimensionMUI, mui4py.FLOAT64)
iface = ["interface2D01", "interface2D02"]
MUI_Interfaces = mui4py.create_unifaces(domain, iface, config2d)
MUI_Interfaces["interface2D01"].set_data_types(data_types_fetch)
MUI_Interfaces["interface2D02"].set_data_types(data_types_push)

# Report the MPI and MUI build configuration for diagnostics
print("mpi4py.get_config(): ", mpi4py.get_config(), "\n")
print("mui4py.get_compiler_config(): ", mui4py.get_compiler_config(), "\n")
print("mui4py.get_compiler_version(): ", mui4py.get_compiler_version(), "\n")
print("mui4py.get_mpi_version(): ", mui4py.get_mpi_version(), "\n")

# Define the forget steps of MUI to reduce the memory
forgetSteps = int(5)

# Define the search radius of the RBF sampler
# The search radius should not set to a very large value so that to ensure a good convergence
rSampler = 0.4

# Define parameters of the RBF sampler
cutoff = 1e-9
iConservative = False
iPolynomial = True