Example #1
def use_mpi_machinefile(mpirun_command, mpi_file_path):
    # Use the MPI machine file if available; each line is assumed to follow one of these formats:
    # ip_address slots=n max-slots=n   --- Open MPI
    # ip_address:n                     --- MPICH, MVAPICH
    node_count = get_mpi_num_processes(mpi_file_path)

    if MPI.get_vendor()[0] == 'Open MPI':
        mpirun_command.insert(3, "-machinefile")
        # use mpirun to find where MPI is installed
        import shutil
        path = os.path.split(shutil.which('mpirun'))[0]
        if path.endswith('bin'):
            path = path[:-3]
        mpirun_command[4:4] = [
            "--prefix", path, "-x", "PATH", "-x", "LD_LIBRARY_PATH"
        ]
    elif 'MPICH' in MPI.get_vendor()[0] or 'MVAPICH' in MPI.get_vendor()[0]:
        mpirun_command.insert(3, "-f")
    else:
        raise RuntimeError(
            "mpi4py is built on top of unrecognized MPI library. "
            "Only Open MPI, MPICH, and MVAPICH are tested.")
    mpirun_command[2] = str(node_count)  # use all available nodes
    mpirun_command.insert(4, mpi_file_path)

    return mpirun_command
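A minimal usage sketch, assuming the initial command has the layout ['mpirun', '-np', '<n>', ...] that the index arithmetic above relies on; the script name and hostfile path are hypothetical:

from mpi4py import MPI  # the helper branches on MPI.get_vendor()

cmd = ["mpirun", "-np", "1", "python", "train.py"]
cmd = use_mpi_machinefile(cmd, "/path/to/hostfile")
print(" ".join(cmd))
# Under Open MPI this prints something like:
# mpirun -np <total_slots> -machinefile /path/to/hostfile --prefix /usr/local/ -x PATH -x LD_LIBRARY_PATH python train.py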
Example #2
def check_mpi():
    mpiexec_path, _ = os.path.split(distutils.spawn.find_executable("mpiexec"))
    for executable, path in mpi4py.get_config().items():
        if executable not in ['mpicc', 'mpicxx', 'mpif77', 'mpif90', 'mpifort']:
            continue
        if mpiexec_path not in path:
            raise ImportError("mpi4py may not be configured against the same version of 'mpiexec' that you are using. The 'mpiexec' path is {mpiexec_path} and mpi4py.get_config() returns:\n{mpi4py_config}\n".format(mpiexec_path=mpiexec_path, mpi4py_config=mpi4py.get_config()))
    if 'Open MPI' not in MPI.get_vendor():
        raise ImportError("mpi4py must have been installed against Open MPI in order for StructOpt to function correctly.")
    vendor_number = ".".join([str(x) for x in MPI.get_vendor()[1]])
    if vendor_number not in mpiexec_path:
        raise ImportError("The MPI version that mpi4py was compiled against does not match the version of 'mpiexec'. mpi4py's version number is {}, and mpiexec's path is {}".format(MPI.get_vendor(), mpiexec_path))
Example #3
def set_default_mpi_parameters(parameters):
    # If mpi4py is used, make sure we can import it and set the rank/size for all cores in the parameters.mpi
    use_mpi4py = True
    if 'relaxations' in parameters:
        for module in parameters.relaxations:
            parameters.relaxations[module].setdefault('use_mpi4py', False)
            parameters.relaxations[module].setdefault('MPMD', 0)
            if parameters.relaxations[module].use_mpi4py:
                use_mpi4py = True
    if 'fitnesses' in parameters:
        for module in parameters.fitnesses:
            parameters.fitnesses[module].setdefault('use_mpi4py', False)
            parameters.fitnesses[module].setdefault('MPMD', 0)
            if parameters.fitnesses[module].use_mpi4py:
                use_mpi4py = True

    parameters.setdefault('mpi', {})
    if use_mpi4py:
        try:
            import mpi4py
        except ImportError:
            raise ImportError("mpi4py must be installed to use StructOpt.")
        mpiexec_path, _ = os.path.split(
            distutils.spawn.find_executable("mpiexec"))
        for executable, path in mpi4py.get_config().items():
            if executable not in [
                    'mpicc', 'mpicxx', 'mpif77', 'mpif90', 'mpifort'
            ]:
                continue
            if mpiexec_path not in path:
                raise ImportError(
                    "mpi4py may not be configured against the same version of 'mpiexec' that you are using. The 'mpiexec' path is {mpiexec_path} and mpi4py.get_config() returns:\n{mpi4py_config}\n"
                    .format(mpiexec_path=mpiexec_path,
                            mpi4py_config=mpi4py.get_config()))
        from mpi4py import MPI
        if 'Open MPI' not in MPI.get_vendor():
            raise ImportError(
                "mpi4py must have been installed against Open MPI in order for StructOpt to function correctly."
            )
        vendor_number = ".".join([str(x) for x in MPI.get_vendor()[1]])
        if vendor_number not in mpiexec_path:
            raise ImportError(
                "The MPI version that mpi4py was compiled against does not match the version of 'mpiexec'. mpi4py's version number is {}, and mpiexec's path is {}"
                .format(MPI.get_vendor(), mpiexec_path))

        parameters.mpi.rank = MPI.COMM_WORLD.Get_rank()
        parameters.mpi.ncores = MPI.COMM_WORLD.Get_size()
    else:
        parameters.mpi.rank = 0
        parameters.mpi.ncores = 1

    return parameters
Example #4
def main(split_into=2, nloops=3):
    world = MPI.COMM_WORLD
    rank = world.Get_rank()
    size = world.Get_size()
    if size < split_into:
        raise ValueError("The number of cores passed to 'mpiexec' must be greater than the number of desired communicators.")
    cores_per_comm = size // split_into

    # Create fake data for input for each of the different processes we will spawn
    multipliers = [i+1 for i in range(split_into)]
    if 'Open MPI' not in MPI.get_vendor():
        colors = [(i+1)//split_into for i in range(split_into)]
        data_by_process = [(str(multipliers[i]), str(colors[i])) for i in range(split_into)]
    else:
        data_by_process = [(str(multipliers[i]),) for i in range(split_into)]


    if rank == 0:
        print("At each iteration we will spawn {} workers with {} cores each out of a total of {} cores.".format(split_into, cores_per_comm, size))
        print("Those {} split communicators will get the following as input:".format(split_into))
        for i in range(split_into):
            print("    Communicator {}: {}".format(i, data_by_process[i]))

        for i in range(nloops):
            print("Iteration {}...".format(i))
            spawn_multiple(split_into, cores_per_comm, data_by_process)
Example #5
def getlibraryinfo():
    from mpi4py import MPI
    info = "MPI %d.%d" % MPI.Get_version()
    name, version = MPI.get_vendor()
    if name != "unknown":
        info += (" (%s %s)" % (name, '%d.%d.%d' % version))
    return info
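For reference, a call such as the following returns a one-line summary; the exact vendor name and version numbers depend on the local MPI build:

print(getlibraryinfo())  # e.g. "MPI 3.1 (Open MPI 4.1.5)"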
Example #6
def gather_mpi_arguments(hostfile, params):
    from mpi4py import MPI
    vendor = MPI.get_vendor()
    print_and_log(['MPI detected: %s' % str(vendor)], 'debug', logger)
    if vendor[0] == 'Open MPI':
        mpi_args = ['mpirun']
        if os.getenv('LD_LIBRARY_PATH'):
            mpi_args += ['-x', 'LD_LIBRARY_PATH']
        if os.getenv('PATH'):
            mpi_args += ['-x', 'PATH']
        if os.getenv('PYTHONPATH'):
            mpi_args += ['-x', 'PYTHONPATH']
        if os.path.exists(hostfile):
            mpi_args += ['-hostfile', hostfile]
    elif vendor[0] == 'Microsoft MPI':
        mpi_args = ['mpiexec']
        if os.path.exists(hostfile):
            mpi_args += ['-machinefile', hostfile]
    elif vendor[0] == 'MPICH2':
        mpi_args = ['mpiexec']
        if os.path.exists(hostfile):
            mpi_args += ['-f', hostfile]
    elif vendor[0] == 'MPICH':
        mpi_args = ['mpiexec']
        if os.path.exists(hostfile):
            mpi_args += ['-f', hostfile]
    else:
        print_and_log([
                        '%s may not yet be properly implemented: contact the developers' %
                        vendor[0]], 'error', logger)
        mpi_args = ['mpirun']
        if os.path.exists(hostfile):
            mpi_args += ['-hostfile', hostfile]
    return mpi_args
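A hedged usage sketch: build the vendor-appropriate launcher prefix and start a worker script on 4 ranks. The script name, hostfile path, and the empty params argument are assumptions, not part of the snippet above:

import subprocess

mpi_args = gather_mpi_arguments('/home/user/hostfile', params={})
subprocess.check_call(mpi_args + ['-np', '4', 'python', 'worker.py'])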
Example #7
def main(split_into=2, nloops=3):
    world = MPI.COMM_WORLD
    rank = world.Get_rank()
    size = world.Get_size()
    if size < split_into:
        raise ValueError(
            "The number of cores passed to 'mpiexec' must be greater than the number of desired communicators."
        )
    cores_per_comm = size // split_into

    # Create fake data for input for each of the different processes we will spawn
    multipliers = [i + 1 for i in range(split_into)]
    if 'Open MPI' not in MPI.get_vendor():
        colors = [(i + 1) // split_into for i in range(split_into)]
        data_by_process = [(str(multipliers[i]), str(colors[i]))
                           for i in range(split_into)]
    else:
        data_by_process = [(str(multipliers[i]), ) for i in range(split_into)]

    if rank == 0:
        print(
            "At each iteration we will spawn {} workers with {} cores each out of a total of {} cores."
            .format(split_into, cores_per_comm, size))
        print("Those {} split communicators will get the following as input:".
              format(split_into))
        for i in range(split_into):
            print("    Communicator {}: {}".format(i, data_by_process[i]))

        for i in range(nloops):
            print("Iteration {}...".format(i))
            spawn_multiple(split_into, cores_per_comm, data_by_process)
Example #8
    def pytest_terminal_summary(self, terminalreporter, exitstatus, *args):
        """
        Hook for printing MPI info at the end of the run
        """
        # pylint: disable=unused-argument
        if self._is_testing_mpi:
            terminalreporter.section("MPI Information")
            try:
                from mpi4py import MPI, rc, get_config
            except ImportError:
                terminalreporter.write("Unable to import mpi4py")
            else:
                comm = MPI.COMM_WORLD
                terminalreporter.write("rank: {}\n".format(comm.rank))
                terminalreporter.write("size: {}\n".format(comm.size))

                terminalreporter.write("MPI version: {}\n".format(
                    '.'.join([str(v) for v in MPI.Get_version()])
                ))
                terminalreporter.write("MPI library version: {}\n".format(
                    MPI.Get_library_version()
                ))

                vendor, vendor_version = MPI.get_vendor()
                terminalreporter.write("MPI vendor: {} {}\n".format(
                    vendor, '.'.join([str(v) for v in vendor_version])
                ))

                terminalreporter.write("mpi4py rc: \n")
                for name, value in vars(rc).items():
                    terminalreporter.write(" {}: {}\n".format(name, value))

                terminalreporter.write("mpi4py config:\n")
                for name, value in get_config().items():
                    terminalreporter.write(" {}: {}\n".format(name, value))
Example #9
def getlibraryinfo():
    from mpi4py import MPI
    info = "MPI %d.%d" % MPI.Get_version()
    name, version = MPI.get_vendor()
    if name != "unknown":
        info += (" (%s %s)" % (name, '%d.%d.%d' % version))
    return info
Example #10
def mpi_launch(subtask,
               filename,
               nb_cpu,
               nb_gpu,
               use_gpu,
               output=None,
               benchmark=None,
               sim_same_elec=None):
    args = ['mpirun']

    from mpi4py import MPI
    vendor = MPI.get_vendor()
    if vendor[0] == 'Open MPI':
        args = ['mpirun']
        if os.getenv('LD_LIBRARY_PATH'):
            args += ['-x', 'LD_LIBRARY_PATH']
        if os.getenv('PATH'):
            args += ['-x', 'PATH']
        if os.getenv('PYTHONPATH'):
            args += ['-x', 'PYTHONPATH']
    elif vendor[0] == 'Microsoft MPI':
        args = ['mpiexec']
    elif vendor[0] == 'MPICH2':
        args = ['mpiexec']
    elif vendor[0] == 'MPICH':
        args = ['mpiexec']

    if use_gpu == 'True':
        nb_tasks = str(nb_gpu)
    else:
        nb_tasks = str(nb_cpu)

    if subtask in ['merging', 'converting']:
        args += [
            '-np', nb_tasks, 'spyking-circus-subtask', subtask, filename,
            str(nb_cpu),
            str(nb_gpu), use_gpu, ''
        ]
    else:
        if subtask == 'benchmarking':
            if (output is None) or (benchmark is None):
                print(
                    "To generate synthetic datasets, you must provide output and type"
                )
                sys.exit(1)
            args += [
                '-np', nb_tasks, 'spyking-circus-subtask', subtask, filename,
                str(nb_cpu),
                str(nb_gpu), use_gpu, output, benchmark,
                str(sim_same_elec)
            ]
        else:
            args += [
                '-np', nb_tasks, 'spyking-circus-subtask', subtask, filename,
                str(nb_cpu),
                str(nb_gpu), use_gpu
            ]

    subprocess.check_call(args)
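A hedged usage sketch; the data file path is made up, and 'merging' is one of the subtasks handled explicitly above:

mpi_launch('merging', '/data/recording.params', nb_cpu=4, nb_gpu=0, use_gpu='False')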
Example #11
    def run(self):
        from mpi4py import MPI

        cmd, args = self._command, self._args
        if self._use_wrapper:
            # We spawn this very same module
            # When invoked as a program (see at the bottom) this module
            # will get the parent communicator, run the program we're giving in the
            # command line, and send back the exit code.
            # Likewise, we barrier on the children communicator, and thus
            # we wait until all children processes are completed
            cmd = sys.executable
            args = ['-m', __name__, self._command] + self._args

        errcodes = []

        # Spawn the new MPI communicator and wait until it finishes
        # (it sends the stdout, stderr and exit codes of the programs)
        logger.info(
            "Executing MPI app in new communicator with %d ranks and command: %s %s",
            self._maxprocs, cmd, args)

        vendor, version = MPI.get_vendor()  # @UndefinedVariable
        info = MPI.Info.Create()  # @UndefinedVariable
        logger.debug("MPI vendor is %s, version %s", vendor,
                     '.'.join([str(x) for x in version]))  # @UndefinedVariable
        comm_children = MPI.COMM_SELF.Spawn(cmd,
                                            args=args,
                                            maxprocs=self._maxprocs,
                                            errcodes=errcodes,
                                            info=info)  # @UndefinedVariable

        n_children = comm_children.Get_remote_size()
        logger.info("%d MPI children apps spawned, gathering exit data",
                    n_children)

        if self._use_wrapper:
            children_data = comm_children.gather(
                ('', '', 0), root=MPI.ROOT)  # @UndefinedVariable
            exit_codes = [x[2] for x in children_data]
            logger.info("Exit codes gathered from children processes: %r",
                        exit_codes)

            any_failed = False
            for rank, (stdout, stderr, code) in enumerate(children_data):
                self._recompute_data[str(rank)] = [
                    code, str(stdout), str(stderr)
                ]
                if code == 0:
                    continue
                any_failed = True
                logger.error(
                    "stdout/stderr follow for rank %d:\nSTDOUT\n======\n%s\n\nSTDERR\n======\n%s",
                    rank, stdout, stderr)

            if any_failed:
                raise Exception("One or more MPI children didn't exit cleanly")
        else:
            comm_children.barrier()
Example #12
def ensure_mpd_is_running():
    if not is_mpd_running():
        name_of_the_vendor, version = MPI.get_vendor()
        if name_of_the_vendor == "MPICH2":
            try:
                process = subprocess.Popen(["nohup", "mpd"], close_fds=True)
            except OSError as ex:
                pass
Example #13
def ensure_mpd_is_running():
    if not is_mpd_running():
        name_of_the_vendor, version = MPI.get_vendor()
        if name_of_the_vendor == 'MPICH2':
            try:
                process = subprocess.Popen(['nohup', 'mpd'], close_fds=True)
            except OSError as ex:
                pass
Example #14
def is_mpd_running():
    name_of_the_vendor, version = MPI.get_vendor()
    if name_of_the_vendor == 'MPICH2':
        process = subprocess.Popen(['mpdtrace'], stdout = subprocess.PIPE, stderr = subprocess.PIPE)
        (output_string, error_string) = process.communicate()
        return not (process.returncode == 255)
    else:
        return True
Example #15
def getlibraryinfo():
    from mpi4py import MPI
    x, y = MPI.Get_version()
    info = f"MPI {x}.{y}"
    name, version = MPI.get_vendor()
    if name != "unknown":
        x, y, z = version
        info += f" ({name} {x}.{y}.{z})"
    return info
Example #16
def badport():
    if MPI.get_vendor()[0] != 'MPICH':
        return False
    try:
        port = MPI.Open_port()
        MPI.Close_port(port)
    except:
        port = ""
    return port == ""
Example #17
def is_mpd_running():
    name_of_the_vendor, version = MPI.get_vendor()
    if name_of_the_vendor == 'MPICH2':
        process = subprocess.Popen(['mpdtrace'],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        (output_string, error_string) = process.communicate()
        return not (process.returncode == 255)
    else:
        return True
Example #18
def set_default_mpi_parameters(parameters):
    # If mpi4py is used, make sure we can import it and set the rank/size for all cores in the parameters.mpi
    use_mpi4py = True
    if 'relaxations' in parameters:
        for module in parameters.relaxations:
            parameters.relaxations[module].setdefault('use_mpi4py', False)
            parameters.relaxations[module].setdefault('MPMD', 0)
            if parameters.relaxations[module].use_mpi4py:
                use_mpi4py = True
    if 'fitnesses' in parameters:
        for module in parameters.fitnesses:
            parameters.fitnesses[module].setdefault('use_mpi4py', False)
            parameters.fitnesses[module].setdefault('MPMD', 0)
            if parameters.fitnesses[module].use_mpi4py:
                use_mpi4py = True

    parameters.setdefault('mpi', {})
    if use_mpi4py:
        try:
            import mpi4py
        except ImportError:
            raise ImportError("mpi4py must be installed to use StructOpt.")
        mpiexec_path, _ = os.path.split(distutils.spawn.find_executable("mpiexec"))
        for executable, path in mpi4py.get_config().items():
            if executable not in ['mpicc', 'mpicxx', 'mpif77', 'mpif90', 'mpifort']:
                continue
            if mpiexec_path not in path:
                raise ImportError("mpi4py may not be configured against the same version of 'mpiexec' that you are using. The 'mpiexec' path is {mpiexec_path} and mpi4py.get_config() returns:\n{mpi4py_config}\n".format(mpiexec_path=mpiexec_path, mpi4py_config=mpi4py.get_config()))
        from mpi4py import MPI
        if 'Open MPI' not in MPI.get_vendor():
            raise ImportError("mpi4py must have been installed against Open MPI in order for StructOpt to function correctly.")
        vendor_number = ".".join([str(x) for x in MPI.get_vendor()[1]])
        if vendor_number not in mpiexec_path:
            raise ImportError("The MPI version that mpi4py was compiled against does not match the version of 'mpiexec'. mpi4py's version number is {}, and mpiexec's path is {}".format(MPI.get_vendor(), mpiexec_path))

        parameters.mpi.rank = MPI.COMM_WORLD.Get_rank()
        parameters.mpi.ncores = MPI.COMM_WORLD.Get_size()
    else:
        parameters.mpi.rank = 0
        parameters.mpi.ncores = 1

    return parameters
Example #19
def check_mpi():
    mpiexec_path, _ = os.path.split(distutils.spawn.find_executable("mpiexec"))
    for executable, path in mpi4py.get_config().items():
        if executable not in [
                'mpicc', 'mpicxx', 'mpif77', 'mpif90', 'mpifort'
        ]:
            continue
        if mpiexec_path not in path:
            raise ImportError(
                "mpi4py may not be configured against the same version of 'mpiexec' that you are using. The 'mpiexec' path is {mpiexec_path} and mpi4py.get_config() returns:\n{mpi4py_config}\n"
                .format(mpiexec_path=mpiexec_path,
                        mpi4py_config=mpi4py.get_config()))
    if 'Open MPI' not in MPI.get_vendor():
        raise ImportError(
            "mpi4py must have been installed against Open MPI in order for StructOpt to function correctly."
        )
    vendor_number = ".".join([str(x) for x in MPI.get_vendor()[1]])
    if vendor_number not in mpiexec_path:
        raise ImportError(
            "The MPI version that mpi4py was compiled against does not match the version of 'mpiexec'. mpi4py's version number is {}, and mpiexec's path is {}"
            .format(MPI.get_vendor(), mpiexec_path))
Example #20
class ProcessPoolSubmitTest(unittest.TestCase):
    @unittest.skipIf(MPI.get_vendor()[0] == 'Microsoft MPI', 'msmpi')
    def test_multiple_executors(self):
        executor1 = futures.MPIPoolExecutor(1).bootup(wait=True)
        executor2 = futures.MPIPoolExecutor(1).bootup(wait=True)
        executor3 = futures.MPIPoolExecutor(1).bootup(wait=True)
        fs1 = [executor1.submit(abs, i) for i in range(100, 200)]
        fs2 = [executor2.submit(abs, i) for i in range(200, 300)]
        fs3 = [executor3.submit(abs, i) for i in range(300, 400)]
        futures.wait(fs3 + fs2 + fs1)
        for i, f in enumerate(fs1):
            self.assertEqual(f.result(), i + 100)
        for i, f in enumerate(fs2):
            self.assertEqual(f.result(), i + 200)
        for i, f in enumerate(fs3):
            self.assertEqual(f.result(), i + 300)
        executor1 = executor2 = executor3 = None

    def test_mpi_serialized_support(self):
        futures._lib.setup_mpi_threads()
        threading = futures._lib.threading
        serialized = futures._lib.serialized
        lock_save = serialized.lock
        try:
            if lock_save is None:
                serialized.lock = threading.Lock()
                executor = futures.MPIPoolExecutor(1).bootup()
                executor.submit(abs, 0).result()
                executor.shutdown()
                serialized.lock = lock_save
            else:
                serialized.lock = None
                with lock_save:
                    executor = futures.MPIPoolExecutor(1).bootup()
                    executor.submit(abs, 0).result()
                    executor.shutdown()
                serialized.lock = lock_save
        finally:
            serialized.lock = lock_save

    def test_shared_executors(self):
        if not SHARED_POOL: return
        executors = [futures.MPIPoolExecutor() for _ in range(16)]
        fs = []
        for i in range(128):
            fs.extend(
                e.submit(abs, i * 16 + j) for j, e in enumerate(executors))
        assert sorted(f.result() for f in fs) == list(range(16 * 128))
        world_size = MPI.COMM_WORLD.Get_size()
        num_workers = max(1, world_size - 1)
        for e in executors:
            self.assertEqual(e._pool.size, num_workers)
        del e, executors
Example #21
def get_mpi_num_processes(mpi_file_path):
    # Use the MPI machine file if available; each line is assumed to follow one of these formats:
    # ip_address slots=n max-slots=n   --- Open MPI
    # ip_address:n                     --- MPICH, MVAPICH
    with open(mpi_file_path, 'r') as f:
        node_count = 0
        if MPI.get_vendor()[0] == 'Open MPI':
            for line in f:
                line = line.split()
                node_count += int(line[1].split('=')[-1])
        elif 'MPICH' in MPI.get_vendor()[0] or 'MVAPICH' in MPI.get_vendor(
        )[0]:
            for line in f:
                line = line.split(":")
                node_count += int(line[1])
        else:
            raise RuntimeError(
                "mpi4py is built on top of unrecognized MPI library. "
                "Only Open MPI, MPICH, and MVAPICH are tested.")

    return node_count
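A minimal sketch, assuming mpi4py was built against Open MPI (the parser branches on MPI.get_vendor()[0]); the hostfile contents are invented for illustration:

import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.hosts', delete=False) as f:
    f.write('node01 slots=4 max-slots=4\n')
    f.write('node02 slots=8 max-slots=8\n')

print(get_mpi_num_processes(f.name))  # 12 (4 + 8) under Open MPI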
Example #22
 def testGetEnvelope(self):
     for dtype in datatypes:
         try:
             envelope = dtype.Get_envelope()
         except NotImplementedError:
             return
         if ('LAM/MPI' == MPI.get_vendor()[0] and
             "COMPLEX" in dtype.name): continue
         ni, na, nd, combiner = envelope
         self.assertEqual(combiner, MPI.COMBINER_NAMED)
         self.assertEqual(ni, 0)
         self.assertEqual(na, 0)
         self.assertEqual(nd, 0)
Example #23
def test_h5py(outputdir):
    fname = os.path.join(outputdir, 'run_test_helloworld.h5')
    f = h5py.File(fname, 'w')
    f['data'] = 'hello world!'
    f['data2'] = np.zeros((3, 5), np.int32)
    del f
    h5 = h5py.File(fname, 'r')
    assert h5['data'].value == 'hello world!'
    assert h5['data2'].shape[0] == 3
    assert h5['data2'].shape[1] == 5
    assert np.sum(h5['data2'][:]) == 0

    sys.stdout.write("mpi version: %r\n" % (MPI.get_vendor(), ))
    return 0
Example #24
def mpi_predicate(predicate):
    from mpi4py import MPI
    def key(s):
        s = s.replace(' ', '')
        s = s.replace('/', '')
        s = s.replace('-', '')
        s = s.replace('Microsoft', 'MS')
        return s.lower()
    vp = VersionPredicate(key(predicate))
    if vp.name == 'mpi':
        name, version = 'mpi', MPI.Get_version()
        version = version + (0,)
    else:
        name, version = MPI.get_vendor()
    if vp.name == key(name):
        if vp.satisfied_by('%d.%d.%d' % version):
            return vp
    return None
Example #25
def skipMPI(predicate, *conditions):
    from mpi4py import MPI
    def key(s):
        s = s.replace(' ', '')
        s = s.replace('/', '')
        s = s.replace('-', '')
        s = s.replace('Microsoft', 'MS')
        return s.lower()
    vp = VersionPredicate(key(predicate))
    if vp.name == 'mpi':
        name, version = 'mpi', MPI.Get_version()
        version = version + (0,)
    else:
        name, version = MPI.get_vendor()
    if vp.name == key(name):
        if vp.satisfied_by('%d.%d.%d' % version):
            if not conditions or any(conditions):
                return unittest.skip(str(vp))
    return unittest.skipIf(False, '')
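A hedged usage sketch: the predicate string follows distutils VersionPredicate syntax after the key() normalisation above (spaces, slashes, and dashes stripped, 'Microsoft' shortened to 'MS'), so the decorators below skip a test only on the named vendor and version range; the test class itself is hypothetical:

import unittest

class TestSpawn(unittest.TestCase):

    @skipMPI('msmpi')            # skip on any Microsoft MPI
    @skipMPI('openmpi(<1.8.1)')  # skip on Open MPI older than 1.8.1
    def test_spawn_single(self):
        ...  # spawn a child communicator and exchange a message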
Example #26
    def get_intro_string(self):
        """Return the string to append to the end of the relax introduction string.

        @return:    The string describing this Processor fabric.
        @rtype:     str
        """

        # Get the specific MPI version.
        version_info = MPI.Get_version()

        # The vendor info.
        vendor = MPI.get_vendor()
        vendor_name = vendor[0]
        vendor_version = str(vendor[1][0])
        for i in range(1, len(vendor[1])):
            vendor_version = vendor_version + '.%i' % vendor[1][i]

        # Return the string.
        return "MPI %s.%s running via mpi4py with %i slave processors & 1 master.  Using %s %s." % (version_info[0], version_info[1], self.processor_size(), vendor_name, vendor_version)
Example #27
    def get_intro_string(self):
        """Return the string to append to the end of the relax introduction string.

        @return:    The string describing this Processor fabric.
        @rtype:     str
        """

        # Get the specific MPI version.
        version_info = MPI.Get_version()

        # The vendor info.
        vendor = MPI.get_vendor()
        vendor_name = vendor[0]
        vendor_version = str(vendor[1][0])
        for i in range(1, len(vendor[1])):
            vendor_version = vendor_version + '.%i' % vendor[1][i]

        # Return the string.
        return "MPI %s.%s running via mpi4py with %i slave processors & 1 master.  Using %s %s." % (version_info[0], version_info[1], self.processor_size(), vendor_name, vendor_version)
Example #28
def get_watermark():
    """
    Return information about the current system relevant for pyasdf.
    """
    vendor = MPI.get_vendor() if MPI else None

    c = h5py.get_config()
    if not hasattr(c, "mpi") or not c.mpi:
        is_parallel = False
    else:
        is_parallel = True

    watermark = {
        "python_implementation": platform.python_implementation(),
        "python_version": platform.python_version(),
        "python_compiler": platform.python_compiler(),
        "platform_system": platform.system(),
        "platform_release": platform.release(),
        "platform_version": platform.version(),
        "platform_machine": platform.machine(),
        "platform_processor": platform.processor(),
        "platform_processor_count": cpu_count(),
        "platform_architecture": platform.architecture()[0],
        "platform_hostname": gethostname(),
        "date": strftime("%d/%m/%Y"),
        "time": strftime("%H:%M:%S"),
        "timezone": strftime("%Z"),
        "hdf5_version": h5py.version.hdf5_version,
        "parallel_h5py": is_parallel,
        "mpi_vendor": vendor[0] if vendor else None,
        "mpi_vendor_version":
        ".".join(map(str, vendor[1])) if vendor else None,
        "problematic_multiprocessing": is_multiprocessing_problematic(),
    }

    watermark["module_versions"] = {
        module: get_distribution(module).version
        for module in modules
    }
    if MPI is None:
        watermark["module_versions"]["mpi4py"] = None

    return watermark
Example #29
 def testGetEnvelope(self):
     for dtype in datatypes:
         try:
             envelope = dtype.Get_envelope()
         except NotImplementedError:
             self.skipTest('mpi-type-get_envelope')
         if ('LAM/MPI' == MPI.get_vendor()[0] and
             "COMPLEX" in dtype.name): continue
         ni, na, nd, combiner = envelope
         self.assertEqual(combiner, MPI.COMBINER_NAMED)
         self.assertEqual(ni, 0)
         self.assertEqual(na, 0)
         self.assertEqual(nd, 0)
         self.assertEqual(dtype.envelope, envelope)
         self.assertEqual(dtype.combiner, combiner)
         self.assertTrue(dtype.is_named)
         self.assertTrue(dtype.is_predefined)
         otype = dtype.decode()
         self.assertTrue(dtype is otype)
Example #30
 def testGetEnvelope(self):
     for dtype in datatypes:
         try:
             envelope = dtype.Get_envelope()
         except NotImplementedError:
             self.skipTest('mpi-type-get_envelope')
         if ('LAM/MPI' == MPI.get_vendor()[0] and "COMPLEX" in dtype.name):
             continue
         ni, na, nd, combiner = envelope
         self.assertEqual(combiner, MPI.COMBINER_NAMED)
         self.assertEqual(ni, 0)
         self.assertEqual(na, 0)
         self.assertEqual(nd, 0)
         self.assertEqual(dtype.envelope, envelope)
         self.assertEqual(dtype.combiner, combiner)
         self.assertTrue(dtype.is_named)
         self.assertTrue(dtype.is_predefined)
         otype = dtype.decode()
         self.assertTrue(dtype is otype)
Example #31
 def testPreallocate(self):
     ## XXX MPICH2 emits a nesting level warning
     ## when preallocating zero size.
     name, ver = MPI.get_vendor()
     if not (name == 'MPICH2' and ver < (1, 1, 0)):
         self.FILE.Preallocate(0)
     size = self.FILE.Get_size()
     self.assertEqual(size, 0)
     self.FILE.Preallocate(1)
     size = self.FILE.Get_size()
     self.assertEqual(size, 1)
     self.FILE.Preallocate(100)
     size = self.FILE.Get_size()
     self.assertEqual(size, 100)
     self.FILE.Preallocate(10)
     size = self.FILE.Get_size()
     self.assertEqual(size, 100)
     self.FILE.Preallocate(200)
     size = self.FILE.Get_size()
     self.assertEqual(size, 200)
Example #32
 def testPreallocate(self):
     ## MPICH2 1.0.x emits a nesting level warning
     ## when preallocating zero size.
     name, ver = MPI.get_vendor()
     if not (name == 'MPICH2' and ver  < (1,1,0)):
         self.FILE.Preallocate(0)
     size = self.FILE.Get_size()
     self.assertEqual(size, 0)
     self.FILE.Preallocate(1)
     size = self.FILE.Get_size()
     self.assertEqual(size, 1)
     self.FILE.Preallocate(100)
     size = self.FILE.Get_size()
     self.assertEqual(size, 100)
     self.FILE.Preallocate(10)
     size = self.FILE.Get_size()
     self.assertEqual(size, 100)
     self.FILE.Preallocate(200)
     size = self.FILE.Get_size()
     self.assertEqual(size, 200)
Example #33
def get_watermark():
    """
    Return information about the current system relevant for pyasdf.
    """
    vendor = MPI.get_vendor() if MPI else None

    c = h5py.get_config()
    if not hasattr(c, "mpi") or not c.mpi:
        is_parallel = False
    else:
        is_parallel = True

    watermark = {
        "python_implementation": platform.python_implementation(),
        "python_version": platform.python_version(),
        "python_compiler": platform.python_compiler(),
        "platform_system": platform.system(),
        "platform_release": platform.release(),
        "platform_version": platform.version(),
        "platform_machine": platform.machine(),
        "platform_processor": platform.processor(),
        "platform_processor_count": cpu_count(),
        "platform_architecture": platform.architecture()[0],
        "platform_hostname": gethostname(),
        "date": strftime('%d/%m/%Y'),
        "time": strftime('%H:%M:%S'),
        "timezone": strftime('%Z'),
        "hdf5_version": h5py.version.hdf5_version,
        "parallel_h5py": is_parallel,
        "mpi_vendor": vendor[0] if vendor else None,
        "mpi_vendor_version": ".".join(map(str, vendor[1]))
        if vendor else None,
        "problematic_multiprocessing": is_multiprocessing_problematic()
        }

    watermark["module_versions"] = {
        module: get_distribution(module).version for module in modules}
    if MPI is None:
        watermark["module_versions"]["mpi4py"] = None

    return watermark
Example #34
def is_mpd_running():

    name_of_the_vendor, version = MPI.get_vendor()
    if name_of_the_vendor == "MPICH2":
        must_check_mpd = True
        if "AMUSE_MPD_CHECK" in os.environ:
            must_check_mpd = os.environ["AMUSE_MPD_CHECK"] == "1"
        if "PMI_PORT" in os.environ:
            must_check_mpd = False
        if "HYDRA_CONTROL_FD" in os.environ:
            must_check_mpd = False

        if not must_check_mpd:
            return True
        try:
            process = subprocess.Popen(["mpdtrace"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            (output_string, error_string) = process.communicate()
            return not (process.returncode == 255)
        except OSError as ex:
            return True
    else:
        return True
Example #35
def is_mpd_running():

    name_of_the_vendor, version = MPI.get_vendor()
    if name_of_the_vendor == 'MPICH2':
        must_check_mpd = True
        if 'AMUSE_MPD_CHECK' in os.environ:
            must_check_mpd = os.environ['AMUSE_MPD_CHECK'] == '1'
        if 'PMI_PORT' in os.environ:
            must_check_mpd = False
        if 'HYDRA_CONTROL_FD' in os.environ:
            must_check_mpd = False

        if not must_check_mpd:
            return True
        try:
            process = subprocess.Popen(['mpdtrace'],
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
            (output_string, error_string) = process.communicate()
            return not (process.returncode == 255)
        except OSError as ex:
            return True
    else:
        return True
Example #36
import numpy, os, mpi4py, logging, sys
import mpi4py
mpi4py.rc.threads = False
from mpi4py import MPI
from messages import print_and_log
comm = MPI.COMM_WORLD
import blosc
from distutils.version import StrictVersion

logger = logging.getLogger(__name__)

MPI_VENDOR = MPI.get_vendor()
SHARED_MEMORY = (hasattr(MPI.Win, 'Allocate_shared')
                 and callable(getattr(MPI.Win, 'Allocate_shared')))


def check_if_cluster():
    from uuid import getnode as get_mac
    myip = numpy.array([numpy.int64(get_mac()) % 100000], dtype='int64')
    ips = all_gather_array(myip, comm, 1, 'int64')
    return not len(numpy.unique(ips)) == 1


def get_local_ring(local_only=False):
    ## First we need to identify machines in the MPI ring
    from uuid import getnode as get_mac
    myip = numpy.int64(get_mac()) % 100000
    is_local = False

    if local_only:
        master_ip = comm.bcast(numpy.array([myip], dtype='int64'), root=0)
Example #37
        for dtype in self.DATATYPES:
            if dtype != MPI.DATATYPE_NULL:
                self.assertRaisesMPI(self.ERR_TYPE, dtype.Free)
                self.assertTrue(dtype != MPI.DATATYPE_NULL)

    def testKeyvalInvalid(self):
        for dtype in self.DATATYPES:
            if dtype != MPI.DATATYPE_NULL:
                try:
                    self.assertRaisesMPI([MPI.ERR_KEYVAL, MPI.ERR_OTHER],
                                         dtype.Get_attr, MPI.KEYVAL_INVALID)
                except NotImplementedError:
                    self.skipTest('mpi-type-get_attr')


name, version = MPI.get_vendor()
if name == 'Open MPI':
    if version < (1, 4, 3):
        TestExcDatatype.DATATYPES = TestExcDatatype.DATATYPES[1:]
        TestExcDatatype.ERR_TYPE = MPI.ERR_INTERN

# --------------------------------------------------------------------


@unittest.skipMPI('msmpi(<=4.2.0)')
class TestExcStatus(BaseTestCase):
    def testGetCount(self):
        status = MPI.Status()
        self.assertRaisesMPI(MPI.ERR_TYPE, status.Get_count, MPI.DATATYPE_NULL)

    def testGetElements(self):
Example #38
            intercomm = MPI.Comm.Join(fd)
            client.close()
            if intercomm != MPI.COMM_NULL:
                self.assertEqual(intercomm.remote_size, 1)
                self.assertEqual(intercomm.size, 1)
                self.assertEqual(intercomm.rank, 0)
                if rank == 0:
                    message = TestDPM.message
                    root = MPI.ROOT
                else:
                    message = None
                    root = 0
                message = intercomm.bcast(message, root)
                if rank == 0:
                    self.assertEqual(message, None)
                else:
                    self.assertEqual(message, TestDPM.message)
                intercomm.Free()
        MPI.COMM_WORLD.Barrier()


MVAPICH2 = MPI.get_vendor()[0] == 'MVAPICH2'
try:
    if MVAPICH2: raise NotImplementedError
except NotImplementedError:
    unittest.disable(TestDPM, 'mpi-dpm')


if __name__ == '__main__':
    unittest.main()
Example #39
        for dtype in self.DATATYPES:
            if dtype != MPI.DATATYPE_NULL:
                self.assertRaisesMPI(self.ERR_TYPE, dtype.Free)
                self.assertTrue(dtype != MPI.DATATYPE_NULL)

    def testKeyvalInvalid(self):
        for dtype in self.DATATYPES:
            if dtype != MPI.DATATYPE_NULL:
                try:
                    self.assertRaisesMPI(
                        [MPI.ERR_KEYVAL, MPI.ERR_OTHER],
                        dtype.Get_attr, MPI.KEYVAL_INVALID)
                except NotImplementedError:
                    return

_name, _version = MPI.get_vendor()
if _name == 'Open MPI':
    if _version < (1, 4, 3):
        TestExcDatatype.DATATYPES  = TestExcDatatype.DATATYPES[1:]
        TestExcDatatype.ERR_TYPE   = MPI.ERR_INTERN

# --------------------------------------------------------------------

class TestExcStatus(unittest.TestCase):

    def testGetCount(self):
        status = MPI.Status()
        self.assertRaisesMPI(
            MPI.ERR_TYPE, status.Get_count, MPI.DATATYPE_NULL)

    def testGetElements(self):
Example #40
def ensure_mpd_is_running():
    if not is_mpd_running():
        name_of_the_vendor, version = MPI.get_vendor()
        if name_of_the_vendor == 'MPICH2':
            process = subprocess.Popen(['nohup','mpd'], close_fds=True)
Example #41
        self.WIN.Unlock_all()


class TestRMASelf(BaseTestRMA, unittest.TestCase):
    COMM = MPI.COMM_SELF

class TestRMAWorld(BaseTestRMA, unittest.TestCase):
    COMM = MPI.COMM_WORLD


try:
    MPI.Win.Create(None, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free()
except NotImplementedError:
    del TestRMASelf, TestRMAWorld
else:
    name, version = MPI.get_vendor()
    if name == 'Open MPI':
        if version < (1,8,1):
            del TestRMASelf, TestRMAWorld
    elif name == 'MPICH2':
        if version < (1,5,0):
            del TestRMASelf, TestRMAWorld
        elif version >= (2,0,0) and MPI.VERSION < 3: # Intel MPI
            del TestRMASelf, TestRMAWorld
    elif MPI.Get_version() < (3,0):
        del TestRMASelf, TestRMAWorld


if __name__ == '__main__':
    unittest.main()
Example #42
            client.close()
            if intercomm != MPI.COMM_NULL:
                self.assertEqual(intercomm.remote_size, 1)
                self.assertEqual(intercomm.size, 1)
                self.assertEqual(intercomm.rank, 0)
                if rank == 0:
                    message = TestDPM.message
                    root = MPI.ROOT
                else:
                    message = None
                    root = 0
                message = intercomm.bcast(message, root)
                if rank == 0:
                    self.assertEqual(message, None)
                else:
                    self.assertEqual(message, TestDPM.message)
                intercomm.Free()
        MPI.COMM_WORLD.Barrier()


MVAPICH2 = MPI.get_vendor()[0] == 'MVAPICH2'
try:
    if MVAPICH2: raise NotImplementedError
    MPI.Close_port(MPI.Open_port())
except NotImplementedError:
    unittest.disable(TestDPM, 'mpi-dpm')


if __name__ == '__main__':
    unittest.main()
Example #43
BASE_NAME = options.mesh_path +"/"+ options.app_name  
DATAs     = {}  

for k, v in my_dictionary.items():
  file = BASE_NAME + v
  if os.path.isfile(file) and os.access(file, os.R_OK):
    Data = np.loadtxt(file)
    print("\t size:%d '%s'" % (Data.shape[0], file))
    DATAs[k] = Data
  else:
    print("Either the file is missing or it is not readable: '%s'" % file)
    sys.exit()

#=================================================================| Mpi4py |===#
from mpi4py import MPI
print (MPI.get_vendor())
world_comm = MPI.COMM_WORLD

#-------------------------------------------------------------------------||---#
import Commdomm

class PyPlepp: 
  def __init__(self, _type, _namei, _namej, _comm, _dim=3):  
    self.dim = _dim  
    self.CD  = Commdomm.CommDom()
    self.CD.init()
    self.CD.set_app_type(   _type  ) 
    self.CD.set_app_name(   _namei ) 
    self.CD.set_world_comm( _comm  ) 

    self.lcomm = self.CD.set_mpi_comms()
Example #44
            assert v is win
            MPI.Win.Free(v)
        self.keyval = MPI.Win.Create_keyval(delete_fn=delete_fn)
        self.assertNotEqual(self.keyval, MPI.KEYVAL_INVALID)
        #
        win = MPI.Win.Create(MPI.BOTTOM, 1,
                             MPI.INFO_NULL, MPI.COMM_SELF)
        self.obj.Set_attr(self.keyval, win)
        self.assertTrue(win != null)
        self.obj.Delete_attr(self.keyval)
        self.assertTrue(win == null)


try:
    k = MPI.Datatype.Create_keyval()
    k = MPI.Datatype.Free_keyval(k)
except NotImplementedError:
    unittest.disable(BaseTestDatatypeAttr, 'mpi-type-attr')
SpectrumMPI = MPI.get_vendor()[0] == 'Spectrum MPI'
try:
    if SpectrumMPI: raise NotImplementedError
    MPI.Win.Create(MPI.BOTTOM, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free()
    k = MPI.Win.Create_keyval()
    k = MPI.Win.Free_keyval(k)
except (NotImplementedError, MPI.Exception):
    unittest.disable(TestWinAttr, 'mpi-win-attr')


if __name__ == '__main__':
    unittest.main()
Example #45
        self.assert_(base_f is not new_f)
        self.assert_(not base_f.done())
        self.assert_(not new_f.done())

        base_f.set_result(1)
        self.assert_(base_f.done())
        self.assert_(new_f.done())

        self.assert_(new_f.exception())
        with self.assertRaises(RuntimeError) as catcher:
            new_f.result()
        assert 'Circular future chain detected' in catcher.exception.args[0]


SKIP_POOL_TEST = False
name, version = MPI.get_vendor()
if name == 'Open MPI':
    if version < (3, 0, 0):
        SKIP_POOL_TEST = True
    if version == (4, 0, 0):
        SKIP_POOL_TEST = True
if name == 'MPICH':
    if MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is None:
        SKIP_POOL_TEST = True
if name == 'MVAPICH2':
    SKIP_POOL_TEST = True
if name == 'MPICH2':
    if MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is None:
        SKIP_POOL_TEST = True
if name == 'Microsoft MPI':
    if version < (8, 1, 0):
Example #46
from mpi4py import rc
del rc.initialize
del rc.threads
del rc.thread_level
del rc.finalize

from mpi4py import MPI
assert MPI.Is_initialized()
assert not MPI.Is_finalized()

import sys
name, _ = MPI.get_vendor()
if name == 'MPICH':
    assert MPI.Query_thread() == MPI.THREAD_MULTIPLE
if name == 'MPICH2' and sys.platform[:3] != 'win':
    assert MPI.Query_thread() == MPI.THREAD_MULTIPLE
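For contrast, a sketch of the more common direction: set the same rc knobs before the first MPI import instead of deleting them (the values shown are the mpi4py defaults):

import mpi4py
mpi4py.rc.initialize = True          # call MPI_Init_thread when MPI is imported
mpi4py.rc.threads = True             # request a thread-enabled MPI
mpi4py.rc.thread_level = 'multiple'  # single, funneled, serialized, or multiple
mpi4py.rc.finalize = None            # finalize automatically at interpreter exit
from mpi4py import MPI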