Example no. 1
def test_thread_level():
    import sys
    flag = (MPI.Query_thread() == MPI.THREAD_MULTIPLE)
    flag = MPI.COMM_WORLD.bcast(flag, root=0)
    if not flag:
        if MPI.COMM_WORLD.Get_rank() == 0:
            sys.stderr.write("MPI does not provide enough thread support\n")
        sys.exit(0)
def init():
    """Initializes the runtime library """
    global torc_server_thread, torc_use_server
    global torc_last_qid
    global torc_num_workers
    global TORC_STEALING_ENABLED
    global TORC_SERVER_YIELDTIME, TORC_WORKER_YIELDTIME
    global _torc_inited

    if _torc_inited is True:
        return
    else:
        _torc_inited = True

    # MPI.Init_thread()

    provided = MPI.Query_thread()
    if MPI.COMM_WORLD.Get_rank() == 0:
        _torc_log.warning("MPI.Query_thread returns {}".format(provided))
        if provided < MPI.THREAD_MULTIPLE:
            _torc_log.warning(
                "Warning: MPI.Query_thread returns {} < {}".format(
                    provided, MPI.THREAD_MULTIPLE))
        else:
            _torc_log.warning(
                "Info: MPI.Query_thread returns MPI.THREAD_MULTIPLE")

    torc_num_workers = int(os.getenv("TORCPY_WORKERS", 1))

    flag = os.getenv("TORCPY_STEALING", "False")
    if flag == "True":
        TORC_STEALING_ENABLED = True
    else:
        TORC_STEALING_ENABLED = False

    TORC_SERVER_YIELDTIME = float(os.getenv("TORCPY_SERVER_YIELDTIME", 0.01))
    TORC_WORKER_YIELDTIME = float(os.getenv("TORCPY_WORKER_YIELDTIME", 0.01))

    torc_tls.id = 0
    main_task = dict()
    main_task["deps"] = 0
    main_task["mytask"] = id(main_task)
    main_task["parent"] = 0
    main_task["level"] = -1
    main_task["completed"] = []
    torc_tls.curr_task = main_task
    torc_last_qid = node_id() * torc_num_workers

    if num_nodes() == 1:
        torc_use_server = False

    if torc_use_server:
        torc_server_thread = threading.Thread(target=_server)
        torc_server_thread.start()
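The init() routine above can only detect an insufficient thread level after MPI has already been initialized. A minimal standalone sketch (not part of torcpy; it relies on mpi4py's documented rc settings) of requesting MPI_THREAD_MULTIPLE before the MPI module is imported, so the runtime check has a better chance of passing:

import mpi4py
mpi4py.rc.threads = True             # ask for threaded MPI initialization
mpi4py.rc.thread_level = "multiple"  # request MPI_THREAD_MULTIPLE

from mpi4py import MPI               # MPI_Init_thread happens at import time

if MPI.Query_thread() < MPI.THREAD_MULTIPLE:
    raise SystemExit("MPI does not provide enough thread support")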
Example no. 3
 def testThreadLevels(self):
     levels = [
         MPI.THREAD_SINGLE, MPI.THREAD_FUNNELED, MPI.THREAD_SERIALIZED,
         MPI.THREAD_MULTIPLE
     ]
     for i in range(len(levels) - 1):
         self.assertTrue(levels[i] < levels[i + 1])
     try:
         provided = MPI.Query_thread()
         self.assertTrue(provided in levels)
     except NotImplementedError:
         self.skipTest('mpi-query_thread')
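The test above only checks that the four level constants are strictly ordered and that Query_thread() returns one of them. A small companion sketch that prints the provided level by name (the lookup table is illustrative, not part of the test suite):

from mpi4py import MPI

LEVEL_NAMES = {
    MPI.THREAD_SINGLE: "THREAD_SINGLE",
    MPI.THREAD_FUNNELED: "THREAD_FUNNELED",
    MPI.THREAD_SERIALIZED: "THREAD_SERIALIZED",
    MPI.THREAD_MULTIPLE: "THREAD_MULTIPLE",
}

provided = MPI.Query_thread()
print("provided thread level:", LEVEL_NAMES.get(provided, provided))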
Example no. 4
    def main(self):
        MPI.Init_thread(MPI.THREAD_MULTIPLE)
        if MPI.Query_thread() != MPI.THREAD_MULTIPLE:
            print 'ERROR: make sure MPI is configured with thread support'
            self.terminate()

        self.read_commandline()
        self.init_logger()
        self.show_banner()
        self.list_solvers()
        self.init_defaults()
        if self.read_controlfile() and self.load_solver():
            self.initialise_solver()
            self.start_solver()
Example no. 5
def test_thread_level():
    fn = "mpi_test.log"
    if MPI.COMM_WORLD.Get_rank() == 0:
        if not (MPI.Query_thread() == MPI.THREAD_MULTIPLE):
            with open(fn, 'w') as f:
                m = "MPI does not provide enough thread support\n"
                m += 'MPI.Query_thread()={}\n'.format(MPI.Query_thread())
                f.write(m)
                print(m)
            sys.stderr.write("MPI does not provide enough thread support\n")
        else:
            with open(fn, 'w') as f:
                m = 'MPI.Query_thread()={}\n'.format(MPI.Query_thread())
                f.write(m)
                print(m)
Example no. 6
 def testIsThreadMainInThread(self):
     try:
         provided = MPI.Query_thread()
     except NotImplementedError:
         self.skipTest('mpi-query_thread')
     self.testIsThreadMain()
     T = [threading.Thread(target=self.testIsThreadMain) for _ in range(5)]
     if provided == MPI.THREAD_MULTIPLE:
         for t in T:
             t.start()
         for t in T:
             t.join()
     elif provided == MPI.THREAD_SERIALIZED:
         for t in T:
             t.start()
             t.join()
     else:
         self.skipTest('mpi-thread_level')
Example no. 7
 def testIsThreadMainInThread(self):
     try:
         provided = MPI.Query_thread()
     except NotImplementedError:
         return
     if provided < self.REQUIRED:
         return
     T = []
     for i in range(5):
         t = Thread(target=self.testIsThreadMain,
                    args=(not _HAS_THREADING, ))
         T.append(t)
     if provided == MPI.THREAD_SERIALIZED:
         for t in T:
             t.start()
             t.join()
     elif provided == MPI.THREAD_MULTIPLE:
         for t in T:
             t.start()
         for t in T:
             t.join()
Example no. 8
 def testIsThreadMain(self):
     self._test_is(main=True)
     try:
         provided = MPI.Query_thread()
     except NotImplementedError:
         return
     if provided < self.REQUIRED:
         return
     T = []
     for i in range(5):
         t = Thread(target=self._test_is,
                    args=(not _HAS_THREADING, ),
                    verbose=_VERBOSE)
         T.append(t)
     if provided == MPI.THREAD_MULTIPLE:
         for t in T:
             t.start()
             t.join()
     else:
         for t in T:
             t.start()
         for t in T:
             t.join()
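Examples no. 6-8 share one idiom: under THREAD_MULTIPLE all threads are started and then joined, while under THREAD_SERIALIZED each thread is started and joined before the next one, so that only one thread calls MPI at a time. A hedged standalone sketch of that dispatch (run_threads is an illustrative helper, not part of mpi4py):

import threading
from mpi4py import MPI

def run_threads(targets):
    provided = MPI.Query_thread()
    threads = [threading.Thread(target=t) for t in targets]
    if provided == MPI.THREAD_MULTIPLE:
        # Threads may call MPI concurrently: start all, then join all.
        for t in threads:
            t.start()
        for t in threads:
            t.join()
    elif provided == MPI.THREAD_SERIALIZED:
        # Only one thread may call MPI at a time: run them sequentially.
        for t in threads:
            t.start()
            t.join()
    else:
        raise RuntimeError("insufficient MPI thread support")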
Example no. 9
from mpi4py import rc
del rc.initialize
del rc.threads
del rc.thread_level
del rc.finalize

from mpi4py import MPI
assert MPI.Is_initialized()
assert not MPI.Is_finalized()

import sys
name, _ = MPI.get_vendor()
if name == 'MPICH':
    assert MPI.Query_thread() == MPI.THREAD_MULTIPLE
if name == 'MPICH2' and sys.platform[:3] != 'win':
    assert MPI.Query_thread() == MPI.THREAD_MULTIPLE
Example no. 10
class BaseTestComm(object):
    def testConstructor(self):
        comm = MPI.Comm(self.COMM)
        self.assertEqual(comm, self.COMM)
        self.assertFalse(comm is self.COMM)

    def testPyProps(self):
        comm = self.COMM
        self.assertEqual(comm.Get_size(), comm.size)
        self.assertEqual(comm.Get_rank(), comm.rank)
        self.assertEqual(comm.Is_intra(), comm.is_intra)
        self.assertEqual(comm.Is_inter(), comm.is_inter)
        self.assertEqual(comm.Get_topology(), comm.topology)

    def testGroup(self):
        comm = self.COMM
        group = self.COMM.Get_group()
        self.assertEqual(comm.Get_size(), group.Get_size())
        self.assertEqual(comm.Get_rank(), group.Get_rank())
        group.Free()
        self.assertEqual(group, MPI.GROUP_NULL)

    def testCloneFree(self):
        comm = self.COMM.Clone()
        comm.Free()
        self.assertEqual(comm, MPI.COMM_NULL)

    def testCompare(self):
        results = (MPI.IDENT, MPI.CONGRUENT, MPI.SIMILAR, MPI.UNEQUAL)
        ccmp = MPI.Comm.Compare(self.COMM, MPI.COMM_WORLD)
        self.assertTrue(ccmp in results)
        ccmp = MPI.Comm.Compare(self.COMM, self.COMM)
        self.assertEqual(ccmp, MPI.IDENT)
        comm = self.COMM.Dup()
        ccmp = MPI.Comm.Compare(self.COMM, comm)
        comm.Free()
        self.assertEqual(ccmp, MPI.CONGRUENT)

    def testIsInter(self):
        is_inter = self.COMM.Is_inter()
        self.assertTrue(type(is_inter) is bool)

    def testGetSetName(self):
        try:
            name = self.COMM.Get_name()
            self.COMM.Set_name('comm')
            self.assertEqual(self.COMM.Get_name(), 'comm')
            self.COMM.Set_name(name)
            self.assertEqual(self.COMM.Get_name(), name)
        except NotImplementedError:
            self.skipTest('mpi-comm-name')

    def testGetParent(self):
        try:
            parent = MPI.Comm.Get_parent()
        except NotImplementedError:
            self.skipTest('mpi-comm-get_parent')

    def testDupWithInfo(self):
        info = None
        self.COMM.Dup(info).Free()
        info = MPI.INFO_NULL
        self.COMM.Dup(info).Free()
        self.COMM.Dup_with_info(info).Free()
        info = MPI.Info.Create()
        self.COMM.Dup(info).Free()
        self.COMM.Dup_with_info(info).Free()
        info.Free()

    @unittest.skipMPI('mpich(<=3.1.0)', MPI.Query_thread() > MPI.THREAD_SINGLE)
    def testIDup(self):
        try:
            comm, request = self.COMM.Idup()
        except NotImplementedError:
            self.skipTest('mpi-comm-idup')
        request.Wait()
        ccmp = MPI.Comm.Compare(self.COMM, comm)
        comm.Free()
        self.assertEqual(ccmp, MPI.CONGRUENT)

    @unittest.skipMPI('mpich(<=3.1.0)', MPI.Query_thread() > MPI.THREAD_SINGLE)
    def testIDupWithInfo(self):
        try:
            comm, request = self.COMM.Idup_with_info(MPI.INFO_NULL)
        except NotImplementedError:
            self.skipTest('mpi-comm-idup-info')
        request.Wait()
        ccmp = MPI.Comm.Compare(self.COMM, comm)
        comm.Free()
        self.assertEqual(ccmp, MPI.CONGRUENT)
        #
        new_info = MPI.Info.Create()
        for info in (None, MPI.INFO_NULL, new_info):
            comm, request = self.COMM.Idup(info)
            request.Wait()
            ccmp = MPI.Comm.Compare(self.COMM, comm)
            comm.Free()
            self.assertEqual(ccmp, MPI.CONGRUENT)
        new_info.Free()

    def testGetSetInfo(self):
        #info = MPI.INFO_NULL
        #self.COMM.Set_info(info)
        info = MPI.Info.Create()
        self.COMM.Set_info(info)
        info.Free()
        info = self.COMM.Get_info()
        self.COMM.Set_info(info)
        info.Free()

    def testCreate(self):
        group = self.COMM.Get_group()
        comm = self.COMM.Create(group)
        ccmp = MPI.Comm.Compare(self.COMM, comm)
        self.assertEqual(ccmp, MPI.CONGRUENT)
        comm.Free()
        group.Free()

    @unittest.skipMPI('openmpi(<=1.8.1)')
    def testCreateGroup(self):
        group = self.COMM.Get_group()
        try:
            try:
                comm = self.COMM.Create_group(group)
                ccmp = MPI.Comm.Compare(self.COMM, comm)
                self.assertEqual(ccmp, MPI.CONGRUENT)
                comm.Free()
            finally:
                group.Free()
        except NotImplementedError:
            self.skipTest('mpi-comm-create_group')

    def testCreateFromGroup(self):
        group = self.COMM.Get_group()
        try:
            try:
                comm = MPI.Intracomm.Create_from_group(group)
                ccmp = MPI.Comm.Compare(self.COMM, comm)
                comm.Free()
                self.assertEqual(ccmp, MPI.CONGRUENT)
            finally:
                group.Free()
        except NotImplementedError:
            self.assertTrue(MPI.VERSION < 4)
            self.skipTest('mpi-comm-create_from_group')

    @unittest.skipMPI('openmpi(==2.0.0)')
    def testSplitTypeShared(self):
        try:
            MPI.COMM_SELF.Split_type(MPI.COMM_TYPE_SHARED).Free()
        except NotImplementedError:
            self.skipTest('mpi-comm-split_type')
        #comm = self.COMM.Split_type(MPI.UNDEFINED)
        #self.assertEqual(comm, MPI.COMM_NULL)
        comm = self.COMM.Split_type(MPI.COMM_TYPE_SHARED)
        self.assertNotEqual(comm, MPI.COMM_NULL)
        size = self.COMM.Get_size()
        rank = self.COMM.Get_rank()
        if size == 1:
            self.assertEqual(comm.size, 1)
            self.assertEqual(comm.rank, 0)
        comm.Free()
        for root in range(size):
            if rank == root:
                split_type = MPI.COMM_TYPE_SHARED
            else:
                split_type = MPI.UNDEFINED
            comm = self.COMM.Split_type(split_type)
            if rank == root:
                self.assertNotEqual(comm, MPI.COMM_NULL)
                self.assertEqual(comm.size, 1)
                self.assertEqual(comm.rank, 0)
                comm.Free()
            else:
                self.assertEqual(comm, MPI.COMM_NULL)

    def testSplitTypeHWGuided(self):
        try:
            MPI.COMM_SELF.Split_type(MPI.COMM_TYPE_SHARED).Free()
        except NotImplementedError:
            self.skipTest('mpi-comm-split_type')
        if MPI.COMM_TYPE_HW_GUIDED == MPI.UNDEFINED:
            self.skipTest("mpi-comm-split_type-hw_guided")
        split_type = MPI.COMM_TYPE_HW_GUIDED
        #
        comm = self.COMM.Split_type(split_type)
        self.assertEqual(comm, MPI.COMM_NULL)
        comm = self.COMM.Split_type(split_type, info=MPI.INFO_NULL)
        self.assertEqual(comm, MPI.COMM_NULL)
        info = MPI.Info.Create()
        comm = self.COMM.Split_type(split_type, info=info)
        self.assertEqual(comm, MPI.COMM_NULL)
        info.Set("foo", "bar")
        comm = self.COMM.Split_type(split_type, info=info)
        self.assertEqual(comm, MPI.COMM_NULL)
        info.Set("mpi_hw_resource_type", "@dont-thread-on-me@")
        comm = self.COMM.Split_type(split_type, info=info)
        self.assertEqual(comm, MPI.COMM_NULL)
        info.Free()
        #
        restype = "mpi_hw_resource_type"
        shmem = "mpi_shared_memory"
        info = MPI.Info.Create()
        info.Set(restype, shmem)
        comm = self.COMM.Split_type(split_type, info=info)
        self.assertNotEqual(comm, MPI.COMM_NULL)
        self.assertEqual(info.Get(restype), shmem)
        comm.Free()
        size = self.COMM.Get_size()
        rank = self.COMM.Get_rank()
        for root in range(size):
            if rank == root:
                split_type = MPI.COMM_TYPE_HW_GUIDED
            else:
                split_type = MPI.UNDEFINED
            comm = self.COMM.Split_type(split_type, info=info)
            self.assertEqual(info.Get(restype), shmem)
            if rank == root:
                self.assertNotEqual(comm, MPI.COMM_NULL)
                self.assertEqual(comm.size, 1)
                self.assertEqual(comm.rank, 0)
                comm.Free()
            else:
                self.assertEqual(comm, MPI.COMM_NULL)
        info.Free()

    def testSplitTypeHWUnguided(self):
        try:
            MPI.COMM_SELF.Split_type(MPI.COMM_TYPE_SHARED).Free()
        except NotImplementedError:
            self.skipTest('mpi-comm-split_type')
        if MPI.COMM_TYPE_HW_UNGUIDED == MPI.UNDEFINED:
            self.skipTest("mpi-comm-split_type-hw_unguided")
        hwcomm = [self.COMM]
        while len(hwcomm) < 32:
            rank = hwcomm[-1].Get_rank()
            info = MPI.Info.Create()
            comm = hwcomm[-1].Split_type(
                MPI.COMM_TYPE_HW_UNGUIDED,
                key=rank,
                info=info,
            )
            if comm != MPI.COMM_NULL:
                self.assertTrue(info.Get("mpi_hw_resource_type"))
                self.assertTrue(comm.Get_size() < hwcomm[-1].Get_size())
            info.Free()
            if comm == MPI.COMM_NULL:
                break
            hwcomm.append(comm)
        for comm in hwcomm[1:]:
            comm.Free()
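Outside the test class, Split_type with MPI.COMM_TYPE_SHARED is commonly used to build one communicator per shared-memory node. A minimal standalone sketch (not part of the test suite above):

from mpi4py import MPI

world = MPI.COMM_WORLD
# One sub-communicator per shared-memory node; ranks keep their world order.
node = world.Split_type(MPI.COMM_TYPE_SHARED, key=world.Get_rank())
print("world rank", world.Get_rank(),
      "is node rank", node.Get_rank(), "of", node.Get_size())
node.Free()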
Example no. 11
    def test2(self):
        convert_nbody = nbody_system.nbody_to_si(1.0 | units.MSun,
                                                 149.5e6 | units.km)

        bhtree = BHTree(convert_nbody)
        bhtree.initialize_code()
        bhtree.eps2_for_gravity = 0.001

        bhtree_particles = self.new_system_sun_and_earth()
        bhtree.particles.add_particles(bhtree_particles)

        if bhtree.legacy_interface.channel_type == 'mpi':
            from mpi4py import MPI
            if not MPI.Query_thread() == MPI.THREAD_MULTIPLE:
                bhtree.stop()
                self.skip(
                    "can only test parallel with multiple thread support in mpi implementation"
                )

        hermite = Hermite(convert_nbody)
        hermite.dt_dia = 5000
        hermite.commit_parameters()

        hermite_particles = self.new_system_sun_and_earth()
        hermite.particles.add_particles(hermite_particles)

        thread1 = threading.Thread(target=self.evolve_model_unit_day,
                                   args=(bhtree, bhtree_particles, 10))
        thread2 = threading.Thread(target=self.evolve_model_unit_day,
                                   args=(hermite, hermite_particles, 10))

        thread1.start()
        thread2.start()

        thread1.join()
        thread2.join()

        if HAS_MATPLOTLIB:
            figure = pyplot.figure()
            plot = figure.add_subplot(1, 1, 1)

            earth = bhtree_particles[1]
            x_points = earth.get_timeline_of_attribute("x")
            y_points = earth.get_timeline_of_attribute("y")
            x_points_in_AU = [t_x[1].value_in(units.AU) for t_x in x_points]
            y_points_in_AU = [t_x1[1].value_in(units.AU) for t_x1 in y_points]

            plot.scatter(x_points_in_AU, y_points_in_AU, color="b", marker='o')

            earth = hermite_particles[1]
            x_points = earth.get_timeline_of_attribute("x")
            y_points = earth.get_timeline_of_attribute("y")
            x_points_in_AU = [t_x2[1].value_in(units.AU) for t_x2 in x_points]
            y_points_in_AU = [t_x3[1].value_in(units.AU) for t_x3 in y_points]

            plot.scatter(x_points_in_AU, y_points_in_AU, color="g", marker='o')

            plot.set_xlim(-1.5, 1.5)
            plot.set_ylim(-1.5, 1.5)

            test_results_path = self.get_path_to_results()
            output_file = os.path.join(test_results_path,
                                       "parallel-earth-sun.svg")
            figure.savefig(output_file)

        bhtree.stop()
        hermite.stop()
Example no. 12
try:
    import threading
except ImportError:
    raise SystemExit("threading module not available")
try:
    import numpy
except ImportError:
    raise SystemExit("NumPy package not available")

from mpi4py import MPI
if MPI.Query_thread() < MPI.THREAD_MULTIPLE:
    raise SystemExit("MPI does not provide enough thread support")

send_msg = numpy.arange(1000000, dtype='i')
recv_msg = numpy.zeros_like(send_msg)

start_event = threading.Event()


def self_send():
    start_event.wait()
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    comm.Send([send_msg, MPI.INT], dest=rank, tag=0)


def self_recv():
    start_event.wait()
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    comm.Recv([recv_msg, MPI.INT], source=rank, tag=0)
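The excerpt above stops at the two worker functions. A hedged sketch of how they might be driven (the driver below is an assumption, not part of the original example): start both threads, release them with start_event, and verify the transferred buffer.

sender = threading.Thread(target=self_send)
receiver = threading.Thread(target=self_recv)
sender.start()
receiver.start()
start_event.set()          # let both threads issue their MPI calls
sender.join()
receiver.join()
assert (send_msg == recv_msg).all()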
Example no. 13
class MPIEnvironment:

    # Static variables #################################################################################################

    # Set hostname
    hostname = socket.gethostname()

    # Initialization
    mpi_initialized = False
    try:
        # don't load mpi unless we are already running under mpi
        # trying to load a broken mpi installation will abort the process not
        # giving us a chance to run in the serial mode
        # testing mpi via a forked import causes deadlock on process end when
        # running test_mpi4casa[test_server_not_responsive]
        if 'OMPI_COMM_WORLD_RANK' not in os.environ:
            raise ValueError('MPI disabled')

        # Set mpi4py runtime configuration
        from mpi4py import rc as __mpi_runtime_config
        # Automatic MPI initialization at import time
        __mpi_runtime_config.initialize = True
        # Request for thread support at MPI initialization
        __mpi_runtime_config.threaded = True
        # Level of thread support to request at MPI initialization
        # "single" : use MPI_THREAD_SINGLE
        # "funneled" : use MPI_THREAD_FUNNELED
        # "serialized" : use MPI_THREAD_SERIALIZED
        # "multiple" : use MPI_THREAD_MULTIPLE
        __mpi_runtime_config.thread_level = 'multiple'
        # Automatic MPI finalization at exit time
        __mpi_runtime_config.finalize = False

        # Import mpi4py and thus initialize MPI
        from mpi4py import MPI as __mpi_factory  # NOTE: This is a private variable to avoid uncontrolled access to MPI

        # Get world size and processor rank
        mpi_world_size = __mpi_factory.COMM_WORLD.Get_size()
        mpi_processor_rank = __mpi_factory.COMM_WORLD.Get_rank()

        # Get pid
        mpi_pid = os.getpid()

        # Get version and vendor info
        mpi_version_info = __mpi_factory.Get_version()
        mpi_vendor_info = __mpi_factory.get_vendor()
        mpi_thread_safe_level = __mpi_factory.Query_thread()

        # Prepare version info string
        mpi_version_str = str(mpi_version_info[0])
        for tuple_element_index in range(1, len(mpi_version_info)):
            mpi_version_str = mpi_version_str + "." + str(
                mpi_version_info[tuple_element_index])

        # Prepare vendor info string
        mpi_vendor_str = str(mpi_vendor_info[0])
        if len(mpi_vendor_info) > 1:
            mpi_vendor_version = mpi_vendor_info[1]
            mpi_vendor_version_str = str(mpi_vendor_version[0])
            for tuple_element_index in range(1, len(mpi_vendor_version)):
                mpi_vendor_version_str = mpi_vendor_version_str + "." + str(
                    mpi_vendor_version[tuple_element_index])
            mpi_vendor_str = mpi_vendor_str + " v" + mpi_vendor_version_str

        # Set initialization flag
        mpi_initialized = True
    except Exception, instance:
        mpi_initialization_error_msg = traceback.format_exc()
        __mpi_factory = None
        mpi_world_size = 1
        mpi_processor_rank = 0
        mpi_version_info = None
        mpi_vendor_info = None
        mpi_version_str = ""
        mpi_vendor_str = ""
        mpi_vendor_version_str = ""
        mpi_thread_safe_level = -1
        mpi_initialized = False
Example no. 14
    def setUp(self):
        self.COMM = self.COMM.Dup()
    def tearDown(self):
        self.COMM.Free()
        del self.COMM


class TestCCOObjSelf(BaseTestCCOObj, unittest.TestCase):
    COMM = MPI.COMM_SELF

class TestCCOObjWorld(BaseTestCCOObj, unittest.TestCase):
    COMM = MPI.COMM_WORLD


class TestCCOObjSelfDup(BaseTestCCOObjDup, unittest.TestCase):
    COMM = MPI.COMM_SELF

class TestCCOObjWorldDup(BaseTestCCOObjDup, unittest.TestCase):
    COMM = MPI.COMM_WORLD


_name, _version = MPI.get_vendor()
if _name == 'Open MPI':
    if _version < (1, 4, 0):
        if MPI.Query_thread() > MPI.THREAD_SINGLE:
            del TestCCOObjWorldDup


if __name__ == '__main__':
    unittest.main()
Example no. 15
class BaseTestComm(object):
    def testConstructor(self):
        comm = MPI.Comm(self.COMM)
        self.assertEqual(comm, self.COMM)
        self.assertFalse(comm is self.COMM)

    def testPyProps(self):
        comm = self.COMM
        self.assertEqual(comm.Get_size(), comm.size)
        self.assertEqual(comm.Get_rank(), comm.rank)
        self.assertEqual(comm.Is_intra(), comm.is_intra)
        self.assertEqual(comm.Is_inter(), comm.is_inter)
        self.assertEqual(comm.Get_topology(), comm.topology)

    def testGroup(self):
        comm = self.COMM
        group = self.COMM.Get_group()
        self.assertEqual(comm.Get_size(), group.Get_size())
        self.assertEqual(comm.Get_rank(), group.Get_rank())
        group.Free()
        self.assertEqual(group, MPI.GROUP_NULL)

    def testCloneFree(self):
        comm = self.COMM.Clone()
        comm.Free()
        self.assertEqual(comm, MPI.COMM_NULL)

    def testCompare(self):
        results = (MPI.IDENT, MPI.CONGRUENT, MPI.SIMILAR, MPI.UNEQUAL)
        ccmp = MPI.Comm.Compare(self.COMM, MPI.COMM_WORLD)
        self.assertTrue(ccmp in results)
        ccmp = MPI.Comm.Compare(self.COMM, self.COMM)
        self.assertEqual(ccmp, MPI.IDENT)
        comm = self.COMM.Dup()
        ccmp = MPI.Comm.Compare(self.COMM, comm)
        comm.Free()
        self.assertEqual(ccmp, MPI.CONGRUENT)

    def testIsInter(self):
        is_inter = self.COMM.Is_inter()
        self.assertTrue(type(is_inter) is bool)

    def testGetSetName(self):
        try:
            name = self.COMM.Get_name()
            self.COMM.Set_name('comm')
            self.assertEqual(self.COMM.Get_name(), 'comm')
            self.COMM.Set_name(name)
            self.assertEqual(self.COMM.Get_name(), name)
        except NotImplementedError:
            self.skipTest('mpi-comm-name')

    def testGetParent(self):
        try:
            parent = MPI.Comm.Get_parent()
        except NotImplementedError:
            self.skipTest('mpi-comm-get_parent')

    def testDupWithInfo(self):
        info = None
        self.COMM.Dup(info).Free()
        info = MPI.INFO_NULL
        self.COMM.Dup(info).Free()
        self.COMM.Dup_with_info(info).Free()
        info = MPI.Info.Create()
        self.COMM.Dup(info).Free()
        self.COMM.Dup_with_info(info).Free()
        info.Free()

    @unittest.skipMPI('mpich(<=3.1.0)', MPI.Query_thread() > MPI.THREAD_SINGLE)
    def testIDup(self):
        try:
            comm, request = self.COMM.Idup()
        except NotImplementedError:
            self.skipTest('mpi-comm-idup')
        request.Wait()
        ccmp = MPI.Comm.Compare(self.COMM, comm)
        comm.Free()
        self.assertEqual(ccmp, MPI.CONGRUENT)

    def testGetSetInfo(self):
        #info = MPI.INFO_NULL
        #self.COMM.Set_info(info)
        info = MPI.Info.Create()
        self.COMM.Set_info(info)
        info.Free()
        info = self.COMM.Get_info()
        self.COMM.Set_info(info)
        info.Free()

    def testCreate(self):
        group = self.COMM.Get_group()
        comm = self.COMM.Create(group)
        ccmp = MPI.Comm.Compare(self.COMM, comm)
        self.assertEqual(ccmp, MPI.CONGRUENT)
        comm.Free()
        group.Free()

    @unittest.skipMPI('openmpi(<=1.8.1)')
    def testCreateGroup(self):
        group = self.COMM.Get_group()
        try:
            try:
                comm = self.COMM.Create_group(group)
                ccmp = MPI.Comm.Compare(self.COMM, comm)
                self.assertEqual(ccmp, MPI.CONGRUENT)
                comm.Free()
            finally:
                group.Free()
        except NotImplementedError:
            self.skipTest('mpi-comm-create_group')

    @unittest.skipMPI('openmpi(==2.0.0)')
    def testSplitType(self):
        try:
            MPI.COMM_SELF.Split_type(MPI.COMM_TYPE_SHARED).Free()
        except NotImplementedError:
            self.skipTest('mpi-comm-split_type')
        #comm = self.COMM.Split_type(MPI.UNDEFINED)
        #self.assertEqual(comm, MPI.COMM_NULL)
        comm = self.COMM.Split_type(MPI.COMM_TYPE_SHARED)
        self.assertNotEqual(comm, MPI.COMM_NULL)
        size = self.COMM.Get_size()
        rank = self.COMM.Get_rank()
        if size == 1:
            self.assertEqual(comm.size, 1)
            self.assertEqual(comm.rank, 0)
        comm.Free()
        for root in range(size):
            if rank == root:
                split_type = MPI.COMM_TYPE_SHARED
            else:
                split_type = MPI.UNDEFINED
            comm = self.COMM.Split_type(split_type)
            if rank == root:
                self.assertNotEqual(comm, MPI.COMM_NULL)
                self.assertEqual(comm.size, 1)
                self.assertEqual(comm.rank, 0)
                comm.Free()
            else:
                self.assertEqual(comm, MPI.COMM_NULL)
Example no. 16
        self.assertEqual(eh, MPI.ERRORS_ARE_FATAL)
        eh.Free()
        MPI.FILE_NULL.Set_errhandler(MPI.ERRORS_RETURN)
        eh = MPI.FILE_NULL.Get_errhandler()
        self.assertEqual(eh, MPI.ERRORS_RETURN)
        eh.Free()


class TestFileSelf(BaseTestFile, unittest.TestCase):
    COMM = MPI.COMM_SELF
    prefix = BaseTestFile.prefix + ('-%d' % MPI.COMM_WORLD.Get_rank())


_name, _version = MPI.get_vendor()
if _name == 'Open MPI':
    if (_version <= (1, 2, 8) and MPI.Query_thread() > MPI.THREAD_SINGLE):
        del BaseTestFile.testPreallocate
        del BaseTestFile.testGetSetInfo
        del BaseTestFile.testGetSetAtomicity
        del BaseTestFile.testSync
        del BaseTestFile.testGetAmode
        del BaseTestFile.testGetSetSize
        del BaseTestFile.testGetSetView
        del BaseTestFile.testGetByteOffset
        del BaseTestFile.testGetTypeExtent
        del BaseTestFile.testSeekGetPosition
        del BaseTestFile.testSeekGetPositionShared
else:
    try:
        dummy = BaseTestFile()
        dummy.COMM = MPI.COMM_SELF
Example no. 17
    def run(self):
        try:
            from mpi4py import MPI
        except ImportError:
            _logger.error("Unable to import mpi4py. Check if the mpi4py module is in your PYTHONPATH or use a different communication manager. DTM will now exit.")
            self.commReadyEvent.set()
            self.exitStatus.set()       # Set exit at the beginning
            return

        def mpiSend(msg, dest):
            # Pickle and send over MPI
            arrayBuf = array.array('b')
            arrayBuf.fromstring(cPickle.dumps(msg, cPickle.HIGHEST_PROTOCOL))

            b = MPI.COMM_WORLD.Isend([arrayBuf, MPI.CHAR], dest=dest, tag=self.msgSendTag)
            if self.traceMode:
                etree.SubElement(self.traceTo, "msg", {"direc" : "out", "type" : str(msg.msgType), "otherWorker" : str(dest), "msgtag" : str(self.msgSendTag), "time" : repr(time.time())})

            self.msgSendTag += 1
            return b, arrayBuf

        self.pSize = MPI.COMM_WORLD.Get_size()
        self.currentId = MPI.COMM_WORLD.Get_rank()

        MPI.COMM_WORLD.Barrier()

        self.commReadyEvent.set()   # Notify the main thread that we are ready

        if self.currentId == 0 and MPI.Query_thread() > 0:
            # Warn only once
            _logger.warning("MPI was initialized with a thread level of %i, which is higher than MPI_THREAD_SINGLE."
            " The current MPI implementations do not always handle well the MPI_THREAD_MULTIPLE or MPI_THREAD_SERIALIZED modes."
            " As DTM was designed to work with the base, safe mode (MPI_THREAD_SINGLE), it is strongly suggested to change"
            " the 'thread_level' variable or your mpi4py settings in 'site-packages/mpi4py/rc.py', unless you have strong"
            " motivations to keep that setting. This may bring both stability and performance improvements.", MPI.Query_thread())

        lRecvWaiting = []
        lSendWaiting = []
        countSend = 0
        countRecv = 0
        lMessageStatus = MPI.Status()
        working = True

        countRecvNotTransmit = 0
        countRecvTimeInit = time.time()

        while working:
            recvSomething = False
            sendSomething = False

            if self.exitStatus.is_set():    # Exiting
                # Warning : the communication thread MUST clear the sendQ
                # BEFORE leaving (the exiting orders must be send)
                working = False

            while len(lRecvWaiting) < DTM_CONCURRENT_RECV_LIMIT and MPI.COMM_WORLD.Iprobe(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=lMessageStatus):
                # We received something
                lBuf = array.array('b', (0,))
                lBuf = lBuf * lMessageStatus.Get_elements(MPI.CHAR)

                lRecvWaiting.append((lBuf, MPI.COMM_WORLD.Irecv([lBuf, MPI.CHAR], source=lMessageStatus.Get_source(), tag=lMessageStatus.Get_tag()), lMessageStatus.Get_tag()))

                lMessageStatus = MPI.Status()
                recvSomething = True


            for i, reqTuple in enumerate(lRecvWaiting):
                if reqTuple[1].Test():
                    countRecv += 1
                    dataS = cPickle.loads(reqTuple[0].tostring())
                    if self.traceMode:
                        etree.SubElement(self.traceTo, "msg", {"direc" : "in", "type" : str(dataS.msgType), "otherWorker" : str(dataS.senderWid), "msgtag" : str(reqTuple[2]), "time" : repr(time.time())})
                    self.recvQ.put(dataS)
                    lRecvWaiting[i] = None
                    recvSomething = True
                    # Wake up the main thread if there's a sufficient number
                    # of pending receives
                    countRecvNotTransmit += 1


            if countRecvNotTransmit > 50 or (time.time() - countRecvTimeInit > 0.1 and countRecvNotTransmit > 0):
                countRecvNotTransmit = 0
                countRecvTimeInit = time.time()
                self.wakeUpMainThread.set()

            lRecvWaiting = filter(lambda d: not d is None, lRecvWaiting)

            while len(lSendWaiting) < DTM_CONCURRENT_SEND_LIMIT:
                # Send all pending sends, under the limit of
                # DTM_CONCURRENT_SEND_LIMIT
                try:
                    sendMsg = self.sendQ.get_nowait()
                    countSend += 1
                    sendMsg.sendTime = time.time()
                    commA, buf1 = mpiSend(sendMsg, sendMsg.receiverWid)
                    lSendWaiting.append((commA, buf1))
                    sendSomething = True
                except Queue.Empty:
                    break

            lSendWaiting = filter(lambda d: not d[0].Test(), lSendWaiting)

            if not recvSomething:
                time.sleep(self.random.uniform(DTM_MPI_MIN_LATENCY, DTM_MPI_MAX_LATENCY))

        while len(lSendWaiting) > 0:
            # Send the lasts messages before shutdown
            lSendWaiting = filter(lambda d: not d[0].Test(), lSendWaiting)
            time.sleep(self.random.uniform(DTM_MPI_MIN_LATENCY, DTM_MPI_MAX_LATENCY))

        del lSendWaiting
        del lRecvWaiting
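The Python 2 loop above hand-pickles every message into an array buffer before calling Isend/Irecv. A hedged sketch of the same probe-then-receive idiom using mpi4py's high-level, pickle-based lowercase API (Python 3; illustrative only, not a drop-in replacement for the DTM communication thread):

from mpi4py import MPI

comm = MPI.COMM_WORLD
status = MPI.Status()
# Non-blocking probe; if a message is pending, receive it as a Python object.
if comm.iprobe(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status):
    msg = comm.recv(source=status.Get_source(), tag=status.Get_tag())
    print("received", msg, "from rank", status.Get_source())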
Example no. 18
            comm.Free()


class TestP2PBufSelf(BaseTestP2PBuf, unittest.TestCase):
    COMM = MPI.COMM_SELF


class TestP2PBufWorld(BaseTestP2PBuf, unittest.TestCase):
    COMM = MPI.COMM_WORLD


class TestP2PBufSelfDup(TestP2PBufSelf):
    def setUp(self):
        self.COMM = MPI.COMM_SELF.Dup()

    def tearDown(self):
        self.COMM.Free()


@unittest.skipMPI('openmpi(<1.4.0)', MPI.Query_thread() > MPI.THREAD_SINGLE)
class TestP2PBufWorldDup(TestP2PBufWorld):
    def setUp(self):
        self.COMM = MPI.COMM_WORLD.Dup()

    def tearDown(self):
        self.COMM.Free()


if __name__ == '__main__':
    unittest.main()
Example no. 19
from mpi4py import rc
rc.initialize = False

from mpi4py import MPI
assert not MPI.Is_initialized()
assert not MPI.Is_finalized()

level = MPI.Init_thread(MPI.THREAD_MULTIPLE)
assert MPI.Query_thread() == level
assert MPI.Is_initialized()
assert not MPI.Is_finalized()

MPI.Finalize()
assert MPI.Is_initialized()
assert MPI.Is_finalized()
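A separate, standalone companion sketch: besides Query_thread(), mpi4py also exposes Is_thread_main(), which reports whether the calling thread is the one that initialized MPI (relevant under THREAD_FUNNELED, where only that thread may call MPI):

import threading
from mpi4py import MPI

def report():
    print(threading.current_thread().name,
          "is the main MPI thread:", MPI.Is_thread_main())

report()                              # True in the initializing thread
t = threading.Thread(target=report)   # typically False in a worker thread
t.start()
t.join()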