Example #1
    def setup(self):
        self.dt = zeros(1, float)
        self.dtClient = zeros(1, float)

        self.procID = MPI.COMM_WORLD.Get_rank()
        # connect to server: first get parent and port name, then connect to the port name
        self.PARENT = MPI.Comm.Get_parent()
        assert self.PARENT != MPI.COMM_NULL
        self.portName = str(0)
        # print(" ServerSide: self.portName = ", self.portName)
        # print(" ServerSide: self.portName = ", self.portName.tostring())
        # get port name from parent (sent by MPM)
        if self.procID == 0:
            self.portName = MPI.Open_port(MPI.INFO_NULL)
            print(" Server Side: self.portName = ", self.portName)
            print("type= ", type(self.portName))
            self.PARENT.send(self.portName, dest=0, tag=7776)
        # print(" Server Side: self.portName = ", self.portName.tostring())
        #Connect this port
        self.SERVER_COMM_CLIENT = MPI.COMM_WORLD.Accept(self.portName,
                                                        MPI.INFO_NULL,
                                                        root=0)
        self.remoteSize = self.SERVER_COMM_CLIENT.Get_remote_size()

        #solidBoundary mesh site
        self.solidMesh.setNodeRepeationArrayCoupling(self.solidBoundaryMesh)
        self.solidMesh.setCommonFacesMap(self.solidBoundaryMesh)
        self.createSolidForceBVFields()

        if not MPI.COMM_WORLD.Get_rank():
            print "from ServerSide, reporting remoteSize = ", self.remoteSize
Example #2
def tvb_connection(comm, root, info, logger_master, path_to_files_send):
    '''
    MPI inter communicator to TVB
    
    '''
    logger_master.info('Translate SEND: before open_port')
    if comm.Get_rank() == 0:
        ### Connection to simulation (incoming data)
        port_send = MPI.Open_port(info)
        logger_master.info('Translate SEND: after open_port: ' + port_send)
        # Write file configuration of the port
        fport = open(path_to_files_send, "w+")
        fport.write(port_send)
        fport.close()
        pathlib.Path(path_to_files_send+'.unlock').touch()
        logger_master.info('Translate SEND: path_file: ' + path_to_files_send)
    else:
        port_send = None
    
    # broadcast port info, accept connection on all ranks!
    # necessary to avoid problems with information about the mpi rank in open port file.
    port_send = comm.bcast(port_send,root)
    logger_master.info('Translate SEND: Rank ' + str(comm.Get_rank()) + ' accepting connection on: ' + port_send)
    comm_sender = comm.Accept(port_send, info, root)
    logger_master.info('Translate SEND: Simulation client connected to ' + str(comm_sender.Get_rank()))
    
    return comm_sender, port_send
Example #3
def nest_connection(comm, root, info, logger_master, path_to_files_receive):
    '''
    MPI inter communicator to NEST
    
    '''
    logger_master.info('Translate Receive: before open_port')
    if comm.Get_rank() == 0:
        ### Connection to simulation (incoming data)
        port_receive = MPI.Open_port(info)
        logger_master.info('Translate Receive: after open_port: '+port_receive)
        # Write file configuration of the port
        fport = open(path_to_files_receive, "w+")
        fport.write(port_receive)
        fport.close()
        pathlib.Path(path_to_files_receive+'.unlock').touch()
        logger_master.info('Translate Receive: path_file: ' + path_to_files_receive)
    else:
        port_receive = None
    
    # broadcast port info, accept connection on all ranks!
    # necessary to avoid problems with information about the mpi rank in open port file.
    port_receive = comm.bcast(port_receive,root) # TODO: ask Lena if this is needed/correct.
    logger_master.info('Translate Receive: Rank ' + str(comm.Get_rank()) + ' accepting connection on: ' + port_receive)
    comm_receiver = comm.Accept(port_receive, info, root)
    logger_master.info('Translate Receive: Simulation client connected to ' + str(comm_receiver.Get_rank()))
    
    return comm_receiver, port_receive
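The matching teardown is not shown in these helpers; a hedged sketch of what it would look like, assuming the caller still holds the communicators, the port string and the path to the port file:

import os
from mpi4py import MPI

def close_connection(comm, root, comm_inter, port, path_to_file):
    # collectively disconnect from the peer application
    comm_inter.Disconnect()
    # only the rank that opened the port closes it and removes the files
    if comm.Get_rank() == root:
        MPI.Close_port(port)
        os.remove(path_to_file)
        os.remove(path_to_file + '.unlock')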
Example #4
def main_server(COMM):
    nprocs = COMM.Get_size()
    myrank = COMM.Get_rank()

    service, port, info = None, None, MPI.INFO_NULL
    if myrank == 0:
        port = MPI.Open_port(info)
        log(COMM, "open port '%s'", port)
        service = 'cpi'
        MPI.Publish_name(service, info, port)
        log(COMM, "service '%s' published.", service)
    else:
        port = ''

    log(COMM, "waiting for client connection ...")
    icomm = COMM.Accept(port, info, root=0)
    log(COMM, "client connection accepted.")

    worker(icomm)

    log(COMM, "disconnecting from client ...")
    icomm.Disconnect()
    log(COMM, "client disconnected.")

    if myrank == 0:
        MPI.Unpublish_name(service, info, port)
        log(COMM, "service '%s' unpublished", port)
        MPI.Close_port(port)
        log(COMM, "closed  port '%s' ", port)
Example #5
def testNamePublishing(self):
    rank = MPI.COMM_WORLD.Get_rank()
    service = "mpi4py-%d" % rank
    port = MPI.Open_port()
    MPI.Publish_name(service, port)
    found = MPI.Lookup_name(service)
    self.assertEqual(port, found)
    MPI.Unpublish_name(service, port)
    MPI.Close_port(port)
Example #6
def badport():
    if MPI.get_vendor()[0] != 'MPICH':
        return False
    try:
        port = MPI.Open_port()
        MPI.Close_port(port)
    except Exception:
        port = ""
    return port == ""
Example #7
    def setup(self):
        self.dt = zeros(1, float)
        self.dtClient = zeros(1, float)

        self.procID = MPI.COMM_WORLD.Get_rank()
        # connect to server: first get parent and port name, then connect to the port name
        self.PARENT = MPI.Comm.Get_parent()
        assert self.PARENT != MPI.COMM_NULL
        assert MPI.COMM_WORLD.Get_size() == 1
        self.portName = str(0)
        # print(" ServerSide: self.portName = ", self.portName)
        # print(" ServerSide: self.portName = ", self.portName.tostring())
        # get port name from parent (sent by MPM)
        if self.procID == 0:
            self.portName = MPI.Open_port(MPI.INFO_NULL)
            print(" Server Side: self.portName = ", self.portName)
            print("type= ", type(self.portName))
            self.PARENT.send(self.portName, dest=0, tag=7776)
        # print(" Server Side: self.portName = ", self.portName.tostring())
        # Connect this port
        self.SERVER_COMM_CLIENT = MPI.COMM_WORLD.Accept(self.portName, MPI.INFO_NULL, root=0)
        self.remoteSize = self.SERVER_COMM_CLIENT.Get_remote_size()

        # get coordinates from geomField
        self.coordA = self.solidBoundaryMesh.getNodeCoordinatesPtr()
        self.coord = self.coordA.asNumPyArray()
        self.coordRecvBuf = self.coord.copy()
        # self.coordA = self.geomFields.coordinate[self.solidBoundaryMesh.getNodes()]
        # self.coord = self.coordA.asNumPyArray()
        # self.sendBuf = self.coord.copy()

        # velocity field, adding face velocities
        self.vField = fvmbaseExt.Field("velocity")
        self.faces = self.solidBoundaryMesh.getFaces()
        self.nfaces = self.faces.getCount()
        area = self.geomField.area[self.faces]

        # initialize force to zero
        solidCells = self.solidMesh.getCells()
        forcePlate = self.plateField.force[solidCells].asNumPyArray()
        forcePlate[:] = 0.0
        thickness = self.plateField.thickness[solidCells].asNumPyArray()
        thickness[:] = self.thickness

        # adding force storage sized to the boundary faces
        self.forceA = area.newSizedClone(self.nfaces)
        self.force = self.forceA.asNumPyArray()
        self.sendForceBuf = self.force.copy()
        if not MPI.COMM_WORLD.Get_rank():
            print("from ServerSide, reporting remoteSize = ", self.remoteSize)
Example #8
def _open_port_accept_connection(comm, root, info, logger_master,
                                 path_to_files):
    '''
    General MPI server-client connection.
    Opens a port and writes the details to file, then accepts an incoming connection
    from another application on this port. The resulting inter communicator is returned.

    In some MPI implementations, information about the rank is encoded in the port info.
    Therefore only rank 0 opens the port and broadcasts the relevant info to all other
    ranks, so an M:N connection between two MPI applications is possible.

    :param comm: the INTRA communicator of the calling application ('server') which opens and accepts the connection
    :param root: the root rank on which the 'main' connection before the broadcast is done
    :param info: MPI info object
    :param logger_master: the master logger of this cosim run
    :param path_to_files: location of the files

    :return intra_comm: the newly created inter communicator between the two applications
    :return port: the port information, needed to properly close the connection after the job
    '''
    logger_master.info('Transformer: before open port')
    if comm.Get_rank() == root:
        port = MPI.Open_port(info)
        logger_master.info('Transformer: after open port, port details: ' +
                           port)
        fport = open(path_to_files, "w+")
        fport.write(port)
        fport.close()
        pathlib.Path(path_to_files + '.unlock').touch()
        logger_master.info('Transformer: path to file with port info: ' +
                           path_to_files)
    else:
        port = None
    ### NOTE: control print to console.
    print("RichEndPoint\n -- port opened and file created:", path_to_files,
          "\n -- I'm rank", comm.Get_rank())
    sys.stdout.flush()
    # broadcast port info, accept connection on all ranks!
    # necessary to avoid conflicts in the port file -> contains MPI-Rank infos
    port = comm.bcast(port, root)
    print('Transformer: Rank ' + str(comm.Get_rank()) +
          ' accepting connection on: ' + port)
    sys.stdout.flush()
    intra_comm = comm.Accept(port, info, root)
    logger_master.info('Transformer: Simulation client connected to ' +
                       str(intra_comm.Get_rank()))

    return intra_comm, port
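The client side of this file-based port exchange is not part of the snippet; a rough sketch under the same conventions (wait for the .unlock marker, read the port on root, broadcast, then connect on all ranks):

import time
import pathlib
from mpi4py import MPI

def _read_port_and_connect(comm, root, info, path_to_files):
    # wait until the server signals that the port file is complete
    while not pathlib.Path(path_to_files + '.unlock').exists():
        time.sleep(0.1)
    port = None
    if comm.Get_rank() == root:
        with open(path_to_files, 'r') as fport:
            port = fport.read()
    # broadcast port info, connect on all ranks (mirrors the Accept side)
    port = comm.bcast(port, root)
    return comm.Connect(port, info, root), port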
Example #9
def testConnectAccept(self):
    comm_self = MPI.COMM_SELF
    comm_world = MPI.COMM_WORLD
    wsize = comm_world.Get_size()
    wrank = comm_world.Get_rank()
    if wsize == 1: return
    group_world = comm_world.Get_group()
    group = group_world.Excl([0])
    group_world.Free()
    comm = comm_world.Create(group)
    group.Free()
    if wrank == 0:
        self.assertEqual(comm, MPI.COMM_NULL)
    else:
        self.assertNotEqual(comm, MPI.COMM_NULL)
        self.assertEqual(comm.size, comm_world.size - 1)
        self.assertEqual(comm.rank, comm_world.rank - 1)
    if wrank == 0:
        port = comm_world.recv(source=1)
        intercomm = comm_self.Connect(port)
        self.assertEqual(intercomm.remote_size, comm_world.size - 1)
        self.assertEqual(intercomm.size, 1)
        self.assertEqual(intercomm.rank, 0)
    else:
        if wrank == 1:
            port = MPI.Open_port()
            comm_world.send(port, dest=0)
        else:
            port = None
        intercomm = comm.Accept(port, root=0)
        if wrank == 1:
            MPI.Close_port(port)
        self.assertEqual(intercomm.remote_size, 1)
        self.assertEqual(intercomm.size, comm_world.size - 1)
        self.assertEqual(intercomm.rank, comm.rank)
        comm.Free()
    if wrank == 0:
        message = TestDPM.message
        root = MPI.ROOT
    else:
        message = None
        root = 0
    message = intercomm.bcast(message, root)
    if wrank == 0:
        self.assertEqual(message, None)
    else:
        self.assertEqual(message, TestDPM.message)
    intercomm.Free()
Example #10
    def __init__(self, port, config, device):
        Server.__init__(self, port=port)
        PTBase.__init__(self, config=config, device=device)

        #######

        self.info = MPI.INFO_NULL

        self.port = MPI.Open_port(self.info)

        self.service = 'parallel-training'

        MPI.Publish_name(self.service, self.info, self.port)

        self.worker_comm = {}
        self.worker_rank = {}
        self.first_worker_id = None
Example #11
                    root = 0
                message = intercomm.bcast(message, root)
                if rank == 0:
                    self.assertEqual(message, None)
                else:
                    self.assertEqual(message, TestDPM.message)
                intercomm.Free()
        MPI.COMM_WORLD.Barrier()


name, version = MPI.get_vendor()
if name == 'MPICH' or name == 'MPICH2':
    if MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is None:
        del TestDPM.testNamePublishing
elif name == 'Open MPI':
    del TestDPM
elif name == 'MVAPICH2':
    del TestDPM
elif name == 'Microsoft MPI':
    del TestDPM
elif name == 'Platform MPI':
    del TestDPM.testNamePublishing
else:
    try:
        MPI.Close_port(MPI.Open_port())
    except NotImplementedError:
        del TestDPM

if __name__ == '__main__':
    unittest.main()
Example #12
                                      level_log)

        # variables for communication between threads
        status_data = [1]  # status of the buffer
        buffer = [np.array([])]

        # object for analysing data
        store = store_data(path_folder_config, param)

        ############
        # Open the MPI port connection for receiver
        info = MPI.INFO_NULL
        root = 0

        logger_master.info('Translate Receive: before open_port')
        port_receive = MPI.Open_port(info)
        logger_master.info('Translate Receive: after open_port')
        # Write file configuration of the port
        path_to_files = path_folder_config + file_spike_detector
        fport = open(path_to_files, "w+")
        fport.write(port_receive)
        fport.close()
        # the .unlock marker guarantees that when it appears, the port file already contains the port
        pathlib.Path(path_to_files + '.unlock').touch()
        logger_master.info('Translate Receive: path_file: ' + path_to_files)
        # Wait until connection
        logger_master.info('Waiting for communication')
        comm_receiver = MPI.COMM_WORLD.Accept(port_receive, info, root)
        logger_master.info('got communication, starting thread')
        #########################
Example #13
def input(path):
    """
    Simulate some random current input
    :param path: the file for the configurations of the connection
    :return:
    """
    #Start communication channels
    path_to_files = path
    #For NEST
    # Init connection
    print("Waiting for port details")
    info = MPI.INFO_NULL
    root = 0
    port = MPI.Open_port(info)
    fport = open(path_to_files, "w+")
    fport.write(port)
    fport.close()
    print('wait connection ' + port)
    sys.stdout.flush()
    comm = MPI.COMM_WORLD.Accept(port, info, root)
    print('connect to ' + port)

    #test one rate
    status_ = MPI.Status()
    check = np.empty(1, dtype='b')
    starting = 1
    while True:
        comm.Recv([check, 1, MPI.CXX_BOOL],
                  source=0,
                  tag=MPI.ANY_TAG,
                  status=status_)
        print(" start to send")
        sys.stdout.flush()
        print(" status a tag ", status_.Get_tag())
        sys.stdout.flush()
        if status_.Get_tag() == 0:
            # receive list ids
            size_list = np.empty(1, dtype='i')
            comm.Recv([size_list, 1, MPI.INT], source=0, tag=0, status=status_)
            print("size list id", size_list)
            sys.stdout.flush()
            list_id = np.empty(size_list, dtype='i')
            comm.Recv([list_id, size_list, MPI.INT],
                      source=0,
                      tag=0,
                      status=status_)
            print(" id ", list_id)
            sys.stdout.flush()
            shape = np.random.randint(0, 50, 1, dtype='i') * 2
            data = starting + np.random.rand(shape[0]) * 200
            data = np.around(np.sort(np.array(data, dtype='d')), decimals=1)
            send_shape = np.array(np.concatenate([shape, shape]), dtype='i')
            comm.Send([send_shape, MPI.INT],
                      dest=status_.Get_source(),
                      tag=list_id[0])
            print(" shape data ", shape)
            sys.stdout.flush()
            comm.Send([data, MPI.DOUBLE],
                      dest=status_.Get_source(),
                      tag=list_id[0])
            print(" send data", data)
            sys.stdout.flush()
            comm.Recv([check, 1, MPI.CXX_BOOL],
                      source=status_.Get_source(),
                      tag=MPI.ANY_TAG,
                      status=status_)
            print("end run")
            sys.stdout.flush()
            starting += 200
        elif (status_.Get_tag() == 2):
            print("end simulation")
            sys.stdout.flush()
            print("ending time : ", starting)
            sys.stdout.flush()
            break
        else:
            print(status_.Get_tag())
            break
    comm.Disconnect()
    MPI.Close_port(port)
    os.remove(path_to_files)
    print('exit')
    MPI.Finalize()
Example #14
    def __server(self):
        """
            Server side logic for the client/server MPI communication approach
            NOTE: check notes below where the method is called
        Returns:

        """
        if self.__mpi_cw_rank == 0:
            try:
                file_object = open(self.__mmap_path_filename, 'rb+')
            except FileNotFoundError:
                self.error('{} could not be opened'.format(
                    self.__mmap_path_filename))
                return RETURN_NOT_OK

            try:
                mmap_object = mmap.mmap(file_object.fileno(),
                                        length=0,
                                        access=mmap.ACCESS_WRITE,
                                        offset=0)
            except SyntaxError:
                self.error('{} could not be mmaped'.format(
                    self.__mmap_path_filename))
                return RETURN_NOT_OK

            # Open Listening Port
            try:
                self.__mpi_info = MPI.INFO_NULL
                self.__mpi_port_name = MPI.Open_port(self.__mpi_info)
                self.__mpi_port_name_length = len(self.__mpi_port_name)
            except MPI.Exception as ierr:
                self.report_mpi_error(mpi_ierr=ierr,
                                      mpi_operation_name='MPI.Open_port')
                return RETURN_NOT_OK

            # publishing the port name by means of the mmaped file
            mmap_object.seek(0)
            mmap_object[MemMap.PORT_LENGTH_LOC:MemMap.PORT_LENGTH_LENGTH] = \
                self.__mpi_port_name_length.to_bytes(length=MemMap.PORT_LENGTH_LENGTH, byteorder='big')

            mmap_object[MemMap.PORT_NAME_LOC:MemMap.PORT_NAME_LOC + self.__mpi_port_name_length] = \
                bytes('{}'.format(self.__mpi_port_name), 'utf8', )
            mmap_object.flush()

            #
            self.__mpi_expected_source = 0
        else:
            self.__mpi_expected_source = MPI.PROC_NULL

        self.info('waiting for client connection {}'.format(
            self.__mpi_port_name))
        self.__mpi_comm = self.__MPI_COMM_WORLD.Accept(self.__mpi_port_name,
                                                       self.__mpi_info,
                                                       self.__mpi_root)

        self.info('client connected, waiting for PING value')
        ping_value = self.__mpi_comm.recv(source=self.__mpi_expected_source,
                                          tag=self.__mpi_message_tag)

        if self.__mpi_cw_rank == 0:
            if ping_value == self.__ping_value:
                self.info('Received expected PING value {} from client'.format(
                    ping_value))
            else:
                self.info(
                    'expected PING value from client should be {}, received {}'
                    .format(self.__ping_value, ping_value))
                self.__mpi_comm.Disconnect()
                return RETURN_NOT_OK
        else:
            # finishing server running on rank > 0 MPI processes
            self.info('Received {} from client'.format(ping_value))
            return RETURN_OK

        self.info('sending PONG value')
        self.__mpi_comm.send(self.__pong_value,
                             dest=0,
                             tag=self.__mpi_message_tag)

        while True:
            self.info('Waiting for the poison pill')
            poison_pill = self.__mpi_comm.recv(
                source=self.__mpi_expected_source, tag=self.__mpi_message_tag)

            if poison_pill is None:
                break

            self.info('while waiting for the poison pill, {} was received'.format(
                poison_pill))

        self.__mpi_comm.Disconnect()

        if self.__mpi_cw_rank == 0:
            MPI.Close_port(self.__mpi_port_name)
        self.info('Server Processing Done')

        return RETURN_OK
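The client half of this mmap-published handshake is not included; a rough sketch that mirrors the server's protocol. The MemMap offsets, the PING value and the message tag are placeholders chosen to match the server above, not values taken from the source:

import mmap
from mpi4py import MPI

class MemMap:                 # layout mirrored from the server code;
    PORT_LENGTH_LOC = 0       # the concrete offsets are assumptions
    PORT_LENGTH_LENGTH = 4
    PORT_NAME_LOC = 4

PING_VALUE = 'PING'           # placeholder; must equal the server's value
MESSAGE_TAG = 0               # placeholder; must equal the server's tag

def client(mmap_path_filename):
    # read the published port name out of the mmaped file
    with open(mmap_path_filename, 'rb') as file_object:
        mmap_object = mmap.mmap(file_object.fileno(), length=0,
                                access=mmap.ACCESS_READ)
        length = int.from_bytes(
            mmap_object[MemMap.PORT_LENGTH_LOC:MemMap.PORT_LENGTH_LENGTH],
            byteorder='big')
        port_name = mmap_object[
            MemMap.PORT_NAME_LOC:MemMap.PORT_NAME_LOC + length].decode('utf8')
        mmap_object.close()

    # connect, do the PING/PONG exchange, then send the poison pill
    comm = MPI.COMM_WORLD.Connect(port_name, MPI.INFO_NULL, root=0)
    comm.send(PING_VALUE, dest=0, tag=MESSAGE_TAG)
    pong_value = comm.recv(source=0, tag=MESSAGE_TAG)
    comm.send(None, dest=0, tag=MESSAGE_TAG)  # None ends the server's loop
    comm.Disconnect()
    return pong_value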
Example #15
def internal__open_port(self, outportname):
    outportname.value = MPI.Open_port(self.get_null_info())
    return 0
Example #16
# server.py
"""
Server side of the MPI client/server programming model.

Run this with 1 process, like:
$ mpiexec -n 1 python server.py
"""

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD

service_name = 'compute'
# open a port
port_name = MPI.Open_port()
# bind the opened port to a service_name,
# client can connect to the port by looking-up this service_name
MPI.Publish_name(service_name, port_name)
# wait for client to connect
inter_comm = comm.Accept(port_name)

# receive message from client
recv_obj = inter_comm.recv(source=0, tag=0)
print('Server receives %s from client.' % recv_obj)
send_obj = eval(recv_obj)
# reply the result to the client
print('Server sends %s to client.' % send_obj)
inter_comm.send(send_obj, dest=0, tag=1)

# unpublish the service_name, close the port and disconnect
MPI.Unpublish_name(service_name, port_name)
inter_comm.Disconnect()
MPI.Close_port(port_name)
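A matching client.py sketch for this server, assuming an MPI name service is running (for example MPICH's hydra_nameserver) so that Lookup_name can resolve the published name:

# client.py
from mpi4py import MPI

comm = MPI.COMM_WORLD

service_name = 'compute'
# resolve the port the server published and connect to it
port_name = MPI.Lookup_name(service_name)
inter_comm = comm.Connect(port_name)

# send an expression string; the server eval()s it and replies on tag 1
inter_comm.send('40 + 2', dest=0, tag=0)
result = inter_comm.recv(source=0, tag=1)
print('Client receives %s from server.' % result)

inter_comm.Disconnect()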
Example #17
        path_config, 'tvb_to_nest_master' + str(id_first_spike_detector),
        log_level)

    # variables for communication between threads
    status_data = [0]
    initialisation = np.load(param['init'])
    buffer_spike = [initialisation]

    ### Create Com objects for communications
    info = MPI.INFO_NULL
    root = 0

    ##############################################
    # Create the port, file and set unlock for sender
    logger_master.info('Translate SEND: before open_port')
    port_send = MPI.Open_port(info)  # open a NEW port
    logger_master.info('Translate SEND: after open_port: ' + port_send)
    path_to_files_sends = []
    path_to_files_sends_unlock = []
    for i in range(nb_spike_generator):
        # write file with port and unlock
        path_to_files_send = os.path.join(
            path_config,
            str(id_first_spike_detector + i) + ".txt")
        fport_send = open(path_to_files_send, "w+")
        fport_send.write(port_send)
        fport_send.close()

        path_to_files_send_unlock = os.path.join(
            path_config,
            str(id_first_spike_detector + i) + ".txt.unlock")
Example #18
def input(path, file, nb_mpi, nb_run, time_sim):
    """
    Simulate some random spike train input
    :param path: the file for the configurations of the connection
    :param file: number of the device and the file connection
    :param nb_mpi: number of mpi of nest
    :param nb_run: the number of run of the simulation (should be more integrate)
    :param time_sim: time of one simulation
    :return:
    """
    logger = create_logger(path,name='input_'+file)
    datas = generate_current(int(file),nb_run,time_sim)
    #Start communication channels
    path_to_files = path + file + '.txt'
    #For NEST
    # Init connection
    logger.info("Waiting for port details")
    info = MPI.INFO_NULL
    root = 0
    port = MPI.Open_port(info)
    fport = open(path_to_files, "w+")
    fport.write(port)
    fport.close()
    logger.info('wait connection '+port)
    sys.stdout.flush()
    comm = MPI.COMM_WORLD.Accept(port, info, root)
    logger.info('connect to '+port)

    #test one rate
    status_ = MPI.Status()
    check = np.empty(1,dtype='b')
    source_sending = np.arange(0, nb_mpi, 1)  # list of all the processes for the communication
    starting = 1
    run_x = 0
    while True:
        tag = -1
        for source in source_sending:
            comm.Recv([check, 1, MPI.CXX_BOOL], source=source, tag=MPI.ANY_TAG, status=status_)
            if tag == -1:
                tag = status_.Get_tag()
            elif tag != status_.Get_tag():
                raise Exception('bad tags')

        logger.info(" start to send")
        logger.info(" status a tag "+str(status_.Get_tag()))
        if tag == 0:
            # receive list ids
            size_list = np.empty(1, dtype='i')
            shape = np.array(datas[0][run_x],dtype='i')
            data = np.array(np.concatenate(datas[1][run_x]),dtype='d')
            logger.info("shape data init"+str( data.shape)+ " shape  "+str(shape))
            for source in source_sending:
                logger.info(" source :"+str(source))
                comm.Recv([size_list, 1, MPI.INT], source=source, tag=0, status=status_)
                if size_list[0] != 0 :
                    logger.info("size list id"+str(size_list))
                    list_id = np.empty(size_list, dtype='i')
                    comm.Recv([list_id, size_list, MPI.INT], source=status_.Get_source(), tag=0, status=status_)
                    logger.info("id "+str(list_id))
                    data_send = np.concatenate(np.repeat([data],size_list[0]+1,axis=0))
                    logger.info(data_send)
                    send_shape = np.array(np.concatenate([[data_send.shape[0]], np.repeat(shape,size_list[0])]), dtype='i')
                    comm.Send([send_shape, MPI.INT], dest=status_.Get_source(), tag=list_id[0])
                    logger.info("shape data "+str(send_shape))
                    comm.Send([data_send, MPI.DOUBLE], dest=status_.Get_source(), tag=list_id[0])
                    logger.info("send data "+str( data.shape)+" data send "+str(data_send.shape))
            for source in source_sending:
                comm.Recv([check, 1, MPI.CXX_BOOL], source=source, tag=MPI.ANY_TAG, status=status_)
                logger.info("source "+str(source)+" end run")
            run_x += 1
            starting += 200
        elif tag == 2:
            logger.info("end simulation")
            logger.info("ending time : "+str(starting))
            break
        else:
            logger.info(tag)
            break
    comm.Disconnect()
    MPI.Close_port(port)
    os.remove(path_to_files)
    logger.info('exit')
    MPI.Finalize()
Example #19
def analyse(path, file, nb_mpi):
    """
    simulate the recorder module
    :param path: the file for the configurations of the connection
    :param file: number of the device and the file connection
    :param nb_mpi: number of mpi rank for testing multi-threading and MPI simulation
    :return:
    """
    data_save = []
    logger = create_logger(path, name='record_' + file)
    # Start communication channels
    path_to_files = path + file + '.txt'
    # For NEST
    # Init connection
    logger.info("Waiting for port details")
    info = MPI.INFO_NULL
    root = 0
    port = MPI.Open_port(info)
    fport = open(path_to_files, "w+")
    fport.write(port)
    fport.close()
    logger.info('wait connection ' + port)
    comm = MPI.COMM_WORLD.Accept(port, info, root)
    logger.info('connect to ' + port)

    #test one rate
    status_ = MPI.Status()
    check = np.empty(1, dtype='b')
    source_sending = np.arange(
        0, comm.Get_remote_size(),
        1)  # list of all the processes for the communication
    while True:
        comm.Recv([check, 1, MPI.CXX_BOOL],
                  source=MPI.ANY_SOURCE,
                  tag=MPI.ANY_TAG,
                  status=status_)
        logger.info(" start to send")
        logger.info(" status a tag " + str(status_.Get_tag()) + " source " +
                    str(status_.Get_source()))
        if status_.Get_tag() == 0:
            for i in range(nb_mpi - 1):
                comm.Recv([check, 1, MPI.CXX_BOOL],
                          source=MPI.ANY_SOURCE,
                          tag=0,
                          status=status_)
            for source in source_sending:
                logger.info("source is " + str(source))
                comm.Send([np.array(True, dtype='b'), MPI.BOOL],
                          dest=source,
                          tag=0)
                shape = np.empty(1, dtype='i')
                comm.Recv([shape, 1, MPI.INT],
                          source=source,
                          tag=0,
                          status=status_)
                logger.info("shape is " + str(shape[0]))
                data = np.empty(shape[0], dtype='d')
                comm.Recv([data, shape[0], MPI.DOUBLE],
                          source=status_.Get_source(),
                          tag=0,
                          status=status_)
                data_save.append(
                    copy.deepcopy(data).reshape(int(shape[0] / 3), 3))
                logger.info("data is " + str(data))
        elif status_.Get_tag() == 1:
            logger.info("end run")
            pass
        elif status_.Get_tag() == 2:
            for i in range(nb_mpi - 1):
                logger.info(" receive ending " + str(i))
                comm.Recv([check, 1, MPI.CXX_BOOL],
                          source=MPI.ANY_SOURCE,
                          tag=MPI.ANY_TAG,
                          status=status_)
            logger.info("end simulation")
            break
        else:
            logger.info(str(status_.Get_tag()))
            break
    comm.Disconnect()
    MPI.Close_port(port)
    os.remove(path_to_files)
    logger.info('exit')
    MPI.Finalize()
    np.save(path + '../recording_mpi_' + file + '.npy',
            data_save,
            allow_pickle=True)
Example #20
def internal__open_port(self, outportname):
    outportname.value = MPI.Open_port(None)
    return 0
Example #21
def internal__open_port(self, port_identifier):
    port_identifier.value = MPI.Open_port(self.get_null_info())
    return 0
Example #22
if name == 'Open MPI':
    if version < (3, 0, 0):
        SKIP_POOL_TEST = True
    if version == (4, 0, 0):
        SKIP_POOL_TEST = True
    if version == (4, 0, 1) and sys.platform == 'darwin':
        SKIP_POOL_TEST = True
    if version == (4, 0, 2) and sys.platform == 'darwin':
        SKIP_POOL_TEST = True
if name == 'MPICH':
    if sys.platform == 'darwin':
        if version >= (3, 4) and version < (4, 0):
            SKIP_POOL_TEST = True
    if MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is None:
        SKIP_POOL_TEST = True
    port = MPI.Open_port()
    if port == "":
        SKIP_POOL_TEST = True
    MPI.Close_port(port)
    del port
if name == 'MVAPICH2':
    SKIP_POOL_TEST = True
if name == 'MPICH2':
    if MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is None:
        SKIP_POOL_TEST = True
if name == 'Microsoft MPI':
    if version < (8, 1, 0):
        SKIP_POOL_TEST = True
    if MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is None:
        SKIP_POOL_TEST = True
if name == 'Platform MPI':
Example #23
from mpi4py import MPI

rank = MPI.COMM_WORLD.Get_rank()

info = MPI.INFO_NULL

port = MPI.Open_port(info)
print "Server port: '%s'", port

service = 'cpi'
MPI.Publish_name(service, info, port)
print('Service %s published' % service)

root = 0
print('Waiting for connection request')
comm = MPI.COMM_WORLD.Accept(port, info, root)
print('Connected to one client')

while True:

    message = comm.recv(source=0, tag=0)
    if message == 'quit':
        break
    else:
        print('Received one message from client: %s' % message)

comm.Disconnect()
print('Disconnected from the client')

MPI.Unpublish_name(service, info, port)
print('Service unpublished')
Example #24
def analyse(path):
    """
    simulate the recorder module
    :param path: the file for the configurations of the connection
    :return:
    """
    #Start communication channels
    path_to_files = path
    #For NEST
    # Init connection
    print("Waiting for port details")
    info = MPI.INFO_NULL
    root = 0
    port = MPI.Open_port(info)
    fport = open(path_to_files, "w+")
    fport.write(port)
    fport.close()
    print('wait connection ' + port)
    comm = MPI.COMM_WORLD.Accept(port, info, root)
    print('connect to ' + port)

    #test one rate
    status_ = MPI.Status()
    check = np.empty(1, dtype='b')
    while True:
        comm.Recv([check, 1, MPI.CXX_BOOL],
                  source=MPI.ANY_SOURCE,
                  tag=MPI.ANY_TAG,
                  status=status_)
        print(" start to send")
        sys.stdout.flush()
        print(" status a tag ", status_.Get_tag())
        sys.stdout.flush()
        if status_.Get_tag() == 0:
            comm.Send([np.array(True, dtype='b'), MPI.BOOL],
                      dest=status_.Get_source(),
                      tag=0)
            shape = np.empty(1, dtype='i')
            comm.Recv([shape, 1, MPI.INT],
                      source=status_.Get_source(),
                      tag=0,
                      status=status_)
            print("shape is", shape)
            sys.stdout.flush()
            data = np.empty(shape[0], dtype='d')
            comm.Recv([data, shape[0], MPI.DOUBLE],
                      source=status_.Get_source(),
                      tag=0,
                      status=status_)
            print("data is ", data)
            sys.stdout.flush()
            comm.Recv([check, 1, MPI.CXX_BOOL],
                      source=status_.Get_source(),
                      tag=MPI.ANY_TAG,
                      status=status_)
            print("end run")
            sys.stdout.flush()
        elif status_.Get_tag() == 2:
            print("end simulation")
            sys.stdout.flush()
            break
        else:
            print(status_.Get_tag())
            break
    comm.Disconnect()
    MPI.Close_port(port)
    os.remove(path_to_files)
    print('exit')
    MPI.Finalize()
def input(path, nb_mpi):
    """
    Simulate some random spike train input
    :param path: the file for the configurations of the connection
    :param nb_mpi: number of mpi rank for testing multi-threading and MPI simulation
    :return:
    """
    #Start communication channels
    path_to_files = path
    #For NEST
    # Init connection
    print("INPUT : Waiting for port details")
    info = MPI.INFO_NULL
    root = 0
    port = MPI.Open_port(info)
    fport = open(path_to_files, "w+")
    fport.write(port)
    fport.close()
    print('INPUT : wait connection '+port)
    sys.stdout.flush()
    comm = MPI.COMM_WORLD.Accept(port, info, root)
    print('INPUT : connect to '+port)

    #test one rate
    status_ = MPI.Status()
    check = np.empty(1,dtype='b')
    source_sending = np.arange(0, comm.Get_remote_size(), 1)  # list of all the processes for the communication
    starting = 1
    while True:
        comm.Recv([check, 1, MPI.CXX_BOOL], source=0, tag=MPI.ANY_TAG, status=status_)
        print("INPUT :  start to send"); sys.stdout.flush()
        print("INPUT :  status a tag ",status_.Get_tag() ); sys.stdout.flush()
        if status_.Get_tag() == 0:
            for source in source_sending:
                if source != 0:
                    comm.Recv([check, 1, MPI.CXX_BOOL], source=source, tag=MPI.ANY_TAG, status=status_)
                print("Input : source is", source); sys.stdout.flush()
                # receive list ids
                size_list = np.empty(1, dtype='i')
                comm.Recv([size_list, 1, MPI.INT], source=source, tag=0, status=status_)
                print("INPUT : size list id",size_list);sys.stdout.flush()
                if size_list[0] != 0:
                    list_id = np.empty(size_list, dtype='i')
                    comm.Recv([list_id, size_list, MPI.INT], source=source, tag=0, status=status_)
                    print("INPUT :  id ", list_id);sys.stdout.flush()
                    shape = np.random.randint(0,100,1,dtype='i')
                    data = starting+np.random.rand(shape[0])*200
                    data = np.around(np.sort(np.array(data,dtype='d')),decimals=1)
                    send_shape = np.array(np.concatenate([shape,shape]),dtype ='i')
                    comm.Send([send_shape, MPI.INT], dest=source, tag=list_id[0])
                    print("INPUT :  shape data ",shape);sys.stdout.flush()
                    comm.Send([data, MPI.DOUBLE], dest=source, tag=list_id[0])
                    print("INPUT :  send data", data);sys.stdout.flush()
            for source in source_sending:
                print("INPUT :  end");sys.stdout.flush()
                comm.Recv([check, 1, MPI.CXX_BOOL], source=source, tag=MPI.ANY_TAG, status=status_)
            print("INPUT : end run");sys.stdout.flush()
            starting += 200
        elif status_.Get_tag() == 2:
            for i in range(nb_mpi-1):
                print(" receive ending");sys.stdout.flush()
                comm.Recv([check, 1, MPI.CXX_BOOL], source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status_)
            print("INPUT : end simulation");sys.stdout.flush()
            print("INPUT : ending time : ",starting)
            break
        else:
            print(status_.Get_tag())
            break
    comm.Disconnect()
    MPI.Close_port(port)
    os.remove(path_to_files)
    print('INPUT : exit')
    MPI.Finalize()