Example 1
def close_and_finalize(port_send, port_receive, logger_master):
    # close port
    MPI.Close_port(port_send)
    MPI.Close_port(port_receive)
    logger_master.info('close communicator')
    # finalise MPI
    MPI.Finalize()
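For context, here is a minimal sketch of the setup that this teardown reverses. The names (open_ports, the two ports, logger_master) are assumptions chosen to mirror the example; the fixed points are only that each port must come from MPI.Open_port and that MPI.Finalize runs after all ports are closed.

from mpi4py import MPI

def open_ports(logger_master):
    # hypothetical counterpart to close_and_finalize above
    info = MPI.INFO_NULL
    port_send = MPI.Open_port(info)     # port for outgoing data
    port_receive = MPI.Open_port(info)  # port for incoming data
    logger_master.info('ports opened')
    return port_send, port_receive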
Example 2
    def close(self):
        MPI.Unpublish_name(self.service, self.info, self.port)
        print('[Server] Service unpublished')

        MPI.Close_port(self.port)
        print('[Server] Service port closed')
Example 3
def main_server(COMM):
    nprocs = COMM.Get_size()
    myrank = COMM.Get_rank()

    service, port, info = None, None, MPI.INFO_NULL
    if myrank == 0:
        port = MPI.Open_port(info)
        log(COMM, "open port '%s'", port)
        service = 'cpi'
        MPI.Publish_name(service, info, port)
        log(COMM, "service '%s' published.", service)
    else:
        port = ''

    log(COMM, "waiting for client connection ...")
    icomm = COMM.Accept(port, info, root=0)
    log(COMM, "client connection accepted.")

    worker(icomm)

    log(COMM, "disconnecting from client ...")
    icomm.Disconnect()
    log(COMM, "client disconnected.")

    if myrank == 0:
        MPI.Unpublish_name(service, info, port)
        log(COMM, "service '%s' unpublished", port)
        MPI.Close_port(port)
        log(COMM, "closed  port '%s' ", port)
Example 4
    def __del__(self):
        if self.procID == 0:
            MPI.Close_port(self.portName)
        self.SERVER_COMM_CLIENT.Barrier()
        self.SERVER_COMM_CLIENT.Disconnect()
        self.PARENT.Barrier()
        self.PARENT.Disconnect()
Example 5
def simulate_TVB_reception(path):
    '''
    simulate the receiver side of the NEST-to-TVB translator
    :param path: the path to the file with the connection details
    :return:
    '''
    # Init connection from file connection
    print(path)
    print("TVB INPUT : Waiting for port details")
    sys.stdout.flush()

    while not os.path.exists(path):
        print("Port file not found yet, retry in 1 second")
        time.sleep(1)

    fport = open(path, "r")
    port = fport.readline()
    fport.close()
    print("TVB INPUT :wait connection " + port)
    sys.stdout.flush()
    comm = MPI.COMM_WORLD.Connect(port)
    print('TVB INPUT :connect to ' + port)
    sys.stdout.flush()

    status_ = MPI.Status()
    while True:
        # ask the translator for the next chunk of data
        req = comm.isend(True, dest=1, tag=0)
        req.wait()

        times = np.empty(2, dtype='d')
        # the buffer dtype is 'd' (float64), so the MPI datatype must be
        # MPI.DOUBLE rather than MPI.FLOAT
        comm.Recv([times, MPI.DOUBLE], source=1, tag=0)
        # get the size of the rate
        size = np.empty(1, dtype='i')
        comm.Recv([size, MPI.INT], source=1, tag=0)
        # get the rate
        rates = np.empty(size, dtype='d')
        comm.Recv([rates, size, MPI.DOUBLE],
                  source=1,
                  tag=MPI.ANY_TAG,
                  status=status_)
        # print the summary of the data
        if status_.Get_tag() == 0:
            print("TVB INPUT :", comm.Get_rank(), times, np.sum(rates))
            sys.stdout.flush()
        else:
            break
        if times[1] > 9900:
            break
    # closing the connection at this end
    req = comm.isend(True, dest=1, tag=1)
    req.wait()
    print('TVB INPUT :end')
    sys.stdout.flush()
    comm.Disconnect()
    MPI.Close_port(port)
    print('TVB INPUT :exit')
    sys.stdout.flush()
    MPI.Finalize()
Example 6
def badport():
    if MPI.get_vendor()[0] != 'MPICH':
        return False
    try:
        port = MPI.Open_port()
        MPI.Close_port(port)
    except Exception:
        port = ""
    return port == ""
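A plausible use of this helper, modeled on the skip logic shown in Example 21 below (the SKIP_POOL_TEST flag there is the assumed pattern): skip dynamic connect/accept tests on MPICH builds where Open_port yields an empty string.

if badport():
    SKIP_POOL_TEST = True  # dynamic connect/accept would fail on this build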
Example 7
    def testNamePublishing(self):
        rank = MPI.COMM_WORLD.Get_rank()
        service = "mpi4py-%d" % rank
        port = MPI.Open_port()
        MPI.Publish_name(service, port)
        found = MPI.Lookup_name(service)
        self.assertEqual(port, found)
        MPI.Unpublish_name(service, port)
        MPI.Close_port(port)
Example 8
    def testConnectAccept(self):
        comm_self = MPI.COMM_SELF
        comm_world = MPI.COMM_WORLD
        wsize = comm_world.Get_size()
        wrank = comm_world.Get_rank()
        if wsize == 1:
            return
        group_world = comm_world.Get_group()
        group = group_world.Excl([0])
        group_world.Free()
        comm = comm_world.Create(group)
        group.Free()
        if wrank == 0:
            self.assertEqual(comm, MPI.COMM_NULL)
        else:
            self.assertNotEqual(comm, MPI.COMM_NULL)
            self.assertEqual(comm.size, comm_world.size - 1)
            self.assertEqual(comm.rank, comm_world.rank - 1)
        if wrank == 0:
            port = comm_world.recv(source=1)
            intercomm = comm_self.Connect(port)
            self.assertEqual(intercomm.remote_size, comm_world.size - 1)
            self.assertEqual(intercomm.size, 1)
            self.assertEqual(intercomm.rank, 0)
        else:
            if wrank == 1:
                port = MPI.Open_port()
                comm_world.send(port, dest=0)
            else:
                port = None
            intercomm = comm.Accept(port, root=0)
            if wrank == 1:
                MPI.Close_port(port)
            self.assertEqual(intercomm.remote_size, 1)
            self.assertEqual(intercomm.size, comm_world.size - 1)
            self.assertEqual(intercomm.rank, comm.rank)
            comm.Free()
        if wrank == 0:
            message = TestDPM.message
            root = MPI.ROOT
        else:
            message = None
            root = 0
        message = intercomm.bcast(message, root)
        if wrank == 0:
            self.assertEqual(message, None)
        else:
            self.assertEqual(message, TestDPM.message)
        intercomm.Free()
Example 9
def end_mpi(comm, path, sending, logger):
    """
    end the MPI communication
    :param comm: MPI communicator
    :param path: path to the file holding the port name
    :param sending: True if the translator sends data, False if it receives
    :return: nothing
    """
    # read the port name before the file is deleted
    fport = open(path, "r")
    port = fport.readline()
    fport.close()
    # different ending of the translator
    if sending:
        logger.info("TVB close connection send " + port)
        sys.stdout.flush()
        status_ = MPI.Status()
        # wait until the translator accepts the connection
        logger.info("TVB send check")
        accept = False
        while not accept:
            req = comm.irecv(source=0, tag=0)
            accept = req.wait(status_)
        logger.info("TVB send end simulation")
        source = status_.Get_source()  # the id of the expected source
        times = np.array([0., 0.],
                         dtype='d')  # start and end time of the step
        comm.Send([times, MPI.DOUBLE], dest=source, tag=1)
    else:
        logger.info("TVB close connection receive " + port)
        # tell the translator we want the next part
        req = comm.isend(True, dest=1, tag=1)
        req.wait()
    # closing the connection at this end
    logger.info("TVB disconnect communication")
    comm.Disconnect()
    logger.info("TVB close " + port)
    MPI.Close_port(port)
    logger.info("TVB close connection " + port)
    return
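A hypothetical call site for end_mpi, assuming comm came from MPI.COMM_WORLD.Connect on the port stored in the same file as path, and logger is any configured logging.Logger (as in the surrounding examples):

fport = open(path, "r")
port = fport.readline()
fport.close()
comm = MPI.COMM_WORLD.Connect(port)
# ... exchange simulation data with the translator ...
end_mpi(comm, path, sending=True, logger=logger)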
Example 10
    logger_receive = create_logger(
        path_config, 'tvb_to_nest_receive' + str(id_first_spike_detector),
        log_level)
    # create the threads for receiving and sending data
    th_send = Thread(target=send,
                     args=(logger_send, id_first_spike_detector, status_data,
                           buffer_spike, comm_send))
    th_receive = Thread(target=receive,
                        args=(logger_receive, generator, status_data,
                              buffer_spike, comm_receive))

    # start the threads
    # FAT END POINT
    logger_master.info('Start thread')
    th_receive.start()
    th_send.start()
    th_receive.join()
    th_send.join()
    logger_master.info('thread join')
    MPI.Close_port(port_send)
    MPI.Close_port(port_receive)
    logger_master.info('close communicator')
    MPI.Finalize()

    logger_master.info('clean file')
    # Clean up port files and locks
    for path_send in path_to_files_sends:
        os.remove(path_send)
    os.remove(path_to_files_receive)
    logger_master.info('end')
Example 11
def simulate_spike_detector(path, min_delay):
    '''
    simulate spike detector output for testing the NEST-to-TVB translator input
    :param path: the path to the file with the connection details
    :param min_delay: the duration of one simulation window
    :return:
    '''
    # Init connection from file connection
    print(path)
    print("Nest Output : Waiting for port details")
    sys.stdout.flush()

    while not os.path.exists(path):
        print("Port file not found yet, retry in 1 second")
        time.sleep(1)

    fport = open(path, "r")
    port = fport.readline()
    fport.close()
    print('Nest Output : wait connection ' + port)
    sys.stdout.flush()
    comm = MPI.COMM_WORLD.Connect(port)
    print('Nest Output : connect to ' + port)
    sys.stdout.flush()

    starting = 0.0  # the beginning of each synchronization window
    check = np.empty(1, dtype='b')
    status_ = MPI.Status()  # status of the different messages
    while True:
        # wait until the translator accepts the connection
        comm.Send([np.array([True], dtype='b'), 1, MPI.CXX_BOOL],
                  dest=0,
                  tag=0)
        comm.Recv([check, 1, MPI.CXX_BOOL],
                  source=MPI.ANY_SOURCE,
                  tag=0,
                  status=status_)
        # create random data
        size = np.random.randint(0, 1000)
        times = starting + np.random.rand(size) * (min_delay - 0.2)
        times = np.around(np.sort(np.array(times)), decimals=1)
        id_neurons = np.random.randint(0, 10, size)
        id_detector = np.random.randint(0, 10, size)
        data = np.ascontiguousarray(np.swapaxes(
            [id_detector, id_neurons, times], 0, 1),
                                    dtype='d')
        # send data one by one like spike generator
        comm.Send([np.array([size * 3], dtype='i'), 1, MPI.INT],
                  dest=status_.Get_source(),
                  tag=0)
        comm.Send([data, size * 3, MPI.DOUBLE],
                  dest=status_.Get_source(),
                  tag=0)
        # ending the actual run
        comm.Send([np.array([True], dtype='b'), 1, MPI.CXX_BOOL],
                  dest=0,
                  tag=1)
        # print result and go to the next run
        print("Nest Output : ", comm.Get_rank(), size)
        sys.stdout.flush()
        starting += min_delay
        if starting > 10000:
            break
    # closing the connection at this end
    print("Nest Output : ending")
    sys.stdout.flush()
    # send the signal for end the translation
    comm.Send([np.array([True], dtype='b'), 1, MPI.CXX_BOOL], dest=0, tag=2)
    print("Nest Output : ending")
    sys.stdout.flush()
    comm.Disconnect()
    MPI.Close_port(port)
    MPI.Finalize()
    print('Nest Output : exit')
    sys.stdout.flush()
Example 12
                    root = 0
                message = intercomm.bcast(message, root)
                if rank == 0:
                    self.assertEqual(message, None)
                else:
                    self.assertEqual(message, TestDPM.message)
                intercomm.Free()
        MPI.COMM_WORLD.Barrier()


name, version = MPI.get_vendor()
if name == 'MPICH' or name == 'MPICH2':
    if MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is None:
        del TestDPM.testNamePublishing
elif name == 'Open MPI':
    del TestDPM
elif name == 'MVAPICH2':
    del TestDPM
elif name == 'Microsoft MPI':
    del TestDPM
elif name == 'Platform MPI':
    del TestDPM.testNamePublishing
else:
    try:
        MPI.Close_port(MPI.Open_port())
    except NotImplementedError:
        del TestDPM

if __name__ == '__main__':
    unittest.main()
Example 13
        logger_master.info('Translate Receive: path_file: ' + path_to_files)
        # Wait until connection
        logger_master.info('Waiting communication')
        comm_receiver = MPI.COMM_WORLD.Accept(port_receive, info, root)
        logger_master.info('get communication and start thread')
        #########################

        # create the thread for receive and save data
        logger_receive = create_logger(path_folder_config,
                                       'nest_to_tvb_receive', level_log)
        logger_save = create_logger(path_folder_config, 'nest_to_tvb_send',
                                    level_log)
        th_receive = Thread(target=receive,
                            args=(logger_receive, store, status_data, buffer,
                                  comm_receiver))
        th_save = Thread(target=save,
                         args=(path_folder_save, logger_save, nb_step,
                               step_save, status_data, buffer))

        # start the threads
        # FAT END POINT
        logger_master.info('start thread')
        th_receive.start()
        th_save.start()
        th_receive.join()
        th_save.join()
        logger_master.info('join thread')
        MPI.Close_port(port_receive)
        MPI.Finalize()
    else:
        print('missing argument')
Example 14
def input(path):
    """
    Simulate some random current input
    :param path: the file for the configurations of the connection
    :return:
    """
    # Start communication channels
    path_to_files = path
    # For NEST
    # Init connection
    print("Waiting for port details")
    info = MPI.INFO_NULL
    root = 0
    port = MPI.Open_port(info)
    fport = open(path_to_files, "w+")
    fport.write(port)
    fport.close()
    print('wait connection ' + port)
    sys.stdout.flush()
    comm = MPI.COMM_WORLD.Accept(port, info, root)
    print('connect to ' + port)

    #test one rate
    status_ = MPI.Status()
    check = np.empty(1, dtype='b')
    starting = 1
    while True:
        comm.Recv([check, 1, MPI.CXX_BOOL],
                  source=0,
                  tag=MPI.ANY_TAG,
                  status=status_)
        print(" start to send")
        sys.stdout.flush()
        print(" status a tag ", status_.Get_tag())
        sys.stdout.flush()
        if status_.Get_tag() == 0:
            # receive list ids
            size_list = np.empty(1, dtype='i')
            comm.Recv([size_list, 1, MPI.INT], source=0, tag=0, status=status_)
            print("size list id", size_list)
            sys.stdout.flush()
            list_id = np.empty(size_list, dtype='i')
            comm.Recv([list_id, size_list, MPI.INT],
                      source=0,
                      tag=0,
                      status=status_)
            print(" id ", list_id)
            sys.stdout.flush()
            shape = np.random.randint(0, 50, 1, dtype='i') * 2
            data = starting + np.random.rand(shape[0]) * 200
            data = np.around(np.sort(np.array(data, dtype='d')), decimals=1)
            send_shape = np.array(np.concatenate([shape, shape]), dtype='i')
            comm.Send([send_shape, MPI.INT],
                      dest=status_.Get_source(),
                      tag=list_id[0])
            print(" shape data ", shape)
            sys.stdout.flush()
            comm.Send([data, MPI.DOUBLE],
                      dest=status_.Get_source(),
                      tag=list_id[0])
            print(" send data", data)
            sys.stdout.flush()
            comm.Recv([check, 1, MPI.CXX_BOOL],
                      source=status_.Get_source(),
                      tag=MPI.ANY_TAG,
                      status=status_)
            print("end run")
            sys.stdout.flush()
            starting += 200
        elif (status_.Get_tag() == 2):
            print("end simulation")
            sys.stdout.flush()
            print("ending time : ", starting)
            sys.stdout.flush()
            break
        else:
            print(status_.Get_tag())
            break
    comm.Disconnect()
    MPI.Close_port(port)
    os.remove(path_to_files)
    print('exit')
    MPI.Finalize()
Example 15
    def __server(self):
        """
            Server side logic for the client/server MPI communication approach
            NOTE: check notes below where the method is called
        Returns:

        """
        if self.__mpi_cw_rank == 0:
            try:
                file_object = open(self.__mmap_path_filename, 'rb+')
            except FileNotFoundError:
                self.error('{} could not be opened'.format(
                    self.__mmap_path_filename))
                return RETURN_NOT_OK

            try:
                mmap_object = mmap.mmap(file_object.fileno(),
                                        length=0,
                                        access=mmap.ACCESS_WRITE,
                                        offset=0)
            except (ValueError, OSError):  # mmap.mmap raises these, not SyntaxError
                self.error('{} could not be mmaped'.format(
                    self.__mmap_path_filename))
                return RETURN_NOT_OK

            # Open Listening Port
            try:
                self.__mpi_info = MPI.INFO_NULL
                self.__mpi_port_name = MPI.Open_port(self.__mpi_info)
                self.__mpi_port_name_length = len(self.__mpi_port_name)
            except MPI.Exception as ierr:
                self.report_mpi_error(mpi_ierr=ierr,
                                      mpi_operation_name='MPI.Open_port')
                return RETURN_NOT_OK

            # publishing the port name by means of the mmaped file
            mmap_object.seek(0)
            mmap_object[MemMap.PORT_LENGTH_LOC:MemMap.PORT_LENGTH_LENGTH] = \
                self.__mpi_port_name_length.to_bytes(length=MemMap.PORT_LENGTH_LENGTH, byteorder='big')

            mmap_object[MemMap.PORT_NAME_LOC:MemMap.PORT_NAME_LOC + self.__mpi_port_name_length] = \
                bytes('{}'.format(self.__mpi_port_name), 'utf8', )
            mmap_object.flush()

            #
            self.__mpi_expected_source = 0
        else:
            self.__mpi_expected_source = MPI.PROC_NULL

        self.info('waiting for client connection {}'.format(
            self.__mpi_port_name))
        self.__mpi_comm = self.__MPI_COMM_WORLD.Accept(self.__mpi_port_name,
                                                       self.__mpi_info,
                                                       self.__mpi_root)

        self.info('client connected, waiting for PING value')
        ping_value = self.__mpi_comm.recv(source=self.__mpi_expected_source,
                                          tag=self.__mpi_message_tag)

        if self.__mpi_cw_rank == 0:
            if ping_value == self.__ping_value:
                self.info('Got expected PING value {} from client'.format(
                    ping_value))
            else:
                self.info(
                    'expected PING value from client should be {}, received {}'
                    .format(self.__ping_value, ping_value))
                self.__mpi_comm.Disconnect()
                return RETURN_NOT_OK
        else:
            # finishing server running on rank > 0 MPI processes
            self.info('Got {} from client'.format(ping_value))
            return RETURN_OK

        self.info('sending PONG value')
        self.__mpi_comm.send(self.__pong_value,
                             dest=0,
                             tag=self.__mpi_message_tag)

        while True:
            self.info('Waiting for the poison pill')
            poison_pill = self.__mpi_comm.recv(
                source=self.__mpi_expected_source, tag=self.__mpi_message_tag)

            if poison_pill is None:
                break

            self.info('by waiting for poison pill, {} was received'.format(
                poison_pill))

        self.__mpi_comm.Disconnect()

        if self.__mpi_cw_rank == 0:
            MPI.Close_port(self.__mpi_port_name)
        self.info('Server Processing Done')

        return RETURN_OK
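The client side of this ping/pong handshake is not shown. A minimal counterpart sketch follows, assuming the same MemMap layout (with PORT_LENGTH_LOC at offset 0, as the server's slice bounds imply), the same ping/pong constants and message tag, and a single-rank client:

    def __client(self):
        # recover the port name the server published through the mmaped file
        with open(self.__mmap_path_filename, 'rb') as file_object:
            mmap_object = mmap.mmap(file_object.fileno(), length=0,
                                    access=mmap.ACCESS_READ)
        port_name_length = int.from_bytes(
            mmap_object[MemMap.PORT_LENGTH_LOC:MemMap.PORT_LENGTH_LENGTH],
            byteorder='big')
        port_name = mmap_object[
            MemMap.PORT_NAME_LOC:MemMap.PORT_NAME_LOC + port_name_length
        ].decode('utf8')

        comm = MPI.COMM_WORLD.Connect(port_name, MPI.INFO_NULL, 0)
        comm.send(self.__ping_value, dest=0, tag=self.__mpi_message_tag)
        pong_value = comm.recv(source=0, tag=self.__mpi_message_tag)
        self.info('received PONG value {}'.format(pong_value))
        comm.send(None, dest=0, tag=self.__mpi_message_tag)  # poison pill
        comm.Disconnect()
        return RETURN_OK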
Example 16
def simulate_TVB_output(path, min_delay):
    '''
    simulate the TVB output side that feeds the tvb_to_nest translator
    :param path: the path to the file with the connection details
    :param min_delay: the duration of one simulation window
    :return:
    '''

    print("TVB_OUTPUT : Waiting for port details")
    sys.stdout.flush()
    while not os.path.exists(path):
        print("Port file not found yet, retry in 1 second")
        time.sleep(1)
    '''
    ### OLD Code
    ### TODO: further investigate the '.unlock' file approach
    max_mpi_connection_attempts = 50
    file_unlock=False
    for attempt in range(max_mpi_connection_attempts):
        print("file to read",path);sys.stdout.flush()
        if os.path.exists(path+".unlock"):
            print ("MPI connection file available after t={0} seconds".format(attempt));sys.stdout.flush()
            file_unlock=True
            break

    if file_unlock is False:
        print("Could file not unlocked after 20 attempts, exit");sys.stdout.flush()
        sys.exit (1)
    '''

    fport = open(path, "r")
    port = fport.readline()
    fport.close()
    print('TVB_OUTPUT :wait connection: ' + port + ": " + path)
    sys.stdout.flush()
    comm = MPI.COMM_WORLD.Connect(port)
    print('TVB_OUTPUT :connect to ' + port)
    sys.stdout.flush()

    status_ = MPI.Status()
    starting = 0.0  # the beginning of each synchronization window
    while True:
        # wait until the translator accepts the connection
        accept = False
        print("TVB_OUTPUT :wait acceptation")
        sys.stdout.flush()
        while not accept:
            req = comm.irecv(source=0, tag=0)
            accept = req.wait(status_)
        print("TVB_OUTPUT :accepted")
        sys.stdout.flush()
        # TODO: the irecv above is from source 0, so 'source = status_.Get_source()' will be 0.
        # TODO: If the goal was to send from multiple TVB ranks to multiple sources, this needs some work.
        # TODO: essentially this would be an M:N coupling then
        source = status_.Get_source()  # the id of the expected source
        # create random data
        size = int(min_delay / 0.1)
        rate = np.random.rand(size) * 400
        data = np.ascontiguousarray(rate,
                                    dtype='d')  # format the rate for sending
        shape = np.array(data.shape[0], dtype='i')  # size of data
        times = np.array([starting, starting + min_delay],
                         dtype='d')  # start and end time of the step
        print("TVB_OUTPUT :send time : " + str(times))
        sys.stdout.flush()
        comm.Send([times, MPI.DOUBLE], dest=source, tag=0)
        print("TVB_OUTPUT :send shape : " + str(shape))
        sys.stdout.flush()
        comm.Send([shape, MPI.INT], dest=source, tag=0)
        print("TVB_OUTPUT :send data : " + str(np.sum(np.sum(data))))
        sys.stdout.flush()
        print("TVB_OUTPUT :send data array : ", data.shape)
        sys.stdout.flush()
        comm.Send([data, MPI.DOUBLE], dest=source, tag=0)
        # print result and go to the next run
        starting += min_delay
        if starting > 10000:
            break
    print("TVB_OUTPUT :ending")
    sys.stdout.flush()
    accept = False
    print("TVB_OUTPUT :wait acceptation")
    sys.stdout.flush()
    while not accept:
        req = comm.irecv(source=0, tag=0)
        accept = req.wait(status_)
    print("TVB_OUTPUT :ending 2")
    sys.stdout.flush()
    comm.Send([times, MPI.DOUBLE], dest=0, tag=1)
    comm.Disconnect()
    MPI.Close_port(port)
    print('TVB_OUTPUT :exit')
    MPI.Finalize()
Example 17
def analyse(path, file, nb_mpi):
    """
    simulate the recorder module
    :param path: the file for the configurations of the connection
    :param file: number of the device and the file connection
    :param nb_mpi: number of mpi rank for testing multi-threading and MPI simulation
    :return:
    """
    data_save = []
    logger = create_logger(path, name='record_' + file)
    # Start communication channels
    path_to_files = path + file + '.txt'
    # For NEST
    # Init connection
    logger.info("Waiting for port details")
    info = MPI.INFO_NULL
    root = 0
    port = MPI.Open_port(info)
    fport = open(path_to_files, "w+")
    fport.write(port)
    fport.close()
    logger.info('wait connection ' + port)
    comm = MPI.COMM_WORLD.Accept(port, info, root)
    logger.info('connect to ' + port)

    #test one rate
    status_ = MPI.Status()
    check = np.empty(1, dtype='b')
    source_sending = np.arange(
        0, comm.Get_remote_size(),
        1)  # list of all the processes in the communication
    while True:
        comm.Recv([check, 1, MPI.CXX_BOOL],
                  source=MPI.ANY_SOURCE,
                  tag=MPI.ANY_TAG,
                  status=status_)
        logger.info(" start to send")
        logger.info(" status a tag " + str(status_.Get_tag()) + " source " +
                    str(status_.Get_source()))
        if status_.Get_tag() == 0:
            for i in range(nb_mpi - 1):
                comm.Recv([check, 1, MPI.CXX_BOOL],
                          source=MPI.ANY_SOURCE,
                          tag=0,
                          status=status_)
            for source in source_sending:
                logger.info("source is " + str(source))
                comm.Send([np.array(True, dtype='b'), MPI.BOOL],
                          dest=source,
                          tag=0)
                shape = np.empty(1, dtype='i')
                comm.Recv([shape, 1, MPI.INT],
                          source=source,
                          tag=0,
                          status=status_)
                logger.info("shape is " + str(shape[0]))
                data = np.empty(shape[0], dtype='d')
                comm.Recv([data, shape[0], MPI.DOUBLE],
                          source=status_.Get_source(),
                          tag=0,
                          status=status_)
                data_save.append(
                    copy.deepcopy(data).reshape(int(shape[0] / 3), 3))
                logger.info("data is " + str(data))
        elif status_.Get_tag() == 1:
            logger.info("end run")
            pass
        elif status_.Get_tag() == 2:
            for i in range(nb_mpi - 1):
                logger.info(" receive ending " + str(i))
                comm.Recv([check, 1, MPI.CXX_BOOL],
                          source=MPI.ANY_SOURCE,
                          tag=MPI.ANY_TAG,
                          status=status_)
            logger.info("end simulation")
            break
        else:
            logger.info(str(status_.Get_tag()))
            break
    comm.Disconnect()
    MPI.Close_port(port)
    os.remove(path_to_files)
    logger.info('exit')
    MPI.Finalize()
    np.save(path + '../recording_mpi_' + file + '.npy',
            data_save,
            allow_pickle=True)
Example 18
def input(path, file, nb_mpi, nb_run, time_sim):
    """
    Simulate some random spike train input
    :param path: the file for the configurations of the connection
    :param file: number of the device and the file connection
    :param nb_mpi: number of MPI ranks of NEST
    :param nb_run: the number of runs of the simulation (should be better integrated)
    :param time_sim: duration of one run
    :return:
    """
    logger = create_logger(path, name='input_' + file)
    datas = generate_current(int(file), nb_run, time_sim)
    # Start communication channels
    path_to_files = path + file + '.txt'
    # For NEST
    # Init connection
    logger.info("Waiting for port details")
    info = MPI.INFO_NULL
    root = 0
    port = MPI.Open_port(info)
    fport = open(path_to_files, "w+")
    fport.write(port)
    fport.close()
    logger.info('wait connection ' + port)
    sys.stdout.flush()
    comm = MPI.COMM_WORLD.Accept(port, info, root)
    logger.info('connect to ' + port)

    # test one rate
    status_ = MPI.Status()
    check = np.empty(1, dtype='b')
    source_sending = np.arange(0, nb_mpi, 1)  # list of all the processes in the communication
    starting = 1
    run_x = 0
    while True:
        tag = -1
        for source in source_sending:
            comm.Recv([check, 1, MPI.CXX_BOOL], source=source, tag=MPI.ANY_TAG, status=status_)
            if tag == -1:
                tag = status_.Get_tag()
            elif tag != status_.Get_tag():
                raise Exception('bad tags')

        logger.info(" start to send")
        logger.info(" status a tag "+str(status_.Get_tag()))
        if tag == 0:
            # receive list ids
            size_list = np.empty(1, dtype='i')
            shape = np.array(datas[0][run_x],dtype='i')
            data = np.array(np.concatenate(datas[1][run_x]),dtype='d')
            logger.info("shape data init"+str( data.shape)+ " shape  "+str(shape))
            for source in source_sending:
                logger.info(" source :"+str(source))
                comm.Recv([size_list, 1, MPI.INT], source=source, tag=0, status=status_)
                if size_list[0] != 0 :
                    logger.info("size list id"+str(size_list))
                    list_id = np.empty(size_list, dtype='i')
                    comm.Recv([list_id, size_list, MPI.INT], source=status_.Get_source(), tag=0, status=status_)
                    logger.info("id "+str(list_id))
                    data_send = np.concatenate(np.repeat([data],size_list[0]+1,axis=0))
                    logger.info(data_send)
                    send_shape = np.array(np.concatenate([[data_send.shape[0]], np.repeat(shape,size_list[0])]), dtype='i')
                    comm.Send([send_shape, MPI.INT], dest=status_.Get_source(), tag=list_id[0])
                    logger.info("shape data "+str(send_shape))
                    comm.Send([data_send, MPI.DOUBLE], dest=status_.Get_source(), tag=list_id[0])
                    logger.info("send data "+str( data.shape)+" data send "+str(data_send.shape))
            for source in source_sending:
                comm.Recv([check, 1, MPI.CXX_BOOL], source=source, tag=MPI.ANY_TAG, status=status_)
                logger.info("source "+str(source)+" end run")
            run_x += 1
            starting += 200
        elif tag == 2:
            logger.info("end simulation")
            logger.info("ending time : " + str(starting))
            break
        else:
            logger.info(str(tag))
            break
    comm.Disconnect()
    MPI.Close_port(port)
    os.remove(path_to_files)
    logger.info('exit')
    MPI.Finalize()
Example 19
def analyse(path):
    """
    simulate the recorder module
    :param path: the file for the configurations of the connection
    :return:
    """
    # Start communication channels
    path_to_files = path
    # For NEST
    # Init connection
    print("Waiting for port details")
    info = MPI.INFO_NULL
    root = 0
    port = MPI.Open_port(info)
    fport = open(path_to_files, "w+")
    fport.write(port)
    fport.close()
    print('wait connection ' + port)
    comm = MPI.COMM_WORLD.Accept(port, info, root)
    print('connect to ' + port)

    # test one rate
    status_ = MPI.Status()
    check = np.empty(1, dtype='b')
    while True:
        comm.Recv([check, 1, MPI.CXX_BOOL],
                  source=MPI.ANY_SOURCE,
                  tag=MPI.ANY_TAG,
                  status=status_)
        print(" start to send")
        sys.stdout.flush()
        print(" status a tag ", status_.Get_tag())
        sys.stdout.flush()
        if status_.Get_tag() == 0:
            comm.Send([np.array(True, dtype='b'), MPI.BOOL],
                      dest=status_.Get_source(),
                      tag=0)
            shape = np.empty(1, dtype='i')
            comm.Recv([shape, 1, MPI.INT],
                      source=status_.Get_source(),
                      tag=0,
                      status=status_)
            print("shape is", shape)
            sys.stdout.flush()
            data = np.empty(shape[0], dtype='d')
            comm.Recv([data, shape[0], MPI.DOUBLE],
                      source=status_.Get_source(),
                      tag=0,
                      status=status_)
            print("data is ", data)
            sys.stdout.flush()
            comm.Recv([check, 1, MPI.CXX_BOOL],
                      source=status_.Get_source(),
                      tag=MPI.ANY_TAG,
                      status=status_)
            print("end run")
            sys.stdout.flush()
        elif status_.Get_tag() == 2:
            print("end simulation")
            sys.stdout.flush()
            break
        else:
            print(status_.Get_tag())
            break
    comm.Disconnect()
    MPI.Close_port(port)
    os.remove(path_to_files)
    print('exit')
    MPI.Finalize()
Example 20
Run this with 1 process like:
$ mpiexec -n 1 python server.py
"""

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD

service_name = 'compute'
# open a port
port_name = MPI.Open_port()
# bind the opened port to a service_name,
# client can connect to the port by looking-up this service_name
MPI.Publish_name(service_name, port_name)
# wait for client to connect
inter_comm = comm.Accept(port_name)

# receive message from client
recv_obj = inter_comm.recv(source=0, tag=0)
print('Server receives %s from client.' % recv_obj)
send_obj = eval(recv_obj)
# reply the result to the client
print('Server sends %s to client.' % send_obj)
inter_comm.send(send_obj, dest=0, tag=1)

# unpublish the service_name, close the port and disconnect
MPI.Unpublish_name(service_name, port_name)
MPI.Close_port(port_name)
inter_comm.Disconnect()
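The matching client is not shown; a sketch of a hypothetical client.py follows, assuming the same 'compute' service name and tags. It looks up the published name, sends an expression string for the server to eval, and reads back the result.

Run this with 1 process like:
$ mpiexec -n 1 python client.py

from mpi4py import MPI

comm = MPI.COMM_WORLD

service_name = 'compute'
# look up the port bound to service_name and connect to it
port_name = MPI.Lookup_name(service_name)
inter_comm = comm.Connect(port_name)

# send an expression for the server to evaluate
send_obj = '1 + 2'
inter_comm.send(send_obj, dest=0, tag=0)
recv_obj = inter_comm.recv(source=0, tag=1)
print('Client receives %s from server.' % recv_obj)

inter_comm.Disconnect()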
Example 21
    if version == (4, 0, 0):
        SKIP_POOL_TEST = True
    if version == (4, 0, 1) and sys.platform == 'darwin':
        SKIP_POOL_TEST = True
    if version == (4, 0, 2) and sys.platform == 'darwin':
        SKIP_POOL_TEST = True
if name == 'MPICH':
    if sys.platform == 'darwin':
        if version >= (3, 4) and version < (4, 0):
            SKIP_POOL_TEST = True
    if MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is None:
        SKIP_POOL_TEST = True
    port = MPI.Open_port()
    if port == "":
        SKIP_POOL_TEST = True
    MPI.Close_port(port)
    del port
if name == 'MVAPICH2':
    SKIP_POOL_TEST = True
if name == 'MPICH2':
    if MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is None:
        SKIP_POOL_TEST = True
if name == 'Microsoft MPI':
    if version < (8, 1, 0):
        SKIP_POOL_TEST = True
    if MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is None:
        SKIP_POOL_TEST = True
if name == 'Platform MPI':
    SKIP_POOL_TEST = True
if MPI.Get_version() < (2, 0):
    SKIP_POOL_TEST = True
Example 22
def simulate_nest_generator(path):
    '''
    simulate the spike generator side of the tvb_to_nest translator
    :param path: the path to the file with the connection details
    :return:
    '''
    # Init connection

    max_mpi_connection_attempts = 50
    file_unlock = False
    for attempt in range(max_mpi_connection_attempts):
        print("file to read", path)
        sys.stdout.flush()
        if os.path.exists(path + ".unlock"):
            print("MPI connection file available after t={0} seconds".format(
                attempt))
            sys.stdout.flush()
            file_unlock = True
            break
        # wait between attempts; the message above assumes one attempt per second
        time.sleep(1)

    if file_unlock is False:
        print("File could not be unlocked after {0} attempts, exiting".format(
            max_mpi_connection_attempts))
        sys.stdout.flush()
        sys.exit(1)

    print("Nest_Input:" + path)
    print("Nest_Input :Waiting for port details")
    sys.stdout.flush()
    fport = open(path, "r")
    port = fport.readline()
    fport.close()
    print("Nest_Input :wait connection " + port)
    sys.stdout.flush()
    comm = MPI.COMM_WORLD.Connect(port)
    print('Nest_Input :connect to ' + port)
    sys.stdout.flush()

    status_ = MPI.Status()
    ids = np.arange(0, 10, 1)  # random id of spike detector
    print(ids)
    sys.stdout.flush()
    while True:
        # Send start simulation
        comm.Send([np.array([True], dtype='b'), MPI.CXX_BOOL], dest=0, tag=0)
        comm.Send([np.array(10, dtype='i'), MPI.INT], dest=0, tag=0)
        # send ID of spike generator
        comm.Send([np.array(ids, dtype='i'), MPI.INT], dest=0, tag=0)
        # receive the number of spikes for updating the spike detector
        size = np.empty(11, dtype='i')
        comm.Recv([size, 11, MPI.INT], source=0, tag=ids[0], status=status_)
        print("Nest_Input (" + str(ids[0]) + ") :receive size : " + str(size))
        sys.stdout.flush()
        # receive the spikes for updating the spike detector
        data = np.empty(size[0], dtype='d')
        comm.Recv([data, size[0], MPI.DOUBLE],
                  source=0,
                  tag=ids[0],
                  status=status_)
        print("Nest_Input (" + str(id) + ") : " + str(np.sum(data)))
        sys.stdout.flush()
        # printing value and exit
        print("Nest_Input: Before print ")
        sys.stdout.flush()
        if ids[0] == 0:
            print("Nest_Input:" +
                  str([ids[0], data, np.sum(data)]))
            sys.stdout.flush()
        print("Nest_Input: debug end of loop")
        sys.stdout.flush()
        # send the end of the run of the simulation
        print("Nest_Input: Debug before send")
        sys.stdout.flush()
        comm.Send([np.array([True], dtype='b'), MPI.CXX_BOOL], dest=0, tag=1)
        print("Nest_Input: Debug after  send")
        sys.stdout.flush()

        print("Nest_Input: before break")
        sys.stdout.flush()
        # print ("Nest_Input: before break" + str(data > 10000));sys.stdout.flush()
        if np.any(data > 10000):
            break

    # closing the connection at this end
    print('Nest_Input : Disconnect')
    comm.Send([np.array([True], dtype='b'), MPI.CXX_BOOL], dest=0, tag=2)
    comm.Disconnect()
    MPI.Close_port(port)
    print('Nest_Input :exit')
    MPI.Finalize()
Example 23
def input(path, nb_mpi):
    """
    Simulate some random spike train input
    :param path: the file for the configurations of the connection
    :param nb_mpi: number of MPI ranks, for testing multi-threading and MPI simulation
    :return:
    """
    # Start communication channels
    path_to_files = path
    # For NEST
    # Init connection
    print("INPUT : Waiting for port details")
    info = MPI.INFO_NULL
    root = 0
    port = MPI.Open_port(info)
    fport = open(path_to_files, "w+")
    fport.write(port)
    fport.close()
    print('INPUT : wait connection ' + port)
    sys.stdout.flush()
    comm = MPI.COMM_WORLD.Accept(port, info, root)
    print('INPUT : connect to ' + port)

    # test one rate
    status_ = MPI.Status()
    check = np.empty(1, dtype='b')
    source_sending = np.arange(0, comm.Get_remote_size(), 1)  # list of all the processes in the communication
    starting = 1
    while True:
        comm.Recv([check, 1, MPI.CXX_BOOL], source=0, tag=MPI.ANY_TAG, status=status_)
        print("INPUT :  start to send"); sys.stdout.flush()
        print("INPUT :  status a tag ",status_.Get_tag() ); sys.stdout.flush()
        if status_.Get_tag() == 0:
            for source in source_sending:
                if source != 0:
                    comm.Recv([check, 1, MPI.CXX_BOOL], source=source, tag=MPI.ANY_TAG, status=status_)
                print("Input : source is", source); sys.stdout.flush()
                # receive list ids
                size_list = np.empty(1, dtype='i')
                comm.Recv([size_list, 1, MPI.INT], source=source, tag=0, status=status_)
                print("INPUT : size list id",size_list);sys.stdout.flush()
                if size_list[0] != 0:
                    list_id = np.empty(size_list, dtype='i')
                    comm.Recv([list_id, size_list, MPI.INT], source=source, tag=0, status=status_)
                    print("INPUT :  id ", list_id);sys.stdout.flush()
                    shape = np.random.randint(0,100,1,dtype='i')
                    data = starting+np.random.rand(shape[0])*200
                    data = np.around(np.sort(np.array(data,dtype='d')),decimals=1)
                    send_shape = np.array(np.concatenate([shape,shape]),dtype ='i')
                    comm.Send([send_shape, MPI.INT], dest=source, tag=list_id[0])
                    print("INPUT :  shape data ",shape);sys.stdout.flush()
                    comm.Send([data, MPI.DOUBLE], dest=source, tag=list_id[0])
                    print("INPUT :  send data", data);sys.stdout.flush()
            for source in source_sending:
                print("INPUT :  end");sys.stdout.flush()
                comm.Recv([check, 1, MPI.CXX_BOOL], source=source, tag=MPI.ANY_TAG, status=status_)
            print("INPUT : end run");sys.stdout.flush()
            starting += 200
        elif status_.Get_tag() == 2:
            for i in range(nb_mpi-1):
                print(" receive ending");sys.stdout.flush()
                comm.Recv([check, 1, MPI.CXX_BOOL], source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status_)
            print("INPUT : end simulation");sys.stdout.flush()
            print("INPUT : ending time : ",starting)
            break
        else:
            print(status_.Get_tag())
            break
    comm.Disconnect()
    MPI.Close_port(port)
    os.remove(path_to_files)
    print('INPUT : exit')
    MPI.Finalize()