def testNamePublishing(self):
    """Round-trip a service name through the MPI name-publishing API.

    Opens a port, publishes it under a per-rank service name, checks
    that Lookup_name returns the identical port string, then unpublishes
    the name and closes the port.
    """
    rank = MPI.COMM_WORLD.Get_rank()
    # One service name per rank so concurrently running ranks cannot
    # collide in the name service.
    service = "mpi4py-%d" % rank
    port = MPI.Open_port()
    MPI.Publish_name(service, port)
    looked_up = MPI.Lookup_name(service)
    self.assertEqual(port, looked_up)
    # Clean up in reverse order of creation.
    MPI.Unpublish_name(service, port)
    MPI.Close_port(port)
def stop_manager(): rank = MPI.COMM_WORLD.Get_rank() print rank if rank == 0: info = MPI.INFO_NULL service = "job manager" port = MPI.Lookup_name(service, info) comm = MPI.COMM_WORLD.Connect(port, info, rank) comm.send([], dest=0, tag=tags.STOP) comm.Disconnect()
def main_client(COMM):
    """Client entry point.

    Looks up the published 'cpi' service, connects to the server over
    COMM, drives it via master() on the resulting intercommunicator,
    and finally disconnects.  Expects to be run as a single process.
    """
    assert COMM.Get_size() == 1
    service = 'cpi'
    info = MPI.INFO_NULL
    port = MPI.Lookup_name(service, info)
    log(COMM, "service '%s' found in port '%s'.", service, port)
    log(COMM, "connecting to server ...")
    intercomm = COMM.Connect(port, info, root=0)
    log(COMM, "server connected.")
    master(intercomm)
    log(COMM, "disconnecting from server ...")
    intercomm.Disconnect()
    log(COMM, "server disconnected.")
def add_jobs(event_ids, resource_paths, ompi_server_file, **kwargs): import cuttsum.events import cuttsum.corpora events = [event for event in cuttsum.events.get_events() if event.query_num in set(event_ids)] resources = [] for resource_path in resource_paths: mods = resource_path.split(".") class_name = mods[-1] package_path = ".".join(mods[:-1]) mod = __import__(package_path, fromlist=[class_name]) clazz = getattr(mod, class_name) resource = clazz() resources.append(resource) rank = MPI.COMM_WORLD.Get_rank() if rank == 0: info = MPI.INFO_NULL service = "job manager" port = MPI.Lookup_name(service, info) comm = MPI.COMM_WORLD.Connect(port, info, rank) n_jobs = 0 n_units = 0 jobs = [] for event in events: if event.query_id.startswith("TS13"): corpus = cuttsum.corpora.EnglishAndUnknown2013() elif event.query_id.startswith("TS14"): corpus = cuttsum.corpora.SerifOnly2014() else: raise Exception("Bad query id: {}".format(event.query_id)) for resource in resources: print "Adding job", event, corpus, resource n_jobs += 1 for i in xrange(5): n_units += 1 jobs.append((event, corpus, resource, i)) print "Added", n_jobs, "jobs comprising", n_units, "units of work." comm.send(jobs, dest=0, tag=tags.ADD_JOB) comm.Disconnect()
def MPI_register(self):
    """Register this process with the parallel-training server.

    Looks up the published 'parallel-training' service, connects over
    COMM_WORLD, records the intercommunicator's local and remote sizes
    in self.config, and sanity-checks the link with test_intercomm().
    """
    first = self.request('connect')
    # self.verbose = (first == 'first')
    info = MPI.INFO_NULL
    service = 'parallel-training'
    port = MPI.Lookup_name(service, info)
    self.intercomm = MPI.COMM_WORLD.Connect(port, info, root=0)
    self.config['irank'] = self.intercomm.rank
    # size on the local side
    self.config['isize'] = self.intercomm.size
    # size on the remote side
    self.config['iremotesize'] = self.intercomm.remote_size
    test_intercomm(self.intercomm, rank=1)
# client.py """ Client side of the MPI client/server programming model. Run this with 1 processes like: $ mpiexec -n 1 python client.py """ import numpy as np from mpi4py import MPI comm = MPI.COMM_WORLD service_name = 'compute' # get the opened port of the server by looking-up a service_name port_name = MPI.Lookup_name(service_name) # connect to the server inter_comm = comm.Connect(port_name) # send message to the server send_obj = '1 + 2' print 'Client sends %s to server.' % send_obj inter_comm.send(send_obj, dest=0, tag=0) # get results from the server recv_obj = inter_comm.recv(source=0, tag=1) print 'Client receives %s from server.' % recv_obj # disconnect from the server inter_comm.Disconnect()