Example #1
	def __init__(self, verbose=True):
	
		'''Initialises new Environment objects'''
	
		self.outputDirectory = os.getcwd()
		self.verbose = verbose
	
		#Do the subsequent things once only
		if not Environment.isInitialised:
			#See if a parallel environment is available
			try:
				import pypar
				#If it's imported we might have a parallel environment
				Environment.isParallel = True
				self.output('[PEAT-SA] Parallel environment available')
				#Check environment size. 
				#If there is more than one processor there must be a parallel environment
				#If there's only one then it's not parallel.
				if pypar.size() == 1:
					self.output('[PEAT-SA] Only one processor - parallel environment disabled')
					Environment.isParallel = False
				else:
					self.output('[PEAT-SA] Parallel environment enabled with %d processors' % pypar.size())
			except BaseException:
				#Importing pypar caused an exception - No parallel environment
				Environment.isParallel = False
				self.output('[PEAT-SA] Parallel environment disabled.\n')

			Environment.isInitialised = True	 
Example #2
	def balanceArrays(self, arrayFragment):
	
		'''Redistributes the elements in a set of arrays equally across the nodes'''
		
		if Environment.isParallel:
			import pypar
			if self.isRoot():
				completeArray = arrayFragment
				for i in range(1, pypar.size()):
					fragment = pypar.receive(i)
					completeArray.extend(fragment)
				
				#Divide it up
				divisions = self._divideArray(completeArray)
				
				#Send the fragments
				for i in range(1, pypar.size()):
					start, end = divisions[i]
					pypar.send(completeArray[start:end], i)
					
				self.output('[ENV] Rebalanced array divisions %s' % divisions)	
					
				#Assign root fragment	
				start, end = divisions[0]	
				arrayFragment = completeArray[start:end]
			else:
				#Send the fragment
				pypar.send(arrayFragment, 0)
				#Retrieve the array
				arrayFragment = pypar.receive(0)
		else:
			#Serial run - nothing to redistribute
			pass
		
		return arrayFragment
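A minimal driver sketch for the method above (hypothetical: assumes an `Environment` instance named `env` and a launch such as `mpirun -np 4 python driver.py`); each rank passes in its local fragment and receives a roughly equal share of the combined array back.

import pypar

env = Environment()
local = list(range(pypar.rank() * 3))    # deliberately uneven per-rank workloads
local = env.balanceArrays(local)         # now roughly equal on every rank
pypar.finalize()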
Example #3
    def __init__(self, is_parallel=True):
        """
        Use is_parallel = False to stop parallelism, eg when running
        several scenarios.
        """

        if is_parallel is True:
            try:
                import pypar
            except ImportError:
                self._not_parallel()
            else:
                if pypar.size() >= 2:
                    self.rank = pypar.rank()
                    self.size = pypar.size()
                    self.node = pypar.get_processor_name()
                    self.is_parallel = True
                    self.file_tag = FILE_TAG_DELIMITER + str(self.rank)
                    self.log_file_tag = FILE_TAG_DELIMITER + str(self.rank)
                else:
                    self._not_parallel()
        else:
            self._not_parallel()

        # Some constants to identify messages
        self.load_event_set = 0
Example #4
	def _divideArray(self, array):
	
		'''Divides an array roughly equally depending on the environment size
		
		Returns: A list with one entry for each node.
		The entry is a tuple giving the start and end elements in array 
		that should be assigned to that node.'''
	
		import pypar
	
		#Divide evenly then add remainder elements to processors
		maxElements = int(math.floor(len(array)/pypar.size()))
		remainder = len(array) - maxElements*pypar.size()
		start = 0
		end = 0
		divisions = []
		for i in range(pypar.size()):
			start = end
			end = end + maxElements
			if remainder != 0:
				end = end + 1
				remainder = remainder - 1
			divisions.append((start, end))
			
		return divisions									
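The division arithmetic is easy to check by hand. A serial sketch of the same logic (environment size passed in explicitly, no MPI required), assuming 10 elements over 3 nodes:

def divide(n_elements, n_procs):
    # Same logic as _divideArray: equal shares, with the remainder spread
    # one element at a time over the first processors.
    max_elements = n_elements // n_procs
    remainder = n_elements - max_elements * n_procs
    divisions, end = [], 0
    for _ in range(n_procs):
        start = end
        end += max_elements + (1 if remainder > 0 else 0)
        remainder = max(remainder - 1, 0)
        divisions.append((start, end))
    return divisions

print(divide(10, 3))    # [(0, 4), (4, 7), (7, 10)]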
Example #5
File: mpi.py Project: whigg/PySOL
def gather(obj):
    if root():
        result = [None for i in xrange(p.size())]
        result[0] = obj
        for i in xrange(p.size() - 1):
            result[i + 1] = p.receive(i + 1)
        return result
    else:
        p.send(obj, 0)
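A usage sketch, assuming `p` is the imported `pypar` module and `root()` simply tests `p.rank() == 0` (as elsewhere in this mpi.py):

values = gather(p.rank() ** 2)    # full list on root, None on the other ranks
if root():
    print(values)                 # e.g. [0, 1, 4, 9] under `mpirun -np 4`
p.finalize()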
Example #6
def _mpi_end_embarrass():
    global _mpi_initialized
    if _mpi_initialized:
        import pypar
        print(pypar.rank() + 1, " of ", pypar.size(), ": BARRIER")
        pypar.barrier()
        print(pypar.rank() + 1, " of ", pypar.size(), ": FINALIZE")
        pypar.finalize()
        _mpi_initialized = False
    else:
        print("Non-MPI run : Exit without MPI_Finalize")
Example #7
File: mpi.py Project: whigg/PySOL
def broadcast(obj):
    if root():
        for i in xrange(p.size() - 1):
            p.send(obj, i + 1)
        return obj
    else:
        return p.receive(0)
Example #8
File: mpi.py Project: whigg/PySOL
def scatter(vec):
    if root():
        for i in xrange(p.size() - 1):
            p.send(vec[i + 1], i + 1)
        return vec[0]
    else:
        return p.receive(0)
Example #9
def distributed_generator(iterable):
    """
    Distribute the values from a generator across workers.
    """
    RUN, DIE = range(2)
    P = pp.size()
    if P == 1:
        for el in iterable:
            yield el
    else:
        if pp.rank() == 0:
            it = iter(iterable)
            while True:
                try:
                    first = next(it)
                    for p in range(1, P):
                        pp.send(next(it), p, tag=RUN)
                    yield first
                except StopIteration:
                    for p in range(1, P):
                        pp.send(666, p, tag=DIE)
                    break
        else:
            while True:
                el, status = pp.receive(0, tag=pp.any_tag, return_status=True)
                if status.tag == DIE:
                    break
                yield el
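A sketch of the intended call pattern, assuming `pp` is `pypar` and the script is started under `mpirun`. Note that it is the `DIE` tag, not the sentinel payload `666`, that terminates the workers, so any picklable payload would do for the shutdown message.

for value in distributed_generator(range(20)):
    # Every rank sees a disjoint subset of the twenty values.
    print(pp.rank(), "got", value)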
Example #10
   def __init__(self, LookPos, LookDir, LookYaw, WindowRows = 40, WindowCols = 40):
      self.LookPos = np.array(LookPos)
      self.LookDir = np.array(LookDir)
      self.Yaw = LookYaw
      self.WindowRows = WindowRows
      self.WindowCols = WindowCols
      rhop = np.linalg.norm(np.array([LookDir[0],LookDir[1]]))
      self.__Lon = math.atan2(LookDir[1], LookDir[0])
      self.__Lat = math.atan2(LookDir[2],rhop)
      self.start = time.time()
      
      # initialize the MPI
      self.numproc = pypar.size()
      self.myid =    pypar.rank()
      self.node =    pypar.get_processor_name()
      
      if self.myid != self.numproc - 1:
         self.Rows = self.WindowRows/self.numproc
         self.RowEnd = self.WindowRows/self.numproc * (self.myid+1) - 1
      else:
         self.Rows = self.WindowRows/self.numproc + self.WindowRows%self.numproc
         self.RowEnd = self.WindowRows

      self.RowStart = self.WindowRows/self.numproc * self.myid
      self.Window = np.zeros(shape = (self.Rows, self.WindowCols))
Example #11
def rec_submesh(p, verbose=True):

    import pypar
    
    numproc = pypar.size()
    myid = pypar.rank()

    [submesh_cell, triangles_per_proc,\
     number_of_full_nodes, number_of_full_triangles] = rec_submesh_flat(p,verbose)
    
    # find the full triangles assigned to this processor

    lower_t = 0
    for i in range(myid):
        lower_t = lower_t+triangles_per_proc[i]
    upper_t = lower_t+triangles_per_proc[myid]

    # convert the information into a form needed by the GA
    # datastructure

    [GAnodes, GAtriangles, boundary, quantities, \
     ghost_rec, full_send, \
     tri_map, node_map, tri_l2g, node_l2g, \
     ghost_layer_width] = \
     build_local_mesh(submesh_cell, lower_t, upper_t, numproc)
    
    return GAnodes, GAtriangles, boundary, quantities,\
           ghost_rec, full_send,\
           number_of_full_nodes, number_of_full_triangles, tri_map, node_map,\
           tri_l2g, node_l2g, ghost_layer_width
Example #12
    def __init__(self,
                 coordinates,
                 vertices,
                 boundary=None,
                 full_send_dict=None,
                 ghost_recv_dict=None,
                 velocity=None):

        Domain.__init__(self,
                        coordinates,
                        vertices,
                        boundary,
                        velocity=velocity,
                        full_send_dict=full_send_dict,
                        ghost_recv_dict=ghost_recv_dict,
                        processor=pypar.rank(),
                        numproc=pypar.size())

        N = self.number_of_elements

        self.communication_time = 0.0
        self.communication_reduce_time = 0.0

        print 'processor', self.processor
        print 'numproc', self.numproc
Example #13
def _mpi_assign_thread(job_iterator):
    # Sit idle until request for a job comes in, then assign first
    # available job and move on. Jobs are labelled through the
    # provided iterator
    import pypar
    import pypar.mpiext

    j = -1

    print("Manager --> Entered iterator code")

    alive = [True for i in range(pypar.size())]

    while any(alive[1:]):
        dest = pypar.receive(source=pypar.mpiext.MPI_ANY_SOURCE, tag=1)
        try:
            time.sleep(0.05)
            j = next(job_iterator)[0]
            print("Manager --> Sending job", j, "to rank", dest)
        except StopIteration:
            alive[dest] = False
            print("Manager --> Sending out of job message to ", dest)
            j = None

        pypar.send(j, destination=dest, tag=2)

    print("Manager --> All jobs done and all processors>0 notified; exiting thread")
Example #14
	def splitArray(self, array):
	
		'''Splits array between all the processes in the environment.
		
		Each process will be returned a different section of the array to work on'''
	
		if Environment.isParallel:
			import pypar
			#Split the array into sections and return the section for this processor
			divisions = []
			if self.isRoot():
				#Root does the splitting - we send each processor the start and end index
				#NOTE: pypar broadcast won't work even when setting vanilla -
				#it always returns a message-truncated error.
				divisions = self._divideArray(array)	
				for i in range(1,pypar.size()):
					pypar.send(divisions[i], i)
				start = divisions[0][0]
				end = divisions[0][1]
			else:	
				indexes = pypar.receive(0)
				start = indexes[0]
				end = indexes[1]
				
			return array[start:end]
		else:
			return array
Example #15
def mpi_sync_db(session):
    """Causes the halo_db module to use the rank 0 processor's 'Creator' object"""

    global _mpi_initialized

    if _mpi_initialized:
        import pypar
        import halo_db as db

        if pypar.rank() == 0:
            x = session.merge(db._current_creator)
            session.commit()
            time.sleep(0.5)
            print("Manager --> transmit run ID=", x.id)
            for i in range(1, pypar.size()):
                pypar.send(x.id, tag=3, destination=i)

            db._current_creator = x

        else:
            ID = pypar.receive(source=0, tag=3)
            print("Rank", pypar.rank(), " --> set run ID=", ID)
            db._current_creator = session.query(
                db.Creator).filter_by(id=ID).first()
            print(db._current_creator)

    else:
        print("NOT syncing DB references: MPI unavailable")
Example #16
 def _get_stations_local(self):
     all_stations = self._get_all_stations()
     num_stations = len(all_stations)
     mpisize = pypar.size()
     stations_per_proc = num_stations / mpisize
     leftover = num_stations % mpisize
     _station_distr = []
     ind_start = 0
     ind_end = 0
     rank = pypar.rank()
     for ind_proc in range(mpisize):
         ind_start = ind_end
         count = stations_per_proc
         if ind_proc < leftover:
             count += 1
         ind_end = ind_start + count
         _station_distr.append((ind_start, ind_end))
     start_local, end_local = _station_distr[rank]
     
     print pypar.rank(), stations_per_proc, leftover, start_local, end_local
     if start_local >= num_stations:
         self.stations_local = set()
         self.say('Not enough stations for this process')
     else:
         self.say('Station range: %i -- %i (%i)' % (start_local, end_local, num_stations))
         self.stations_local = set(all_stations[start_local:end_local])
     assert self.stations_local or not self.is_master
Example #17
    def __init__(self):

        self.proc = pypar.size()
        self.myid = pypar.rank()
        self.node = pypar.get_processor_name()

        return
Example #18
def run():
    """
    Run the process, handling any parallelisation.
    """
    
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config",
                        help="Configuration file",
                        type=str)
    parser.add_argument("-i", "--inputfile",
                        help="Input DEM file (ascii format)",
                        type=str)
    parser.add_argument("-o", "--output", 
                        help="Output path",
                        type=str)
    parser.add_argument("-v", "--verbose", 
                        help=("Verbose output (not available when invoking"
                                "parallel run)") )
                                
    args = parser.parse_args() 
                          
    logfile = 'topomult.log'
    loglevel = 'INFO'
    
    if args.verbose:
        verbose = args.verbose
    else:
        verbose = False

    if args.config:
        cfg = ConfigParser.ConfigParser()
        cfg.read(args.config)

        input_file = cfg.get('Input', 'Filename')
        output_path = cfg.get('Output', 'Path')
        logfile = cfg.get('Logging', 'LogFile')
        loglevel = cfg.get('Logging', 'LogLevel')
        verbose = cfg.get('Logging', 'Verbose')
        
    if args.inputfile:
        input_file = args.inputfile

    if args.output:
        output_path = args.output
    
    attemptParallel()
    if pp.size() > 1 and pp.rank() > 0:
        logfile += '-' + str(pp.rank())
        verbose = False  # to stop output to console

    flStartLog(logfile, loglevel, verbose)
    
    pp.barrier()
    work(input_file, output_path,
             ['n','s','e','w','ne','nw','se','sw'])
    pp.barrier()
    
    pp.finalize()
Example #19
File: mpi.py Project: whigg/PySOL
def broadcast_vec(vec, i):
    myid = p.rank()
    if myid == i:
        for j in xrange(p.size()):
            if j != myid:
                p.send(vec[i], j)
    else:
        vec[i] = p.receive(i)
Example #20
File: mpi.py Project: whigg/PySOL
def all_gather(obj):
    myid = p.rank()
    nproc = p.size()
    result = [None for i in xrange(nproc)]
    result[myid] = obj
    for i in xrange(nproc):
        broadcast_vec(result, i)
    return result
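A sketch under the same assumptions as the other mpi.py helpers (`p` is `pypar`): after the call, every rank holds the complete list.

everything = all_gather(p.rank())    # e.g. [0, 1, 2, 3] on every rank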
Example #21
def Inlet_operator(domain,
                   poly,
                   Q,
                   velocity=None,
                   default=None,
                   description=None,
                   label=None,
                   logging=False,
                   master_proc=0,
                   procs=None,
                   verbose=False):

    # If not parallel domain then allocate serial Inlet operator
    if not isinstance(domain, Parallel_domain):
        if verbose: print "Allocating non parallel inlet operator ....."
        return anuga.structures.inlet_operator.Inlet_operator(
            domain,
            poly,
            Q,
            velocity=velocity,
            default=default,
            description=description,
            label=label,
            logging=logging,
            verbose=verbose)

    import pypar
    if procs is None:
        procs = range(0, pypar.size())

    myid = pypar.rank()

    poly = num.array(poly, dtype='d')

    alloc, inlet_master_proc, inlet_procs, enquiry_proc = allocate_inlet_procs(
        domain, poly, master_proc=master_proc, procs=procs, verbose=verbose)

    if alloc:
        if verbose and myid == inlet_master_proc:
            print "Parallel Inlet Operator ================="
            print "Poly = " + str(poly)
            print "Master Processor is P%d" % (inlet_master_proc)
            print "Processors are P%s" % (inlet_procs)
            print "========================================="

        return Parallel_Inlet_operator(domain,
                                       poly,
                                       Q,
                                       velocity=velocity,
                                       default=default,
                                       description=description,
                                       label=label,
                                       logging=logging,
                                       master_proc=inlet_master_proc,
                                       procs=inlet_procs,
                                       verbose=verbose)
    else:
        return None
Example #22
def run_client():
    '''
    Runs the client process: report this rank, then finalise MPI.
    '''
    # Identification
    myid = pypar.rank() # id of this process
    nproc = pypar.size() # number of processors

    print "I am client", myid
    pypar.finalize()
Example #23
File: mpi.py Project: whigg/PySOL
def p_sum_all(vec):
    nproc = p.size()
    pcs = [None for i in xrange(nproc)]
    if root():
        for i in xrange(nproc):
            pcs[i] = vec[i::nproc]
    temp = scatter(pcs)
    temp = sum(temp)
    pcs = all_gather(temp)
    return sum(pcs)
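A hedged usage sketch: the vector only needs to be meaningful on root, since the other ranks receive their strided slice via `scatter`, and every rank gets the full sum back.

total = p_sum_all(list(range(100)))    # 4950 on every rank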
Example #24
    def calc_participant_list(s):
        for rank in xrange(1, mpi.size()):
            N1 = s.Nx_sum_list[rank - 1]
            N2 = s.Nx_sum_list[rank]
            if s.gi1 >= N1 and s.gi1 <= N2:
                start_rank = rank
            if s.gi2 >= N1 and s.gi2 <= N2:
                end_rank = rank

        return range(start_rank, end_rank + 1)
Example #25
def one_example():
    txt = ["yes", "no", "when", "what the", "a", "5ive!"]

    rank = pypar.rank()
    size = pypar.size()

    print "I am processor %d of %d. " % (rank, size)
    for i, ele in enumerate(txt):
        if i % size == rank:
            print "i" + str(i) + " P" + str(rank) + " len " + str(len(ele)) + " for " + ele
Example #26
    def test_string(self):
        myid, ncpu = pp.rank(), pp.size()
        data = 'ABCDEFGHIJKLMNOP'  # Length = 16
        NP = len(data) / ncpu
        X = ' ' * NP

        pp.scatter(data, 0, buffer=X)  # With buffer
        Y = pp.scatter(data, 0)  # With buffer automatically created

        self.assertEqual(X, Y)
        self.assertEqual(Y, data[myid * NP:(myid + 1) * NP])
        self.assertEqual(X, data[myid * NP:(myid + 1) * NP])
Example #27
    def test_without_root(self):
        myid, ncpu = pp.rank(), pp.size()
        N = 16
        NP = N / ncpu
        data = np.array(range(N)).astype('i')
        X = np.zeros(NP).astype('i')

        pp.scatter(data, buffer=X)  # With buffer
        Y = pp.scatter(data)  # With buffer automatically created

        self.assertTrue(np.allclose(X, Y))
        self.assertTrue(np.allclose(X, data[myid * NP:(myid + 1) * NP]))
        self.assertTrue(np.allclose(Y, data[myid * NP:(myid + 1) * NP]))
Example #28
File: mpi.py Project: whigg/PySOL
def p_dot_all(a, b):
    nproc = p.size()
    va = [None for i in xrange(nproc)]
    vb = [None for i in xrange(nproc)]
    if root():
        for i in xrange(nproc):
            va[i] = a[i::nproc]
            vb[i] = b[i::nproc]
    ta = scatter(va)
    tb = scatter(vb)
    pv = [ta[i] * tb[i] for i in xrange(len(ta))]
    ps = sum(pv)
    rv = all_gather(ps)
    return sum(rv)
Example #29
 def _get_statistic(self, source, statistic):
     if self.is_master:
         self.work_locally = True
         statistic_by_station = statistician.DatabaseStatistician._get_statistic(self, source, statistic)
         self.work_locally = False
         for sender in range(1, pypar.size()):
             statistic_by_station.update(pypar.receive(sender))
     else:
         if len(self.stations_local) > 0:
             statistic_by_station = statistician.DatabaseStatistician._get_statistic(self, source, statistic)
         else:
             statistic_by_station = {}
         pypar.send(statistic_by_station, 0)
     return statistic_by_station
Example #30
    def __init__(self):
        """
        Use is_parallel = False to stop parallelism, eg when running
        several scenarios.
        """

        try:
            import pypar   # pylint: disable=W0404
        except ImportError:
            self._not_parallel()
        else:
            if pypar.size() >= 2:
                self.rank = pypar.rank()
                self.size = pypar.size()
                self.node = pypar.get_processor_name()
                self.is_parallel = True
                self.file_tag = str(self.rank)
                self.log_file_tag = str(self.rank)

                # Ensure a clean MPI exit
                atexit.register(pypar.finalize)
            else:
                self._not_parallel()
Example #31
	def loadSkew(self, numberTasks):
	
		'''Computes the skew in the number of tasks to be processed by each node
		
		The skew is the standard deviation of the task number across the nodes'''				
										
		if Environment.isParallel:
			import pypar
			if self.isRoot():
				taskDistribution = [numberTasks]
				for i in range(1, pypar.size()):
					numberTasks = pypar.receive(i)
					taskDistribution.append(numberTasks)
				
				mean = reduce(operator.add, taskDistribution)
				mean = mean/float(len(taskDistribution))
				
				#Std. dev
				stdev = 0
				for el in taskDistribution:
					stdev = stdev + math.pow((el - mean), 2)
					
				skew = stdev/float(len(taskDistribution))
				skew = math.sqrt(skew)	
				
				for i in range(1, pypar.size()):
					pypar.send(skew, i)
				
			else:
				#Send the fragment
				pypar.send(numberTasks, 0)
				#Retrieve the array
				skew = pypar.receive(0)
		else:
			skew = 0								
										
		return skew
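The "skew" computed above is just the population standard deviation of the per-node task counts. A quick serial check of the same formula:

import math

tasks = [8, 10, 12, 10]                   # hypothetical per-node task counts
mean = sum(tasks) / float(len(tasks))     # 10.0
skew = math.sqrt(sum((t - mean) ** 2 for t in tasks) / float(len(tasks)))
print(skew)                               # ~1.414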
Example #32
	def combineDictionary(self, dictFragment):
	
		'''Combines the dictionary fragments from each processor into one dictionary'''

		if Environment.isParallel:
			import pypar
			if self.isRoot():
				completeDict = dictFragment
				for i in range(1, pypar.size()):
					fragment = pypar.receive(i)
					completeDict.update(fragment)
				
				#Send the array
				for i in range(1, pypar.size()):
					pypar.send(completeDict, i)
			else:
				#Send the fragment
				pypar.send(dictFragment, 0)
				#Retrieve the array
				completeDict = pypar.receive(0)
		else:
			completeDict = dictFragment
		
		return completeDict			
Example #33
def collect_arr(arr):
    """
    A useful collection routine for pypar.
    If you are using pypar to parallelize the set of nested loops and fill
    the resulting array, you usually need to combine the resulting array
    from several mpi threads. In that case you just can execute
    res=collect_arr(res)
    And it will add the arrays from all the threads and store them in
    the thread number 0
    """
    if pypar.rank() > 0:
        pypar.send(arr, 0)
    else:
        for i in range(1, pypar.size()):
            arr = arr + pypar.receive(i)
    return arr
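Following the docstring, a minimal sketch (assumes numpy arrays and a launch under `mpirun -np N`): each rank fills a strided slice, and rank 0 ends up with the elementwise sum of all partial arrays.

import numpy as np
import pypar

res = np.zeros(100)
res[pypar.rank()::pypar.size()] = 1.0    # each rank fills its own slice
res = collect_arr(res)                   # on rank 0: all ones; elsewhere unchanged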
Example #34
    def test_longint_array(self):
        myid, ncpu = pp.rank(), pp.size()
        N = 17  # Number of elements

        if myid == 0:
            A = np.array(range(N)).astype('l')
            B = np.zeros(N).astype('l')

            pp.send(A, 1, use_buffer=True)
            B = pp.receive(ncpu - 1, buffer=B)

            self.assertTrue(np.allclose(A, B))
        else:
            X = np.zeros(N).astype('l')
            X = pp.receive(myid - 1, buffer=X)
            pp.send(X, (myid + 1) % ncpu, use_buffer=True)
Example #35
	def broadcast(self, data, process):
	
		'''Broadcasts data from process to all other nodes'''
		
		if Environment.isParallel:
			import pypar
			if self.rank() == process:
				#NOTE: pypar broadcast won't work even when setting vanilla -
				#it always returns a message-truncated error.
				for i in range(pypar.size()):
					if i != self.rank():
						pypar.send(data, i)
			else:	
				data = pypar.receive(process)
		
		return data
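A usage sketch for this point-to-point fallback, assuming an `Environment` instance `env` (with the `isRoot` helper seen in the other examples):

config = {'cutoff': 10.0} if env.isRoot() else None
config = env.broadcast(config, 0)    # every rank now holds the root's dict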
Example #36
def allreduce(x, op, buffer=None, vanilla=0, bypass=False):
    """Allreduce elements in x to buffer (of the same size as x)
       applying operation op elementwise.

       If bypass is True, all admin and error checks
       get bypassed to reduce the latency.
       The buffer must be specified explicitly in this case.
    """

    if bypass:
        allreduce_array(x, buffer, op)
        return


    from pypar import size
    numproc = size()  # Needed to determine buffer size

    # Create metadata about object
    protocol, typecode, size, shape = create_control_info(x)

    # Allreduce
    if protocol == 'array':
        if buffer is None:
            buffer = zeros(size*numproc, typecode)

            # Modify shape along axis=0 to match size
            shape = list(shape)
            shape[0] *= numproc
            buffer = reshape(buffer, shape)


        msg = 'Data array and buffer must have same type '
        msg += 'in allreduce. I got types "%s" and "%s"' % (x.dtype.char,
                                                            buffer.dtype.char)
        assert x.dtype.char == buffer.dtype.char, msg
        allreduce_array(x, buffer, op)

    elif protocol == 'vanilla' or protocol == 'string':
        raise ValueError('Protocol: %s unsupported for allreduce' % protocol)
    else:
        raise ValueError('Unknown protocol: %s' % protocol)

    return buffer
Example #37
def collected(iterable):
    """
    Collect iterables back to the master.
    """
    P = pp.size()
    if P == 1:
        for el in iterable:
            yield el
    else:
        if pp.rank() == 0:
            results = list(iterable)
            for p in range(1, P):
                pres = pp.receive(p)
                results.extend(pres)
            for el in results:
                yield el
        else:
            pp.send(list(iterable), 0)
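Since `collected` is the counterpart of `distributed_generator` (Example #9), the two compose into a simple scatter/gather pipeline; a sketch assuming both live in the same module. The `list()` call must run on every rank so the workers actually send their results.

results = list(collected(x * x for x in distributed_generator(range(100))))
if pp.rank() == 0:
    print(sum(results))    # 328350, assembled on the master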
Example #38
    def test_diff_master(self):
        myid, ncpu = pp.rank(), pp.size()
        N = 16
        NP = N / ncpu

        check = np.array(range(N)).astype('i')
        X = np.zeros(NP).astype('i')

        data = np.empty(N, 'i')

        if myid == 0:  # only generated on master
            data = np.array(range(N)).astype('i')

        pp.scatter(data, 0, buffer=X)  # With buffer
        Y = pp.scatter(data, 0)  # With buffer automatically created

        self.assertTrue(np.allclose(X, Y))
        self.assertTrue(np.allclose(X, check[myid * NP:(myid + 1) * NP]))
        self.assertTrue(np.allclose(Y, check[myid * NP:(myid + 1) * NP]))
Example #39
def embarrass(file_list, pre_roll=0, post_roll=0):
    """Get a file list for this node (embarrassing parallelization)"""
    global _mpi_initialized

    if type(file_list) == set:
        file_list = list(file_list)

    import sys
    try:
        proc = int(sys.argv[-2])
        of = int(sys.argv[-1])
    except (IndexError, ValueError):
        if pre_roll != 0 or post_roll != 0:
            raise AssertionError(
                "Pre/post-roll no longer supported for MPI -- non-contiguuous")
        print("Trying to run in MPI mode...")
        import pypar

        _mpi_initialized = True

        proc = pypar.rank() + 1
        of = pypar.size()
        print("Success!", proc, "of", of)

        return _mpi_iterate(file_list)

    i = (len(file_list) * (proc - 1)) // of
    j = (len(file_list) * proc) // of - 1
    assert proc <= of and proc > 0
    if proc == of:
        j += 1
    print(proc, "processing", i, j, "(inclusive)")

    i -= pre_roll
    j += post_roll

    if i < 0:
        i = 0
    if j >= len(file_list):
        j = len(file_list) - 1

    return file_list[i:j + 1]