コード例 #1
0
def mpi_sync_db(session):
    """Causes the halo_db module to use the rank 0 processor's 'Creator' object"""

    global _mpi_initialized

    if _mpi_initialized:
        import pypar
        import halo_db as db

        if pypar.rank() == 0:
            # Rank 0 attaches the creator to this session, persists it,
            # then broadcasts its database id to every other rank.
            x = session.merge(db._current_creator)
            session.commit()
            # NOTE(review): looks like a race-avoidance pause so the commit
            # is visible before workers query it -- confirm.
            time.sleep(0.5)
            print("Manager --> transmit run ID=", x.id)
            for i in range(1, pypar.size()):
                pypar.send(x.id, tag=3, destination=i)

            db._current_creator = x

        else:
            # Other ranks wait for the id and re-load the same Creator row
            # through their own session.
            ID = pypar.receive(source=0, tag=3)
            print("Rank", pypar.rank(), " --> set run ID=", ID)
            db._current_creator = session.query(
                db.Creator).filter_by(id=ID).first()
            print(db._current_creator)

    else:
        print("NOT syncing DB references: MPI unavailable")
コード例 #2
0
 def _get_stations_local(self):
     # Partition the full station list evenly across MPI processes and
     # keep only this rank's contiguous slice in self.stations_local.
     all_stations = self._get_all_stations()
     num_stations = len(all_stations)
     mpisize = pypar.size()
     # Python 2 integer division: base number of stations per process.
     stations_per_proc = num_stations / mpisize
     leftover = num_stations % mpisize
     _station_distr = []
     ind_start = 0
     ind_end = 0
     rank = pypar.rank()
     # Build (start, end) ranges; the first 'leftover' ranks get one extra.
     for ind_proc in range(mpisize):
         ind_start = ind_end
         count = stations_per_proc
         if ind_proc < leftover:
             count += 1
         ind_end = ind_start + count
         _station_distr.append((ind_start, ind_end))
     start_local, end_local = _station_distr[rank]
     
     print pypar.rank(), stations_per_proc, leftover, start_local, end_local
     if start_local >= num_stations:
         # More processes than stations: this rank has nothing to do.
         self.stations_local = set()
         self.say('No enough stations for this process')
     else:
         self.say('Station range: %i -- %i (%i)' % (start_local, end_local, num_stations))
         self.stations_local = set(all_stations[start_local:end_local])
     # The master must always end up with at least one station.
     assert self.stations_local or not self.is_master
コード例 #3
0
ファイル: topomult.py プロジェクト: wcarthur/topomultipliers
def run():
    """
    Run the process, handling any parallelisation.

    Precedence of settings: defaults < config file < command-line flags.
    """
    
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config",
                        help="Configuration file",
                        type=str)
    parser.add_argument("-i", "--inputfile",
                        help="Input DEM file (ascii format)",
                        type=str)
    parser.add_argument("-o", "--output", 
                        help="Output path",
                        type=str)
    parser.add_argument("-v", "--verbose", 
                        help=("Verbose output (not available when invoking"
                                "parallel run)") )
                                
    args = parser.parse_args() 
                          
    # Defaults, possibly overridden by the config file below.
    logfile = 'topomult.log'
    loglevel = 'INFO'
    
    if args.verbose:
        verbose = args.verbose
    else:
        verbose = False

    if args.config:
        # Config file values override the defaults ...
        cfg = ConfigParser.ConfigParser()
        cfg.read(args.config)

        input_file = cfg.get('Input', 'Filename')
        output_path = cfg.get('Output', 'Path')
        logfile = cfg.get('Logging', 'LogFile')
        loglevel = cfg.get('Logging', 'LogLevel')
        verbose = cfg.get('Logging', 'Verbose')
        
    # ... and explicit command-line arguments win over both.
    if args.inputfile:
        input_file = args.inputfile

    if args.output:
        output_path = args.output
    
    attemptParallel()
    if pp.size() > 1 and pp.rank() > 0:
        # Give each worker its own log file and silence its console output.
        logfile += '-' + str(pp.rank())
        verbose = False  # to stop output to console

    flStartLog(logfile, loglevel, verbose)
    
    pp.barrier()
    work(input_file, output_path,
             ['n','s','e','w','ne','nw','se','sw'])
    pp.barrier()
    
    pp.finalize()
コード例 #4
0
def _mpi_end_embarrass():
    """Shut down MPI cleanly if it was initialised, otherwise just report."""
    global _mpi_initialized

    if not _mpi_initialized:
        print("Non-MPI run : Exit without MPI_Finalize")
        return

    import pypar

    me = pypar.rank() + 1
    total = pypar.size()
    print(me, " of ", total, ": BARRIER")
    pypar.barrier()
    print(me, " of ", total, ": FINALIZE")
    pypar.finalize()
    _mpi_initialized = False
コード例 #5
0
ファイル: pypar_balancer.py プロジェクト: Mahdisadjadi/pypar
def mprint(txt):
    """
    Print message txt
    with indentation following the node's rank
    """
    import pypar
    
    # Indent 8 spaces per rank so each node's output is visually offset.
    pre = " " * 8 * pypar.rank()
    # Coerce non-string messages to their string representation.
    if type(txt) != type('dummy'):
        txt = txt.__str__()
    pat = "-%d-"
    print pre + (pat % pypar.rank()) + txt
コード例 #6
0
def mprint(txt):
    """
    Print message txt
    with indentation following the node's rank
    """
    import pypar

    # Indent 8 spaces per rank so each node's output is visually offset.
    pre = " " * 8 * pypar.rank()
    # Coerce non-string messages to their string representation.
    if type(txt) != type('dummy'):
        txt = txt.__str__()
    pat = "-%d-"
    print pre + (pat % pypar.rank()) + txt
コード例 #7
0
	def __init__( s, dataform, global_pt1, global_pt2, spatial_step=(1,1,1) ):
		# Record this process's MPI rank and the global region this object
		# covers; 'step' is the spatial step along the first axis.
		s.myrank = mpi.rank()
		s.dataform = dataform 
		s.global_pt1 = global_pt1
		s.global_pt2 = global_pt2
		s.spatial_step = spatial_step
		s.step = spatial_step[0]
コード例 #8
0
ファイル: functional.py プロジェクト: uniomni/pypar-1
def distributed_generator(iterable):
    """
    Distribute the values from a generator across workers.
    """
    RUN, DIE = range(2)
    P = pp.size()
    if P == 1:
        # Serial run: nothing to distribute.
        for item in iterable:
            yield item
        return

    if pp.rank() == 0:
        # Master: each round, hand one value to every worker and keep one.
        source = iter(iterable)
        exhausted = False
        while not exhausted:
            try:
                own = next(source)
                for worker in range(1, P):
                    pp.send(next(source), worker, tag=RUN)
                yield own
            except StopIteration:
                # Out of values: tell every worker to stop.
                for worker in range(1, P):
                    pp.send(666, worker, tag=DIE)
                exhausted = True
    else:
        # Worker: consume values from the master until told to die.
        while True:
            item, status = pp.receive(0, tag=pp.any_tag, return_status=True)
            if status.tag == DIE:
                break
            yield item
コード例 #9
0
    def __init__(self,
                 coordinates,
                 vertices,
                 boundary = None,
                 full_send_dict = None,
                 ghost_recv_dict = None,
                 velocity = None):

        # Build the underlying Domain, tagging it with this process's MPI
        # rank and the total process count so ghost exchange works.
        Domain.__init__(self,
                        coordinates,
                        vertices,
                        boundary,
                        velocity = velocity,
                        full_send_dict=full_send_dict,
                        ghost_recv_dict=ghost_recv_dict,
                        processor=pypar.rank(),
                        numproc=pypar.size()
                        )

        N = self.number_of_elements


        # Timers for profiling interprocess communication.
        self.communication_time = 0.0
        self.communication_reduce_time = 0.0


        print 'processor',self.processor
        print 'numproc',self.numproc
コード例 #10
0
def rec_submesh(p, verbose=True):
    """Receive this processor's submesh and convert it into the local
    GA mesh datastructure."""
    import pypar

    numproc = pypar.size()
    myid = pypar.rank()

    submesh_cell, triangles_per_proc, \
        number_of_full_nodes, number_of_full_triangles = \
        rec_submesh_flat(p, verbose)

    # Range of full triangles assigned to this processor.
    lower_t = sum(triangles_per_proc[:myid])
    upper_t = lower_t + triangles_per_proc[myid]

    # Convert the information into a form needed by the GA datastructure.
    GAnodes, GAtriangles, boundary, quantities, \
        ghost_rec, full_send, \
        tri_map, node_map, tri_l2g, node_l2g, \
        ghost_layer_width = \
        build_local_mesh(submesh_cell, lower_t, upper_t, numproc)

    return GAnodes, GAtriangles, boundary, quantities, \
        ghost_rec, full_send, \
        number_of_full_nodes, number_of_full_triangles, tri_map, node_map, \
        tri_l2g, node_l2g, ghost_layer_width
コード例 #11
0
ファイル: parallel.py プロジェクト: nisarahmadkarimzada/eqrm
    def __init__(self, is_parallel=True):
        """
        Use is_parallel = False to stop parallelism, eg when running
        several scenarios.
        """
        # Identity test on purpose: anything other than the literal True
        # disables parallelism.
        if is_parallel is not True:
            self._not_parallel()
        else:
            try:
                import pypar
            except ImportError:
                # pypar missing: fall back to serial operation.
                self._not_parallel()
            else:
                if pypar.size() < 2:
                    # A single process cannot run in parallel.
                    self._not_parallel()
                else:
                    self.rank = pypar.rank()
                    self.size = pypar.size()
                    self.node = pypar.get_processor_name()
                    self.is_parallel = True
                    self.file_tag = FILE_TAG_DELIMITER + str(self.rank)
                    self.log_file_tag = FILE_TAG_DELIMITER + str(self.rank)

        # Some constants to identify messages
        self.load_event_set = 0
コード例 #12
0
ファイル: parallel.py プロジェクト: dynaryu/eqrm
    def __init__(self, is_parallel=True):
        """
        Use is_parallel = False to stop parallelism, eg when running
        several scenarios.
        """
        # Identity test on purpose: anything other than the literal True
        # disables parallelism.
        if is_parallel is not True:
            self._not_parallel()
        else:
            try:
                import pypar
            except ImportError:
                # pypar missing: fall back to serial operation.
                self._not_parallel()
            else:
                if pypar.size() < 2:
                    # A single process cannot run in parallel.
                    self._not_parallel()
                else:
                    self.rank = pypar.rank()
                    self.size = pypar.size()
                    self.node = pypar.get_processor_name()
                    self.is_parallel = True
                    self.file_tag = FILE_TAG_DELIMITER + str(self.rank)
                    self.log_file_tag = FILE_TAG_DELIMITER + str(self.rank)

        # Some constants to identify messages
        self.load_event_set = 0
コード例 #13
0
 def __init__(*args, **kwargs):
     # Positional catch-all: forward everything to the base statistician.
     self = args[0]
     statistician.DatabaseStatistician.__init__(*args, **kwargs)
     # Rank 0 acts as the master; all other ranks do the local work.
     self.is_master = pypar.rank() == 0
     self.stations_local = None
     #assert pypar.size() > 1
     self.work_locally = not self.is_master
コード例 #14
0
    def __init__(self):
        """Record the MPI topology: process count, this rank, host name."""
        self.proc = pypar.size()                # total number of processes
        self.myid = pypar.rank()                # this process's id
        self.node = pypar.get_processor_name()  # host this process runs on
コード例 #15
0
def rec_submesh(p, verbose=True):
    """Receive this processor's submesh and convert it into the local
    GA mesh datastructure."""
    import pypar

    numproc = pypar.size()
    myid = pypar.rank()

    submesh_cell, triangles_per_proc, \
        number_of_full_nodes, number_of_full_triangles = \
        rec_submesh_flat(p, verbose)

    # Range of full triangles assigned to this processor.
    lower_t = sum(triangles_per_proc[:myid])
    upper_t = lower_t + triangles_per_proc[myid]

    # Convert the information into a form needed by the GA datastructure.
    GAnodes, GAtriangles, boundary, quantities, \
        ghost_rec, full_send, \
        tri_map, node_map, tri_l2g, node_l2g, \
        ghost_layer_width = \
        build_local_mesh(submesh_cell, lower_t, upper_t, numproc)

    return GAnodes, GAtriangles, boundary, quantities, \
        ghost_rec, full_send, \
        number_of_full_nodes, number_of_full_triangles, tri_map, node_map, \
        tri_l2g, node_l2g, ghost_layer_width
コード例 #16
0
ファイル: RayTrace.py プロジェクト: asdfvar/ray-trace
   def __init__(self, LookPos, LookDir, LookYaw, WindowRows = 40, WindowCols = 40):
      # Camera description: position, view direction and yaw, plus the
      # pixel window to be ray-traced.
      self.LookPos = np.array(LookPos)
      self.LookDir = np.array(LookDir)
      self.Yaw = LookYaw
      self.WindowRows = WindowRows
      self.WindowCols = WindowCols
      # Longitude/latitude of the view direction in spherical coordinates.
      rhop = np.linalg.norm(np.array([LookDir[0],LookDir[1]]))
      self.__Lon = math.atan2(LookDir[1], LookDir[0])
      self.__Lat = math.atan2(LookDir[2],rhop)
      self.start = time.time()
      
      # initialize the MPI
      self.numproc = pypar.size()
      self.myid =    pypar.rank()
      self.node =    pypar.get_processor_name()
      
      # Split the window rows across processes (Python 2 integer
      # division); the last rank also takes the remainder rows.
      if self.myid != self.numproc - 1:
         self.Rows = self.WindowRows/self.numproc
         self.RowEnd = self.WindowRows/self.numproc * (self.myid+1) - 1
      else:
         self.Rows = self.WindowRows/self.numproc + self.WindowRows%self.numproc
         self.RowEnd = self.WindowRows

      self.RowStart = self.WindowRows/self.numproc * self.myid
      self.Window = np.zeros(shape = (self.Rows, self.WindowCols))
コード例 #17
0
    def __init__(self,
                 coordinates,
                 vertices,
                 boundary=None,
                 full_send_dict=None,
                 ghost_recv_dict=None,
                 velocity=None):

        # Build the underlying Domain, tagging it with this process's MPI
        # rank and the total process count so ghost exchange works.
        Domain.__init__(self,
                        coordinates,
                        vertices,
                        boundary,
                        velocity=velocity,
                        full_send_dict=full_send_dict,
                        ghost_recv_dict=ghost_recv_dict,
                        processor=pypar.rank(),
                        numproc=pypar.size())

        N = self.number_of_elements

        # Timers for profiling interprocess communication.
        self.communication_time = 0.0
        self.communication_reduce_time = 0.0

        print 'processor', self.processor
        print 'numproc', self.numproc
コード例 #18
0
ファイル: mpi.py プロジェクト: lelou6666/PySOL
def all_gather( obj ):
	# Gather one object from every rank onto every rank: each rank in
	# turn broadcasts its slot of the result list (Python 2 / pypar).
	myid = p.rank()
	nproc = p.size()
	result = [ None for i in xrange(nproc) ]
	result[myid] = obj
	for i in xrange(nproc):
		broadcast_vec(result,i)
	return result
コード例 #19
0
ファイル: mpi.py プロジェクト: whigg/PySOL
def broadcast_vec(vec, i):
    # Broadcast vec[i] from rank i to all other ranks: the owner sends its
    # element to everyone else, the others receive into their local copy.
    myid = p.rank()
    if myid == i:
        for j in xrange(p.size()):
            if j != myid:
                p.send(vec[i], j)
    else:
        vec[i] = p.receive(i)
コード例 #20
0
ファイル: mpi.py プロジェクト: whigg/PySOL
def all_gather(obj):
    # Gather one object from every rank onto every rank: each rank in
    # turn broadcasts its slot of the result list (Python 2 / pypar).
    myid = p.rank()
    nproc = p.size()
    result = [None for i in xrange(nproc)]
    result[myid] = obj
    for i in xrange(nproc):
        broadcast_vec(result, i)
    return result
コード例 #21
0
ファイル: mpi.py プロジェクト: lelou6666/PySOL
def broadcast_vec( vec, i ):
	# Broadcast vec[i] from rank i to all other ranks: the owner sends its
	# element to everyone else, the others receive into their local copy.
	myid = p.rank()
	if myid == i:
		for j in xrange(p.size()):
			if j != myid:
				p.send(vec[i],j)
	else:
		vec[i] = p.receive(i)
コード例 #22
0
def Inlet_operator(domain,
                   poly,
                   Q,
                   velocity=None,
                   default=None,
                   description=None,
                   label=None,
                   logging=False,
                   master_proc=0,
                   procs=None,
                   verbose=False):
    # Factory: return a serial Inlet_operator for ordinary domains, or a
    # Parallel_Inlet_operator allocated across the processors that
    # actually overlap the inlet polygon (Python 2 / pypar).

    # If not parallel domain then allocate serial Inlet operator
    if isinstance(domain, Parallel_domain) is False:
        if verbose: print "Allocating non parallel inlet operator ....."
        return anuga.structures.inlet_operator.Inlet_operator(
            domain,
            poly,
            Q,
            velocity=velocity,
            default=default,
            description=description,
            label=label,
            logging=logging,
            verbose=verbose)

    import pypar
    if procs is None:
        procs = range(0, pypar.size())

    myid = pypar.rank()

    poly = num.array(poly, dtype='d')

    # Decide which processors hold part of the inlet and which one
    # masters it.
    alloc, inlet_master_proc, inlet_procs, enquiry_proc = allocate_inlet_procs(
        domain, poly, master_proc=master_proc, procs=procs, verbose=verbose)

    if alloc:
        if verbose and myid == inlet_master_proc:
            print "Parallel Inlet Operator ================="
            print "Poly = " + str(poly)
            print "Master Processor is P%d" % (inlet_master_proc)
            print "Processors are P%s" % (inlet_procs)
            print "========================================="

        return Parallel_Inlet_operator(domain,
                                       poly,
                                       Q,
                                       velocity=velocity,
                                       default=default,
                                       description=description,
                                       label=label,
                                       logging=logging,
                                       master_proc=inlet_master_proc,
                                       procs=inlet_procs,
                                       verbose=verbose)
    else:
        # This processor does not overlap the inlet.
        return None
コード例 #23
0
def run_client():
    '''
    Runs
    '''
    # Identification
    myid = pypar.rank()  # id of this process
    nproc = pypar.size()  # number of processors
    # NOTE(review): nproc is computed but never used here.

    print "I am client", myid
    pypar.finalize()
コード例 #24
0
ファイル: cluster_client.py プロジェクト: JoErNanO/brian
def run_client():
    '''
    Runs
    '''
    # Identification
    myid = pypar.rank() # id of this process
    nproc = pypar.size() # number of processors
    # NOTE(review): nproc is computed but never used here.

    print "I am client", myid
    pypar.finalize()
コード例 #25
0
ファイル: tfsf_mpi.py プロジェクト: wbkifun/fdtd_accelerate
	def __init__( s, global_pt1, global_pt2, apply_direction, wavelength, propagation_direction, polarization_angle ):
		# Store the TF/SF region geometry and the incident-wave parameters
		# for this MPI rank.
		s.myrank = mpi.rank()
		s.global_pt1, s.global_pt2 = global_pt1, global_pt2
		s.apply_direction = apply_direction
		s.wavelength = wavelength
		s.propagation_direction = propagation_direction
		s.p_angle = polarization_angle

		# Global extent of the region along the first axis.
		s.gi1 = global_pt1[0]
		s.gi2 = global_pt2[0]
コード例 #26
0
def one_example():
    # Round-robin demo: rank r handles items whose index satisfies
    # i % size == r (Python 2 / pypar).
    txt = ["yes", "no", "when", "what the", "a", "5ive!"]

    rank = pypar.rank()
    size = pypar.size()

    print "I am processor %d of %d. " % (rank, size)
    for i, ele in enumerate(txt):
        if i % size == rank:
            print "i" + str(i) + " P" + str(rank) + " len " + str(len(ele)) + " for " + ele
コード例 #27
0
    def _send_event(self, event, test, err=None):
        """Serialise an event and write it to the result stream.

        Only rank 0 reports; worker ranks return immediately.
        """
        rank = pp.rank()
        if rank > 0:
            return

        # Length-prefixed frame: 4-byte big-endian size, then the payload.
        payload = pickle.dumps((rank, str(event), err)).encode("latin1")
        frame = struct.pack("!I", len(payload)) + payload

        self.stream.write(frame)
        self.stream.flush()
コード例 #28
0
def one_example():
    # Round-robin demo: rank r handles items whose index satisfies
    # i % size == r (Python 2 / pypar).
    txt = ["yes", "no", "when", "what the", "a", "5ive!"]

    rank = pypar.rank()
    size = pypar.size()

    print "I am processor %d of %d. " % (rank, size)
    for i, ele in enumerate(txt):
        if i % size == rank:
            print "i" + str(i) + " P" + str(rank) + " len " + str(
                len(ele)) + " for " + ele
コード例 #29
0
ファイル: test.py プロジェクト: deepakkarki/pypar
    def _send_event(self, event, test, err=None):
        """Serialise an event and write it to the result stream.

        Only rank 0 reports; worker ranks return immediately.
        """
        rank = pp.rank()
        if rank > 0:
            return

        # Length-prefixed frame: 4-byte big-endian size, then the payload.
        payload = pickle.dumps((rank, str(event), err)).encode("latin1")
        frame = struct.pack("!I", len(payload)) + payload

        self.stream.write(frame)
        self.stream.flush()
コード例 #30
0
ファイル: Environment.py プロジェクト: shambo001/peat
	def rank(self):
		'''Returns the rank of the process in the environment

		This is 0 if there is only one process and for the root processor'''
		if not Environment.isParallel:
			# Serial run: the only process acts as the root.
			return 0
		import pypar
		return pypar.rank()
コード例 #31
0
    def __init__(s, global_pt1, global_pt2, apply_direction, wavelength,
                 propagation_direction, polarization_angle):
        """Store the TF/SF region geometry and incident-wave parameters."""
        s.myrank = mpi.rank()  # this process's MPI rank

        s.global_pt1 = global_pt1
        s.global_pt2 = global_pt2
        s.apply_direction = apply_direction
        s.wavelength = wavelength
        s.propagation_direction = propagation_direction
        s.p_angle = polarization_angle

        # Global extent of the region along the first axis.
        s.gi1 = global_pt1[0]
        s.gi2 = global_pt2[0]
コード例 #32
0
ファイル: test_scatter.py プロジェクト: Mahdisadjadi/pypar
    def test_string(self):
        # Scatter a 16-character string evenly across ncpu processes and
        # compare the buffered and auto-created results (Python 2: NP is
        # integer division).
        myid, ncpu = pp.rank(), pp.size()
        data = 'ABCDEFGHIJKLMNOP'  # Length = 16
        NP = len(data) / ncpu
        X = ' ' * NP

        pp.scatter(data, 0, buffer=X)  # With buffer
        Y = pp.scatter(data, 0)  # With buffer automatically created

        self.assertEqual(X, Y)
        self.assertEqual(Y, data[myid * NP:(myid + 1) * NP])
        self.assertEqual(X, data[myid * NP:(myid + 1) * NP])
コード例 #33
0
def print_test_stats(domain, tri_full_flag):
    # Print L1/L2/Linf norms of the edge values of every quantity,
    # restricted to this processor's full (non-ghost) triangles.
    myid = pypar.rank()

    for k in domain.quantities.keys():
        TestStage = domain.quantities[k]
        if myid == 0:
            print " ===== ", k, " ===== "
        full_edge = take(TestStage.edge_values, nonzero(tri_full_flag))
        print_l1_stats(full_edge)
        print_l2_stats(full_edge)
        print_linf_stats(full_edge)
コード例 #34
0
ファイル: test_scatter.py プロジェクト: uniomni/pypar-1
    def test_string(self):
        # Scatter a 16-character string evenly across ncpu processes and
        # compare the buffered and auto-created results (Python 2: NP is
        # integer division).
        myid, ncpu = pp.rank(), pp.size()
        data = 'ABCDEFGHIJKLMNOP'  # Length = 16
        NP = len(data) / ncpu
        X = ' ' * NP

        pp.scatter(data, 0, buffer=X)  # With buffer
        Y = pp.scatter(data, 0)  # With buffer automatically created

        self.assertEqual(X, Y)
        self.assertEqual(Y, data[myid * NP:(myid + 1) * NP])
        self.assertEqual(X, data[myid * NP:(myid + 1) * NP])
コード例 #35
0
def _mpi_iterate(task_list):
    """Sets up an iterator returning items of task_list. If this is rank 0 processor, runs
    a separate thread which dishes out tasks to other ranks. If this is >0 processor, relies
    on getting tasks assigned by the rank 0 processor."""
    import pypar
    if pypar.rank() == 0:
        job_iterator = iter(enumerate(task_list))
        #import threading
        #i_thread = threading.Thread(target= lambda : _mpi_assign_thread(job_iterator))
        # i_thread.start()

        # kluge: run the assignment loop inline instead of in a thread
        i_thread = None
        _mpi_assign_thread(job_iterator)
        while True:
            try:
                # Take the next job index for the manager itself.
                job = job_iterator.next()[0]
                print "Manager --> Doing job", job, "of", len(
                    task_list), "myself"
                yield task_list[job]
            except StopIteration:
                print "Manager --> Out of jobs message to myself"
                if i_thread is not None:
                    i_thread.join()
                _mpi_end_embarrass()
                return

    # Worker ranks: request a job from rank 0, yield it, repeat until a
    # None job says there is nothing left.
    while True:

        pypar.send(pypar.rank(), tag=1, destination=0)
        job = pypar.receive(0, tag=2)

        if job is None:
            _mpi_end_embarrass()
            return
        else:
            yield task_list[job]

    _mpi_end_embarrass()  # NOTE(review): unreachable -- the loop only exits via return
コード例 #36
0
def am_i_the_master():
    """Return True when this process is rank 0, or when no MPI binding
    is available (a serial run is its own master)."""
    if __PYPAR__:
        return pypar.rank() == 0
    if __MPI4PY__:
        return mpi4py.MPI.COMM_WORLD.rank == 0
    # No MPI bindings: single-process run.
    return True
コード例 #37
0
ファイル: test_scatter.py プロジェクト: Mahdisadjadi/pypar
    def test_without_root(self):
        # Scatter without specifying a root process; compare the buffered
        # and auto-created results (Python 2: NP is integer division).
        myid, ncpu = pp.rank(), pp.size()
        N = 16
        NP = N / ncpu
        data = np.array(range(N)).astype('i')
        X = np.zeros(NP).astype('i')

        pp.scatter(data, buffer=X)  # With buffer
        Y = pp.scatter(data)  # With buffer automatically created

        self.assertTrue(np.allclose(X, Y))
        self.assertTrue(np.allclose(X, data[myid * NP:(myid + 1) * NP]))
        self.assertTrue(np.allclose(Y, data[myid * NP:(myid + 1) * NP]))
コード例 #38
0
ファイル: test_scatter.py プロジェクト: uniomni/pypar-1
    def test_without_root(self):
        # Scatter without specifying a root process; compare the buffered
        # and auto-created results (Python 2: NP is integer division).
        myid, ncpu = pp.rank(), pp.size()
        N = 16
        NP = N / ncpu
        data = np.array(range(N)).astype('i')
        X = np.zeros(NP).astype('i')

        pp.scatter(data, buffer=X)  # With buffer
        Y = pp.scatter(data)  # With buffer automatically created

        self.assertTrue(np.allclose(X, Y))
        self.assertTrue(np.allclose(X, data[myid * NP:(myid + 1) * NP]))
        self.assertTrue(np.allclose(Y, data[myid * NP:(myid + 1) * NP]))
コード例 #39
0
ファイル: test_sendrecv.py プロジェクト: uniomni/pypar-1
    def test_longint_array(self):
        # Pass a long-int array around the ring 0 -> 1 -> ... -> 0 using
        # preallocated receive buffers; rank 0 checks it returns unchanged.
        myid, ncpu = pp.rank(), pp.size()
        N = 17  # Number of elements

        if myid == 0:
            A = np.array(range(N)).astype('l')
            B = np.zeros(N).astype('l')

            pp.send(A, 1, use_buffer=True)
            B = pp.receive(ncpu - 1, buffer=B)

            self.assertTrue(np.allclose(A, B))
        else:
            X = np.zeros(N).astype('l')
            X = pp.receive(myid - 1, buffer=X)
            pp.send(X, (myid + 1) % ncpu, use_buffer=True)
コード例 #40
0
ファイル: test_sendrecv.py プロジェクト: Mahdisadjadi/pypar
    def test_longint_array(self):
        # Pass a long-int array around the ring 0 -> 1 -> ... -> 0 using
        # preallocated receive buffers; rank 0 checks it returns unchanged.
        myid, ncpu = pp.rank(), pp.size()
        N = 17  # Number of elements

        if myid == 0:
            A = np.array(range(N)).astype('l')
            B = np.zeros(N).astype('l')

            pp.send(A, 1, use_buffer=True)
            B = pp.receive(ncpu - 1, buffer=B)

            self.assertTrue(np.allclose(A, B))
        else:
            X = np.zeros(N).astype('l')
            X = pp.receive(myid - 1, buffer=X)
            pp.send(X, (myid + 1) % ncpu, use_buffer=True)
コード例 #41
0
ファイル: collect_arr.py プロジェクト: erfanxyz/astrolibpy
def collect_arr(arr):
    """
    A useful collection routine for pypar.
    If you are using pypar to parallelize the set of nested loops and fill
    the resulting array, you usually need to combine the resulting array
    from several mpi threads. In that case you just can execute
    res=collect_arr(res)
    And it will add the arrays from all the threads and store them in
    the thread number 0
    """
    if pypar.rank() == 0:
        # Master: sum in the contribution of every other rank.
        for src in range(1, pypar.size()):
            arr = arr + pypar.receive(src)
    else:
        # Worker: ship our partial result to rank 0.
        pypar.send(arr, 0)
    return arr
コード例 #42
0
ファイル: collect_arr.py プロジェクト: basanop/SLiPy
def collect_arr(arr):
    """
    A useful collection routine for pypar.
    If you are using pypar to parallelize the set of nested loops and fill
    the resulting array, you usually need to combine the resulting array
    from several mpi threads. In that case you just can execute
    res=collect_arr(res)
    And it will add the arrays from all the threads and store them in
    the thread number 0
    """
    if pypar.rank() == 0:
        # Master: sum in the contribution of every other rank.
        for src in range(1, pypar.size()):
            arr = arr + pypar.receive(src)
    else:
        # Worker: ship our partial result to rank 0.
        pypar.send(arr, 0)
    return arr
コード例 #43
0
ファイル: pypar_balancer.py プロジェクト: Mahdisadjadi/pypar
    def __init__(self, work, debug = False):
        # Record the MPI topology and attach this balancer to the work
        # object (master/slave load balancing, Python 2 / pypar).
        self.numprocs = pypar.size()           # Number of processes as specified by mpirun
        self.myid = pypar.rank()               # Id of of this process (myid in [0, numproc-1]) 
        self.node = pypar.get_processor_name() # Host name on which current process is running
        self.debug= debug
        self.work = work

        # Added by Ole Nielsen, 15 May 2008
        if self.numprocs < 2:
            msg = 'PyparBalancer must run on at least 2 processes'
            msg += ' for the Master Slave paradigm to make sense.'
            raise Exception, msg

        
        self.work.uplink(self, self.myid, self.numprocs, self.node)
        
        self.numworks = self.work.getNumWorkItems()
        print "PyparBalancer initialised on proc %d of %d on node %s" %(self.myid, self.numprocs, self.node)
コード例 #44
0
ファイル: functional.py プロジェクト: uniomni/pypar-1
def collected(iterable):
    """
    Collect iterables back to the master.
    """
    nprocs = pp.size()
    if nprocs == 1:
        # Serial run: pass values straight through.
        for item in iterable:
            yield item
    elif pp.rank() == 0:
        # Master: our own results first, then each worker's in rank order.
        gathered = list(iterable)
        for worker in range(1, nprocs):
            gathered.extend(pp.receive(worker))
        for item in gathered:
            yield item
    else:
        # Worker: send everything to the master.
        pp.send(list(iterable), 0)
コード例 #45
0
ファイル: test_scatter.py プロジェクト: uniomni/pypar-1
    def test_diff_master(self):
        # Data is generated only on the master before the scatter; every
        # rank checks its slice against an independently built array
        # (Python 2: NP is integer division).
        myid, ncpu = pp.rank(), pp.size()
        N = 16
        NP = N / ncpu

        check = np.array(range(N)).astype('i')
        X = np.zeros(NP).astype('i')

        data = np.empty(N, 'i')

        if myid == 0:  # only generated on master
            data = np.array(range(N)).astype('i')

        pp.scatter(data, 0, buffer=X)  # With buffer
        Y = pp.scatter(data, 0)  # With buffer automatically created

        self.assertTrue(np.allclose(X, Y))
        self.assertTrue(np.allclose(X, check[myid * NP:(myid + 1) * NP]))
        self.assertTrue(np.allclose(Y, check[myid * NP:(myid + 1) * NP]))
コード例 #46
0
ファイル: test_scatter.py プロジェクト: Mahdisadjadi/pypar
    def test_diff_master(self):
        # Data is generated only on the master before the scatter; every
        # rank checks its slice against an independently built array
        # (Python 2: NP is integer division).
        myid, ncpu = pp.rank(), pp.size()
        N = 16
        NP = N / ncpu

        check = np.array(range(N)).astype('i')
        X = np.zeros(NP).astype('i')

        data = np.empty(N, 'i')

        if myid == 0:  # only generated on master
            data = np.array(range(N)).astype('i')

        pp.scatter(data, 0, buffer=X)  # With buffer
        Y = pp.scatter(data, 0)  # With buffer automatically created

        self.assertTrue(np.allclose(X, Y))
        self.assertTrue(np.allclose(X, check[myid * NP:(myid + 1) * NP]))
        self.assertTrue(np.allclose(Y, check[myid * NP:(myid + 1) * NP]))
コード例 #47
0
def embarrass(file_list, pre_roll=0, post_roll=0):
    """Get a file list for this node (embarrassing parallelization).

    If the last two command-line arguments parse as integers "<proc> <of>",
    the list is split statically and this node's contiguous slice (1-based
    chunk `proc` of `of`, optionally widened by pre_roll/post_roll) is
    returned. Otherwise an MPI run is assumed and a task iterator from
    _mpi_iterate is returned instead.
    """
    global _mpi_initialized

    if type(file_list) == set:
        file_list = list(file_list)

    import sys
    try:
        proc = int(sys.argv[-2])
        of = int(sys.argv[-1])
    except (IndexError, ValueError):
        if pre_roll != 0 or post_roll != 0:
            raise AssertionError(
                "Pre/post-roll no longer supported for MPI -- non-contiguuous")
        print("Trying to run in MPI mode...")
        import pypar

        _mpi_initialized = True

        proc = pypar.rank() + 1
        of = pypar.size()
        print("Success!", proc, "of", of)

        return _mpi_iterate(file_list)

    # Floor division: these are slice indices. Plain '/' would produce
    # floats on Python 3 and make file_list[i:j + 1] raise TypeError.
    i = (len(file_list) * (proc - 1)) // of
    j = (len(file_list) * proc) // of - 1
    assert proc <= of and proc > 0
    if proc == of:
        # Last chunk absorbs the final element dropped by the -1 above.
        j += 1
    print(proc, "processing", i, j, "(inclusive)")

    # Optionally widen the slice, clamped to the list bounds.
    i -= pre_roll
    j += post_roll

    if i < 0:
        i = 0
    if j >= len(file_list):
        j = len(file_list) - 1

    return file_list[i:j + 1]
コード例 #48
0
def print_l1_stats(full_edge):
    # Reduce the per-axis L1 norms of the edge values onto rank 0 and
    # print the combined result (Python 2 / pypar / Numeric-style zeros).
    numprocs = pypar.size()
    myid = pypar.rank()
    
    tri_norm = zeros(3, Float)
    recv_norm = zeros(3, Float)
    tri_norm[0] = l1_norm(full_edge[:, 0])
    tri_norm[1] = l1_norm(full_edge[:, 1])
    tri_norm[2] = l1_norm(full_edge[:, 2])
    if myid == 0:
        # Accumulate the norms sent by every other process.
        for p in range(numprocs-1):
            pypar.receive(p+1, recv_norm)
            tri_norm[0] = tri_norm[0]+recv_norm[0]
            tri_norm[1] = tri_norm[1]+recv_norm[1]
            tri_norm[2] = tri_norm[2]+recv_norm[2]
        print 'l1_norm along each axis : [', tri_norm[0],', ', tri_norm[1], ', ', tri_norm[2], ']'

    else:
        pypar.send(tri_norm, 0)
コード例 #49
0
ファイル: pyparprocessing.py プロジェクト: uniomni/pypar-1
def start(initializer=None, initargs=(), maxtasks=None):
    # Choose a process-pool implementation: pypar-backed when running
    # under MPI with more than one process, multiprocessing otherwise.
    global Pool
    try:
        # make pypar available
        global pp
        import pypar as pp

        if pp.size() > 1:
            Pool = PyparPool
            if pp.rank() > 0:
                # Worker ranks enter the work loop here.
                worker(initializer, initargs, maxtasks)
        else:
            # fallback to multiprocessing
            print 'Using multiprocessing'
            pp.finalize()
            import multiprocessing as mp
            Pool = mp.Pool

    except ImportError:  # no pypar
        return
コード例 #50
0
    def __init__(self, aWorkList):

        # Message tags used by the master/worker protocol.
        self.WORKTAG = 1
        self.DIETAG = 2

        self.MPI_myid = pypar.rank()
        self.MPI_numproc = pypar.size()
        self.MPI_node = pypar.get_processor_name()

        self.works = aWorkList
        self.numWorks = len(self.works)

        self.reduceFunction = None
        self.mapFunction = None
        self.result = None

        # Master/worker needs at least one worker besides the master.
        if self.MPI_numproc < 2:
            pypar.finalize()
            if self.MPI_myid == 0:
                raise Exception, 'ERROR: Number of processors must be greater than 2.'
コード例 #51
0
ファイル: MPIMapReducer.py プロジェクト: Mahdisadjadi/pypar
    def __init__(self, aWorkList):  
       
        # Message tags used by the master/worker protocol.
        self.WORKTAG = 1
        self.DIETAG =  2
        
        self.MPI_myid =    pypar.rank()
        self.MPI_numproc = pypar.size()
        self.MPI_node =    pypar.get_processor_name()

        self.works = aWorkList
        self.numWorks = len(self.works)

        self.reduceFunction = None
        self.mapFunction = None
        self.result = None
   
        # Master/worker needs at least one worker besides the master.
        if self.MPI_numproc < 2:
            pypar.finalize()
            if self.MPI_myid == 0:
                raise Exception, 'ERROR: Number of processors must be greater than 2.'
コード例 #52
0
ファイル: pyparprocessing.py プロジェクト: deepakkarki/pypar
def start(initializer=None, initargs=(), maxtasks=None):
    # Choose a process-pool implementation: pypar-backed when running
    # under MPI with more than one process, multiprocessing otherwise.
    global Pool
    try:
        # make pypar available
        global pp
        import pypar as pp

        if pp.size() > 1:
            Pool = PyparPool
            if pp.rank() > 0:
                # Worker ranks enter the work loop here.
                worker(initializer, initargs, maxtasks)
        else:
            # fallback to multiprocessing
            print "Using multiprocessing"
            pp.finalize()
            import multiprocessing as mp

            Pool = mp.Pool

    except ImportError:  # no pypar
        return
コード例 #53
0
ファイル: ldos_examples.py プロジェクト: zonksoft/envTB
def use_w90_example(Ny=30, Nx=30, magnetic_B=None):
    # Build a zigzag graphene-ribbon Hamiltonian from Wannier90 data and
    # plot its local density of states at E = 0.7 (Python 2 / pypar demo).

    #pypar.finalize()  
    # Ny: number of atoms in slice is 2*(Ny+1)
    ham_w90 = define_zigzag_ribbon_w90(
        "../../exampledata/02_graphene_3rdnn/graphene3rdnnlist.dat", 
        Ny, Nx, magnetic_B=magnetic_B)
    
    ham = envtb.ldos.hamiltonian.HamiltonianFromW90(ham_w90, Nx)
    #print ham.mtot
    """
    i0 = ham.Nx / 2
    j0 = ham.Ny / 2
    ic = (i0 - 1) * ham.Ny + (j0-1)
   
    potential = envtb.ldos.potential.Potential2DFromFunction(
        lambda x: 0.01 * (ham.coords[ic][1] - x[1])**2 + 0.01 * \
                 (ham.coords[ic][0] - x[0])**2)
    
    ham2 = ham.apply_potential(potential)
    envtb.ldos.plotter.Plotter().plot_potential(ham2, ham)
    plt.axes().set_aspect('equal')
    plt.show()
    """
    # Report the MPI topology this process is part of.
    import pypar
    
    proc = pypar.size()
    myid = pypar.rank()
    node = pypar.get_processor_name()
    print 'I am proc %d of %d on node %s' % (myid, proc, node)
    
    local_dos=envtb.ldos.local_density.LocalDensityOfStates(ham)
    
    envtb.ldos.plotter.Plotter().plot_density(local_dos(0.7), ham.coords)
    plt.title('E = 0.7')
    plt.axes().set_aspect('equal')
    plt.show()
    
    return None
コード例 #54
0
    def __init__(self, domain, poly, master_proc = 0, procs = None, verbose=False):
        """Initialise the inlet region on this processor.

        A two-point poly is treated as an exchange line; more points are
        treated as a polygon.
        """
        self.domain = domain
        self.poly = num.asarray(poly, dtype=num.float64)
        self.verbose = verbose

        # Two points describe a line, more describe a polygon.
        self.line = len(self.poly) <= 2

        self.master_proc = master_proc
        self.procs = [self.master_proc] if procs is None else procs

        import pypar
        self.myid = pypar.rank()

        self.compute_triangle_indices()
        self.compute_area()
コード例 #55
0
ファイル: parallel.py プロジェクト: wcarthur/hazimp
    def __init__(self):
        """
        Use is_parallel = False to stop parallelism, eg when running
        several scenarios.
        """

        try:
            import pypar   # pylint: disable=W0404
        except ImportError:
            self._not_parallel()
        else:
            if pypar.size() >= 2:
                self.rank = pypar.rank()
                self.size = pypar.size()
                self.node = pypar.get_processor_name()
                self.is_parallel = True
                self.file_tag = str(self.rank)
                self.log_file_tag = str(self.rank)

                # Ensure a clean MPI exit
                atexit.register(pypar.finalize)
            else:
                self._not_parallel()