Example #1
def scatter_dict(whole):
    """
    Broadcast and receive a dictionary where the values are 1d arrays
    and the arrays are chunked for the workers.
    Only rank 0 needs the whole dictionary.

    :param whole: The dictionary of 1d arrays to subdict.
    :returns: (chunk of dictionary of 1d arrays, indexes of whole array)
    """
    if not STATE.is_parallel:
        array_len = len(whole[whole.keys()[0]])
        return whole, numpy.array(range(0, array_len))
    else:
        import pypar     # pylint: disable=W0404

    if STATE.rank == 0:
        array_len = len(whole[whole.keys()[0]])
        for pro in range(0, STATE.size):
            temp_indexes = numpy.array(range(pro, array_len, STATE.size))
            temp_subdict = {}
            for key in whole.keys():
                temp_subdict[key] = whole[key][temp_indexes]
            if pro == 0:
                indexes = temp_indexes
                subdict = temp_subdict
            else:
                pypar.send(temp_indexes, pro)
                pypar.send(temp_subdict, pro)
    else:
        indexes = pypar.receive(0)
        subdict = pypar.receive(0)
    return subdict, indexes
Example #2
    def get_enquiry_depths(self):
        # Should be called from all processors associated with operator

        enq0 = None
        enq1 = None

        get0 = "self.inlets[0].get_enquiry_depth()"
        get1 = "self.inlets[1].get_enquiry_depth()"

        if self.myid == self.master_proc:

            if self.myid == self.enquiry_proc[0]:
                enq0 = eval(get0)
            else:
                enq0 = pypar.receive(self.enquiry_proc[0])

            if self.myid == self.enquiry_proc[1]:
                enq1 = eval(get1)
            else:
                enq1 = pypar.receive(self.enquiry_proc[1])

        else:
            if self.myid == self.enquiry_proc[0]:
                enq0 = eval(get0)
                pypar.send(enq0, self.master_proc)

            if self.myid == self.enquiry_proc[1]:
                enq1 = eval(get1)
                pypar.send(enq1, self.master_proc)

        return [enq0, enq1]
Example #3
    def get_enquiry_depths(self):
        # Should be called from all processors associated with operator

        enq0 = None
        enq1 = None

        get0 = 'self.inlets[0].get_enquiry_depth()'
        get1 = 'self.inlets[1].get_enquiry_depth()'

        if self.myid == self.master_proc:

            if self.myid == self.enquiry_proc[0]:
                enq0 = eval(get0)
            else:
                enq0 = pypar.receive(self.enquiry_proc[0])

            if self.myid == self.enquiry_proc[1]:
                enq1 = eval(get1)
            else:
                enq1 = pypar.receive(self.enquiry_proc[1])

        else:
            if self.myid == self.enquiry_proc[0]:
                enq0 = eval(get0)
                pypar.send(enq0, self.master_proc)

            if self.myid == self.enquiry_proc[1]:
                enq1 = eval(get1)
                pypar.send(enq1, self.master_proc)

        return [enq0, enq1]
Example #4
    def get_enquiry_water_depths(self):

        enq0 = None
        enq1 = None

        get0 = 'self.inlets[0].get_enquiry_water_depth()'
        get1 = 'self.inlets[1].get_enquiry_water_depth()'


        if self.myid == self.master_proc:

            if self.myid == self.enquiry_proc[0]:
                enq0 = eval(get0)
            else:
                enq0 = pypar.receive(self.enquiry_proc[0])


            if self.myid == self.enquiry_proc[1]:
                enq1 = eval(get1)
            else:
                enq1 = pypar.receive(self.enquiry_proc[1])

        else:
            if self.myid == self.enquiry_proc[0]:
                enq0 = eval(get0)
                pypar.send(enq0, self.master_proc)

            if self.myid == self.enquiry_proc[1]:
                enq1 = eval(get1)
                pypar.send(enq1, self.master_proc)


        return [enq0, enq1]
Example #5
def scatter_dict(whole):
    """
    Broadcast and receive a dictionary where the values are 1d arrays
    and the arrays are chunked for the workers.
    Only rank 0 needs the whole dictionary.

    :param whole: The dictionary of 1d arrays to subdict.
    :returns: (chunk of dictionary of 1d arrays, indexes of whole array)
    """
    if not STATE.is_parallel:
        array_len = len(whole[list(whole.keys())[0]])
        return whole, numpy.array(list(range(0, array_len)))
    else:
        import pypar  # pylint: disable=W0404

    if STATE.rank == 0:
        array_len = len(whole[list(whole.keys())[0]])
        for pro in range(0, STATE.size):
            temp_indexes = numpy.array(list(range(pro, array_len, STATE.size)))
            temp_subdict = {}
            for key in list(whole.keys()):
                temp_subdict[key] = whole[key][temp_indexes]
            if pro == 0:
                indexes = temp_indexes
                subdict = temp_subdict
            else:
                pypar.send(temp_indexes, pro)
                pypar.send(temp_subdict, pro)
    else:
        indexes = pypar.receive(0)
        subdict = pypar.receive(0)
    return subdict, indexes
Example #6
	def balanceArrays(self, arrayFragment):
	
		'''Redistributes the elements in a set of arrays equally across the nodes'''
		
		if Environment.isParallel:
			import pypar
			if self.isRoot():
				completeArray = arrayFragment
				for i in range(1, pypar.size()):
					fragment = pypar.receive(i)
					completeArray.extend(fragment)
				
				#Divide it up
				divisions = self._divideArray(completeArray)
				
				#Send the fragments
				for i in range(1, pypar.size()):
					start, end = divisions[i]
					pypar.send(completeArray[start:end], i)
					
				self.output('[ENV] Rebalanced array divisions %s' % divisions)	
					
				#Assign root fragment	
				start, end = divisions[0]	
				arrayFragment = completeArray[start:end]
			else:
				#Send the fragment
				pypar.send(arrayFragment, 0)
				#Retrieve the array
				arrayFragment = pypar.receive(0)
		else:
			completeArray = arrayFragment
		
		return arrayFragment
Example #7
    def get_enquiry_water_depths(self):

        enq0 = None
        enq1 = None

        get0 = 'self.inlets[0].get_enquiry_water_depth()'
        get1 = 'self.inlets[1].get_enquiry_water_depth()'

        if self.myid == self.master_proc:

            if self.myid == self.enquiry_proc[0]:
                enq0 = eval(get0)
            else:
                enq0 = pypar.receive(self.enquiry_proc[0])

            if self.myid == self.enquiry_proc[1]:
                enq1 = eval(get1)
            else:
                enq1 = pypar.receive(self.enquiry_proc[1])

        else:
            if self.myid == self.enquiry_proc[0]:
                enq0 = eval(get0)
                pypar.send(enq0, self.master_proc)

            if self.myid == self.enquiry_proc[1]:
                enq1 = eval(get1)
                pypar.send(enq1, self.master_proc)

        return [enq0, enq1]
Example #8
    def get_enquiry_velocitys(self):

        enq0 = None
        enq1 = None

        get0 = "self.inlets[0].get_enquiry_velocity()"
        get1 = "self.inlets[1].get_enquiry_velocity()"

        if self.myid == self.master_proc:

            if self.myid == self.enquiry_proc[0]:
                enq0 = eval(get0)
            else:
                enq0 = pypar.receive(self.enquiry_proc[0])

            if self.myid == self.enquiry_proc[1]:
                enq1 = eval(get1)
            else:
                enq1 = pypar.receive(self.enquiry_proc[1])

        else:
            if self.myid == self.enquiry_proc[0]:
                enq0 = eval(get0)
                pypar.send(enq0, self.master_proc)

            if self.myid == self.enquiry_proc[1]:
                enq1 = eval(get1)
                pypar.send(enq1, self.master_proc)

        return [enq0, enq1]
Example #9
    def multi_proc_mutate_and_integrate(self, prmt, mutation):
        """subroutine to send jobs to multiple processors using pypar
        
        It sends one network - and not the complete population - to each processor
        and explicitly we take care of synchronization.
        
        Args:
            prmt (dict): the inits parameters for integration
            mutation (list): (id, mut) tuples indicating the mutation flag of integration for each network
        
        Returns:
            int: the total number of mutations
        """
        numproc = self.numproc  # number of procs available for integration: proc 0 is used as the master proc
        l = len(mutation)
        n_mut = 0
        for index_job in range(l):
            nproc = 1 + index_job % (
                numproc - 1
            )  #computes the proc number where to send the job, we start at 1, proc 0 is master proc
            args = {
                'net': self.genus[index_job],
                'prmt': prmt,
                'nnetwork': index_job,
                'tgeneration': self.tgeneration,
                'mutation': mutation[index_job]
            }
            pypar.send((
                'net.mutate_and_integrate(prmt,nnetwork,tgeneration,mutation)',
                args), nproc)  #send integration job to the selected proc
            if ((index_job + 1) % (numproc - 1) == 0):
                pypar.barrier(
                )  # every numproc-1 jobs sent, we wait for all the workers to finish their jobs and synchronize
                results = [
                    pypar.receive(worker) for worker in range(1, numproc)
                ]  #receives results from all processors
                for i in results:
                    n_mut += i[0]  #updates number of mutations
                    self.genus[i[1]] = i[2]  #updates mutated network
                    self.update_fitness(i[1], i[3])  #updates fitness values

        #at that point, we may have jobs still running on some subset of the procs, but we only take the results from the last working processors
        if (l % (numproc - 1) > 0):
            for i in range(l % (numproc - 1), numproc - 1):
                pypar.send(
                    ('0', {}), i + 1
                )  #send dummy jobs to the idle processors for synchronization purposes
            pypar.barrier()  #synchronize the proc
            results = [pypar.receive(worker) for worker in range(1, numproc)]
            #only take the results we are interested in
            for i in range(l % (numproc - 1)):
                n_mut += results[i][0]
                self.genus[results[i][1]] = results[i][2]
                self.update_fitness(results[i][1],
                                    results[i][3])  #updates fitness values
        # Shut down workers
        #for worker in range(1,numproc):
        #    pypar.send(SystemExit(),worker)
        return n_mut
Example #10
    def master(self):
        self.numCompleted = 0
        self.mapList = list()
        logging.info('[MASTER]: started processor %d of %d on node %s: number of works: %d'%(self.MPI_myid, self.MPI_numproc, self.MPI_node, self.numWorks))

        # start slaves distributing the first work slot             
        rounder = 0
        if self.MPI_numproc <= self.numWorks:
            rounder = 1
        for i in range(min(self.MPI_numproc, self.numWorks)-rounder):
            work = self.works[i]
            pypar.send(work, destination=i+1, tag=self.WORKTAG)
            logging.debug('[MASTER]: sent work "%s" to node %d' %(work, i+1))

        # dispatch the remaining work slots with a dynamic load-balancing policy:
        # the quicker a node finishes its job, the more jobs it takes
        for work in self.works[self.MPI_numproc-1:]:
            result, status = pypar.receive(source=pypar.any_source, tag=self.WORKTAG,
                                           return_status=True)
            logging.debug('[MASTER]: received result "%s" from node %d'%(result, status.source))
                  
            self.mapList.append(result)
            self.numCompleted += 1           
            logging.debug('[MASTER]: done : %d' %self.numCompleted)       
        
            pypar.send(work, destination=status.source, tag=self.WORKTAG)
            logging.debug('[MASTER]: sent work "%s" to node %d' %(work, status.source))

        # all works have been dispatched out
        logging.debug('[MASTER]: toDo : %d' %self.numWorks)
        logging.debug('[MASTER]: done : %d' %self.numCompleted)

        # I still have to take into account the remaining completions
        while (self.numCompleted < self.numWorks):
            result, status = pypar.receive(source=pypar.any_source, tag=self.WORKTAG,
                                       return_status=True)
            logging.debug('[MASTER]: received (final) result "%s" from node %d'%(result, status.source))
            
            self.mapList.append(result)            
            self.numCompleted += 1
            logging.debug('[MASTER]: %d completed' %self.numCompleted)

        logging.debug('[MASTER]: about to terminate slaves')

        # Tell slaves to stop working
        for i in range(1, self.MPI_numproc):
            pypar.send('#', destination=i, tag=self.DIETAG)
            logging.debug('[MASTER]: sent termination signal to node %d' %(i, ))

        # call the reduce function
        logging.info('[MASTER]: about to run reduce')
        res = self.reduceFunction(self.mapList)
        return res
Example #11
    def master(self):
        numcompleted = 0
        #--- start slaves distributing the first work slot
        for i in range(0, min(self.numprocs - 1, self.numworks)):
            work = i
            slave = i + 1
            pypar.send(work, destination=slave, tag=PYPAR_WORKTAG)
            if self.debug:
                print '[MASTER ]: sent first work "%s" to node %d' % (work,
                                                                      slave)

        # dispatch the remaining work slots with a dynamic load-balancing policy:
        # the quicker a node finishes its job, the more jobs it takes
        for work in range(self.numprocs - 1, self.numworks):
            result, status = pypar.receive(source=pypar.any_source,
                                           tag=PYPAR_WORKTAG,
                                           return_status=True)
            if self.debug:
                print '[MASTER ]: received result from node %d' % (
                    status.source, )
            numcompleted += 1
            pypar.send(work, destination=status.source, tag=PYPAR_WORKTAG)
            if self.debug:
                print '[MASTER ]: sent work "%s" to node %d' % (work,
                                                                status.source)

            self.work.handleWorkResult(result, status)

        # all works have been dispatched out
        if self.debug: print '[MASTER ]: ToDo : %d' % self.numworks
        if self.debug: print '[MASTER ]: Done : %d' % numcompleted

        # I still have to take into account the remaining completions
        while (numcompleted < self.numworks):
            result, status = pypar.receive(source=pypar.any_source,
                                           tag=PYPAR_WORKTAG,
                                           return_status=True)
            if self.debug:
                print '[MASTER ]: received (final) result from node %d' % (
                    status.source, )
            numcompleted += 1
            if self.debug: print '[MASTER ]: %d completed' % numcompleted

            self.work.handleWorkResult(result, status)

        if self.debug: print '[MASTER ]: about to terminate slaves'

        # Tell slaves to stop working
        for i in range(1, self.numprocs):
            pypar.send('#', destination=i, tag=PYPAR_DIETAG)
            if self.debug: print '[MASTER ]: sent DIETAG to node %d' % (i, )
Example #12
def mpi_sync_db(session):
    """Causes the halo_db module to use the rank 0 processor's 'Creator' object"""

    global _mpi_initialized

    if _mpi_initialized:
        import pypar
        import halo_db as db

        if pypar.rank() == 0:
            x = session.merge(db._current_creator)
            session.commit()
            time.sleep(0.5)
            print("Manager --> transmit run ID=", x.id)
            for i in range(1, pypar.size()):
                pypar.send(x.id, tag=3, destination=i)

            db._current_creator = x

        else:
            ID = pypar.receive(source=0, tag=3)
            print("Rank", pypar.rank(), " --> set run ID=", ID)
            db._current_creator = session.query(
                db.Creator).filter_by(id=ID).first()
            print(db._current_creator)

    else:
        print("NOT syncing DB references: MPI unavailable")
Example #13
def test_mpi_recvsend(target_array, message_range, target_rank, tag_mark):

    send_range = message_range[0]
    recv_range = message_range[1]
    target_array[recv_range] = mpi.receive(target_rank,tag=tag_mark)
  #  print 'I`m', myrank, 'Recv : ', target_rank, 'range : ', recv_range
    mpi.send(target_array[send_range].copy(), target_rank, tag=tag_mark)
Example #14
    def slave(self):
        if self.debug:
            print '[SLAVE %d]: I am processor %d of %d on node %s' % (
                self.myid, self.myid, self.numprocs, self.node)
        if self.debug: print '[SLAVE %d]: Entering work loop' % (self.myid, )
        while True:
            result, status = pypar.receive(source=0,
                                           tag=pypar.any_tag,
                                           return_status=True)
            print '[SLAVE %d]: received work with tag %d from node %d'\
                      %(self.myid, status.tag, status.source)

            if (status.tag == PYPAR_DIETAG):
                print '[SLAVE %d]: received termination from node %d' % (
                    self.myid, 0)
                return
            else:
                worknum = result
                if self.debug:
                    print '[SLAVE %d]: work number is %s' % (self.myid,
                                                             worknum)
                myresult = self.work.calcWorkResult(worknum)
                pypar.send(myresult, destination=0)
                if self.debug:
                    print '[SLAVE %d]: sent result to node %d' % (self.myid, 0)
Example #15
	def splitArray(self, array):
	
		'''Splits array between all the processes in the environment.
		
		Each process will be returned a different section of the array to work on'''
	
		if Environment.isParallel:
			import pypar
			#Split the array into sections and return the section for this processor
			divisions = []
			if self.isRoot():
				#Root does the splitting - we send each processor the start and end index
				#NOTE: pypar broadcast won't work even when setting vanilla
				#It always returns a message truncated error.
				divisions = self._divideArray(array)	
				for i in range(1,pypar.size()):
					pypar.send(divisions[i], i)
				start = divisions[0][0]
				end = divisions[0][1]
			else:	
				indexes = pypar.receive(0)
				start = indexes[0]
				end = indexes[1]
				
			return array[start:end]
		else:
			return array
Example #16
def receive_any(source=None):
    if source is None:
        source = pypar.any_source
    data, status = pypar.receive(source=source,
                                 return_status=True,
                                 tag=pypar.any_tag)
    return data, status.source, status.tag
Example #17
def broadcast( obj ):
	if root():
		for i in xrange(p.size()-1):
			p.send(obj,i+1)
		return obj
	else:
		return p.receive(0)
Example #18
def scatter(vec):
    if root():
        for i in xrange(p.size() - 1):
            p.send(vec[i + 1], i + 1)
        return vec[0]
    else:
        return p.receive(0)
Example #19
def broadcast(obj):
    if root():
        for i in xrange(p.size() - 1):
            p.send(obj, i + 1)
        return obj
    else:
        return p.receive(0)
Example #20
def distributed_generator(iterable):
    """
    Distribute the values from a generator across workers.
    """
    RUN, DIE = range(2)
    P = pp.size()
    if P == 1:
        for el in iterable:
            yield el
    else:
        if pp.rank() == 0:
            it = iter(iterable)
            while True:
                try:
                    first = next(it)
                    for p in range(1, P):
                        pp.send(next(it), p, tag=RUN)
                    yield first
                except StopIteration:
                    for p in range(1, P):
                        pp.send(666, p, tag=DIE)
                    break
        else:
            while True:
                el, status = pp.receive(0, tag=pp.any_tag, return_status=True)
                if status.tag == DIE:
                    break
                yield el
Example #21
    def slave(self):

        logging.debug(
            '[SLAVE %d]: started processor %d of %d on node %s' %
            (self.MPI_myid, self.MPI_myid, self.MPI_numproc, self.MPI_node))

        while True:
            inputMsg, status = pypar.receive(source=0,
                                             tag=pypar.any_tag,
                                             return_status=True)
            logging.debug(
                '[SLAVE %d]: received work "%s" with tag %d from node %d' %
                (self.MPI_myid, inputMsg, status.tag, status.source))

            if (status.tag == self.DIETAG):
                logging.debug('[SLAVE %d]: received termination from node %d' %
                              (self.MPI_myid, 0))
                return
            else:
                logging.debug('[SLAVE %d]: received work "%s" to map' %
                              (self.MPI_myid, inputMsg))
                resultMsg = self.mapFunction(inputMsg)
                pypar.send(resultMsg, destination=0, tag=self.WORKTAG)
                logging.debug('[SLAVE %d]: sent result "%s" to node %d' %
                              (self.MPI_myid, resultMsg, 0))
Example #22
    def test_longint_array(self):
        myid, ncpu = pp.rank(), pp.size()
        N = 17  # Number of elements

        if myid == 0:
            A = np.array(range(N)).astype('l')
            B = np.zeros(N).astype('l')

            pp.send(A, 1, use_buffer=True)
            B = pp.receive(ncpu - 1, buffer=B)

            self.assertTrue(np.allclose(A, B))
        else:
            X = np.zeros(N).astype('l')
            X = pp.receive(myid - 1, buffer=X)
            pp.send(X, (myid + 1) % ncpu, use_buffer=True)
Example #23
def scatter( vec ):
	if root():
		for i in xrange(p.size()-1):
			p.send(vec[i+1],i+1)
		return vec[0]
	else:
		return p.receive(0)
Example #24
def _mpi_assign_thread(job_iterator):
    # Sit idle until request for a job comes in, then assign first
    # available job and move on. Jobs are labelled through the
    # provided iterator
    import pypar
    import pypar.mpiext

    j = -1

    print("Manager --> Entered iterator code")

    alive = [True for i in range(pypar.size())]

    while any(alive[1:]):
        dest = pypar.receive(source=pypar.mpiext.MPI_ANY_SOURCE, tag=1)
        try:
            time.sleep(0.05)
            j = job_iterator.next()[0]
            print("Manager --> Sending job", j, "to rank", dest)
        except StopIteration:
            alive[dest] = False
            print("Manager --> Sending out of job message to ", dest)
            j = None

        pypar.send(j, destination=dest, tag=2)

    print("Manager --> All jobs done and all processors>0 notified; exiting thread")
Example #25
    def get_global_average_elevation(self):
        # GLOBAL: Master processor gathers elevations from all child processors, and returns average

        # WARNING: requires synchronization, must be called by all procs associated
        # with this inlet

        import pypar
        local_elevation = num.sum(self.get_elevations() * self.get_areas())
        global_area = self.get_global_area()

        global_elevation = local_elevation

        if self.myid == self.master_proc:
            for i in self.procs:
                if i == self.master_proc: continue

                val = pypar.receive(i)
                global_elevation = global_elevation + val
        else:
            pypar.send(local_elevation, self.master_proc)

        if global_area > 0.0:
            return global_elevation / global_area
        else:
            return 0.0
Example #26
    def get_global_average_elevation(self):
        # GLOBAL: Master processor gathers elevations from all child processors, and returns average

        # WARNING: requires synchronization, must be called by all procs associated
        # with this inlet

        import pypar
        local_elevation = num.sum(self.get_elevations()*self.get_areas())
        global_area = self.get_global_area()



        global_elevation = local_elevation

        if self.myid == self.master_proc:
            for i in self.procs:
                if i == self.master_proc: continue

                val = pypar.receive(i)
                global_elevation = global_elevation + val
        else:
            pypar.send(local_elevation, self.master_proc)


        if global_area > 0.0:
            return global_elevation/global_area
        else:
            return 0.0
Example #27
    def test_longint_array(self):
        myid, ncpu = pp.rank(), pp.size()
        N = 17  # Number of elements

        if myid == 0:
            A = np.array(range(N)).astype('l')
            B = np.zeros(N).astype('l')

            pp.send(A, 1, use_buffer=True)
            B = pp.receive(ncpu - 1, buffer=B)

            self.assertTrue(np.allclose(A, B))
        else:
            X = np.zeros(N).astype('l')
            X = pp.receive(myid - 1, buffer=X)
            pp.send(X, (myid + 1) % ncpu, use_buffer=True)
Example #28
def master():
    numCompleted = 0

    print '[MASTER]: I am processor %d of %d on node %s'\
          %(MPI_myid, MPI_numproc, MPI_node)

    # start slaves distributing the first work slot
    for i in range(1, min(MPI_numproc, numWorks)):
        work = workList[i]
        pypar.send(work, destination=i, tag=WORKTAG)
        print '[MASTER]: sent work "%s" to node %d' %(work, i)

    # dispatch the remaining work slots with a dynamic load-balancing policy:
    # the quicker a node finishes its job, the more jobs it takes
    for work in workList[MPI_numproc:]:
        result, status = pypar.receive(source=pypar.any_source, tag=WORKTAG,
                                       return_status=True)

        print '[MASTER]: received result "%s" from node %d'\
              %(result, status.source)

        numCompleted += 1
        pypar.send(work, destination=status.source, tag=WORKTAG)
        print '[MASTER]: sent work "%s" to node %d' %(work, status.source)

    # all works have been dispatched out
    print '[MASTER]: toDo : %d' %numWorks
    print '[MASTER]: done : %d' %numCompleted

    # I still have to take into account the remaining completions
    while (numCompleted < numWorks):
        result, status = pypar.receive(source=pypar.any_source,
                                       tag=WORKTAG,
                                       return_status=True)
        print '[MASTER]: received (final) result "%s" from node %d'\
                  %(result, status.source)
        numCompleted += 1
        print '[MASTER]: %d completed' %numCompleted

    print '[MASTER]: about to terminate slaves'

    # Tell slaves to stop working
    for i in range(1, MPI_numproc):
        pypar.send('#', destination=i, tag=DIETAG)
        print '[MASTER]: sent termination signal to node %d' %(i, )

    return
Example #29
    def Calculate(self, varsByCalc, params=None):
        """
        Calculate model predictions for everything in varsByCalc.

        varsByCalc is a dictionary of the form:
            dict[calc name][dep var] = ind var
        
        The return dictionary is of the form:
            dictionary[calc name][dep var][ind var] = result
        """
        if params is not None:
            self.params.update(params)

        results = {}

        calcs_to_do = varsByCalc.keys()
        # Record which calculation each node is doing
        calc_assigned = {}
        while calcs_to_do:
            # The number of calculations to do this round. We want to use
            #  all the processors if possible.
            len_this_block = min(SloppyCell.num_procs, len(calcs_to_do))

            for worker in range(1, len_this_block):
                calc = calcs_to_do.pop()
                calc_assigned[worker] = calc
                logger.debug('Assigning calculation %s to worker %i.' %
                             (calc, worker))
                command = 'Network.calculate(net, vars, params)'
                args = {
                    'net': self.get(calc),
                    'vars': varsByCalc[calc],
                    'params': self.params
                }
                pypar.send((command, args), worker)

            # The master does his share here
            calc = calcs_to_do.pop()
            # We use the finally statement because we want to ensure that we
            #  *always* wait for replies from the workers, even if the master
            #  encounters an exception in his evaluation.
            try:
                results[calc] = self.get(calc).calculate(
                    varsByCalc[calc], self.params)
            finally:
                # Collect results from the workers
                for worker in range(1, len_this_block):
                    logger.debug('Receiving result from worker %i.' % worker)
                    results[calc_assigned[worker]] = pypar.receive(worker)
                # If the master encounters an exception, we'll break out of the
                #  function ***here***

            # Check the results we received. If any is a SloppyCellException,
            #  reraise it.
            for val in results.values():
                if isinstance(val, Utility.SloppyCellException):
                    raise val

        return results
Example #30
    def statistics(self):
        # Warning: requires synchronization, must be called by all procs associated
        # with this structure

        message = ' '

        if self.myid == self.master_proc:

            message = '===============================================\n'
            message += 'Parallel Structure Operator: %s\n' % self.label
            message += '===============================================\n'

            message += 'Structure Type: %s\n' % self.structure_type

            message += 'Description\n'
            message += '%s' % self.description
            message += '\n'

            #add the culvert dimensions, blockage factor here
            if self.structure_type == 'boyd_pipe':
                message += 'Culvert Diameter: %s\n' % self.diameter
                message += 'Culvert Blockage: %s\n' % self.blockage
                message += 'No.  of  barrels: %s\n' % self.barrels
            elif self.structure_type == 'boyd_box':
                message += 'Culvert   Height: %s\n' % self.height
                message += 'Culvert    Width: %s\n' % self.width
                message += 'Culvert Blockage: %s\n' % self.blockage
                message += 'No.  of  barrels: %s\n' % self.barrels
            else:
                message += 'Culvert Height: %s\n' % self.height
                message += 'Culvert  Width: %s\n' % self.width
                message += 'Batter Slope 1: %s\n' % self.z1
                message += 'Batter Slope 2: %s\n' % self.z2

        #print "Structure Myids ",self.myid, self.label

        for i, inlet in enumerate(self.inlets):
            if self.myid == self.master_proc:
                message += '-------------------------------------\n'
                message += 'Inlet %i\n' % (i)
                message += '-------------------------------------\n'

            #print "*****",inlet, i,self.myid
            if inlet is not None:

                stats = inlet.statistics()

            if self.myid == self.master_proc:
                if self.myid != self.inlet_master_proc[i]:
                    stats = pypar.receive(self.inlet_master_proc[i])
            elif self.myid == self.inlet_master_proc[i]:
                pypar.send(stats, self.master_proc)

            if self.myid == self.master_proc: message += stats

        if self.myid == self.master_proc:
            message += '=====================================\n'

        return message
Example #31
def broadcast_vec( vec, i ):
	myid = p.rank()
	if myid == i:
		for j in xrange(p.size()):
			if j != myid:
				p.send(vec[i],j)
	else:
		vec[i] = p.receive(i)
Example #32
def broadcast_vec(vec, i):
    myid = p.rank()
    if myid == i:
        for j in xrange(p.size()):
            if j != myid:
                p.send(vec[i], j)
    else:
        vec[i] = p.receive(i)
Example #33
def gather( obj ):
	if root():
		result = [ None for i in xrange(p.size()) ]
		result[0] = obj
		for i in xrange(p.size()-1):
			result[i+1] = p.receive(i+1)
		return result
	else:
		p.send(obj,0)
Example #34
 def receive(self, *args, **kwargs):
     """
     Wrapper for pypar.receive
     """
     if self.is_parallel is True:
         import pypar
         return pypar.receive(*args, **kwargs)
     else:
         return None
Example #35
 def receive(self, *args, **kwargs):
     """
     Wrapper for pypar.receive
     """
     if self.is_parallel is True:
         import pypar
         return pypar.receive(*args, **kwargs)
     else:
         return None
Example #36
def gather(obj):
    if root():
        result = [None for i in xrange(p.size())]
        result[0] = obj
        for i in xrange(p.size() - 1):
            result[i + 1] = p.receive(i + 1)
        return result
    else:
        p.send(obj, 0)
Example #37
    def Calculate(self, varsByCalc, params = None):
        """
        Calculate model predictions for everything in varsByCalc.

        varsByCalc is a dictionary of the form:
            dict[calc name][dep var] = ind var
        
        The return dictionary is of the form:
            dictionary[calc name][dep var][ind var] = result
        """
        if params is not None:
            self.params.update(params)

        results = {}

        calcs_to_do = varsByCalc.keys()
        # Record which calculation each node is doing
        calc_assigned = {}
        while calcs_to_do:
            # The number of calculations to do this round. We want to use
            #  all the processors if possible.
            len_this_block = min(SloppyCell.num_procs, len(calcs_to_do))

            for worker in range(1, len_this_block):
                calc = calcs_to_do.pop()
                calc_assigned[worker] = calc
                logger.debug('Assigning calculation %s to worker %i.'
                             % (calc, worker))
                command = 'Network.calculate(net, vars, params)'
                args = {'net': self.get(calc), 'vars': varsByCalc[calc],
                        'params': self.params}
                pypar.send((command, args), worker)

            # The master does his share here
            calc = calcs_to_do.pop()
            # We use the finally statement because we want to ensure that we
            #  *always* wait for replies from the workers, even if the master
            #  encounters an exception in his evaluation.
            try:
                results[calc] = self.get(calc).calculate(varsByCalc[calc], 
                                                         self.params)
            finally:
                # Collect results from the workers
                for worker in range(1, len_this_block):
                    logger.debug('Receiving result from worker %i.' % worker)
                    results[calc_assigned[worker]] = pypar.receive(worker)
                # If the master encounters an exception, we'll break out of the
                #  function ***here***

            # Check the results we received. If any is a SloppyCellException, 
            #  reraise it.
            for val in results.values():
                if isinstance(val, Utility.SloppyCellException):
                    raise val

        return results
Example #38
    def __call__(self):

        import pypar
        volume = 0

        # Need to run global command on all processors
        current_volume = self.inlet.get_global_total_water_volume()
        total_area = self.inlet.get_global_area()

        # Only the master proc calculates the update
        if self.myid == self.master_proc:
            timestep = self.domain.get_timestep()

            t = self.domain.get_time()
            Q1 = self.update_Q(t)
            Q2 = self.update_Q(t + timestep)

            volume = 0.5*(Q1+Q2)*timestep



            assert current_volume >= 0.0, 'Volume of water in inlet negative!'

            for i in self.procs:
                if i == self.master_proc: continue

                pypar.send((volume, current_volume, total_area, timestep), i)
        else:
            volume, current_volume, total_area, timestep = pypar.receive(self.master_proc)


        #print self.myid, volume, current_volume, total_area, timestep

        self.applied_Q = volume/timestep
        
        # Distribute positive volume so as to obtain flat surface otherwise
        # just pull water off to have a uniform depth.
        if volume >= 0.0 :
            self.inlet.set_stages_evenly(volume)
            self.domain.fractional_step_volume_integral+=volume
            if self.velocity is not None:
                # This is done locally without communication
                depths = self.inlet.get_depths()
                self.inlet.set_xmoms(self.inlet.get_xmoms()+depths*self.velocity[0])
                self.inlet.set_ymoms(self.inlet.get_ymoms()+depths*self.velocity[1])

        elif current_volume + volume >= 0.0 :
            depth = (current_volume + volume)/total_area
            self.inlet.set_depths(depth)
            self.domain.fractional_step_volume_integral+=volume
        else: #extracting too much water!
            self.inlet.set_depths(0.0)
            self.applied_Q = current_volume/timestep
            self.domain.fractional_step_volume_integral-=current_volume
Example #39
    def __call__(self):

        import pypar
        volume = 0

        # Need to run global command on all processors
        current_volume = self.inlet.get_global_total_water_volume()
        total_area = self.inlet.get_global_area()

        # Only the master proc calculates the update
        if self.myid == self.master_proc:
            timestep = self.domain.get_timestep()

            t = self.domain.get_time()
            Q1 = self.update_Q(t)
            Q2 = self.update_Q(t + timestep)

            volume = 0.5 * (Q1 + Q2) * timestep

            assert current_volume >= 0.0, 'Volume of water in inlet negative!'

            for i in self.procs:
                if i == self.master_proc: continue

                pypar.send((volume, current_volume, total_area, timestep), i)
        else:
            volume, current_volume, total_area, timestep = pypar.receive(
                self.master_proc)

        #print self.myid, volume, current_volume, total_area, timestep

        self.applied_Q = volume / timestep

        # Distribute positive volume so as to obtain flat surface otherwise
        # just pull water off to have a uniform depth.
        if volume >= 0.0:
            self.inlet.set_stages_evenly(volume)
            self.domain.fractional_step_volume_integral += volume
            if self.velocity is not None:
                # This is done locally without communication
                depths = self.inlet.get_depths()
                self.inlet.set_xmoms(self.inlet.get_xmoms() +
                                     depths * self.velocity[0])
                self.inlet.set_ymoms(self.inlet.get_ymoms() +
                                     depths * self.velocity[1])

        elif current_volume + volume >= 0.0:
            depth = (current_volume + volume) / total_area
            self.inlet.set_depths(depth)
            self.domain.fractional_step_volume_integral += volume
        else:  #extracting too much water!
            self.inlet.set_depths(0.0)
            self.applied_Q = current_volume / timestep
            self.domain.fractional_step_volume_integral -= current_volume
Example #40
def print_l1_stats(full_edge):

    numprocs = pypar.size()
    myid = pypar.rank()
    
    tri_norm = zeros(3, Float)
    recv_norm = zeros(3, Float)
    tri_norm[0] = l1_norm(full_edge[:, 0])
    tri_norm[1] = l1_norm(full_edge[:, 1])
    tri_norm[2] = l1_norm(full_edge[:, 2])
    if myid == 0:
        for p in range(numprocs-1):
            pypar.receive(p+1, recv_norm)
            tri_norm[0] = tri_norm[0]+recv_norm[0]
            tri_norm[1] = tri_norm[1]+recv_norm[1]
            tri_norm[2] = tri_norm[2]+recv_norm[2]
        print 'l1_norm along each axis : [', tri_norm[0],', ', tri_norm[1], ', ', tri_norm[2], ']'

    else:
        pypar.send(tri_norm, 0)
Example #41
   def process(self):
      # loop through all the rays that go through the window
      for row in xrange(self.Rows):
         for col in xrange(self.WindowCols):

            # loop through all the light sources
            for Light in self.LightSources:
               Energy = 0.0
                  
               # move the ray to the corresponding pixel on the window. dx and dy changes
               MoveUp = self.WindowHeight/2.0 - (row+self.RowStart - 0.5)*\
                        self.WindowHeight/self.WindowRows # starts from top most pixel
               MoveRight = -self.WindowWidth/2.0 + (col + 0.5)*\
                           self.WindowWidth/self.WindowCols # starts from left most pixel
                  
               # define the ray and rotate (in look direction)
               # then translate (window distance)
               Vec = np.array([self.WindowDistance, -MoveRight, MoveUp])
               Vec = self.__RotVec(Vec, self.__Lon, self.__Lat, self.Yaw)
               Photon = Ray.Ray(self.LookPos,Vec,1.0)
               
               Cont = True
               
               # get energy after first contact
               Res = self.__getEn(Photon, Light)
               Energy += Res[0]
               UnScatRay = Res[1]
               Cont = Res[2]
               
               #pypar.barrier();
               
               for k in range(1):
                  # get energy after N contacts
                  if Cont:
                     Res = self.__getEn(UnScatRay, Light)
                     Energy += Res[0]
                     UnScatRay = Res[1]
                     Cont = Res[2]
                  
               self.Window[row, col] += Energy # set the energy to the corresponding window
  
      # stitch together the images from each process
      if self.myid != 0:
         pypar.send(self.Window, 0)
      else:
         self.Visual = np.zeros(shape = (self.WindowRows, self.WindowCols))
         RowEnd = self.WindowRows/self.numproc
         self.Visual[:RowEnd, :] = self.Window.copy()
         for Pr in range(1,self.numproc):            
            RowStart = self.WindowRows/self.numproc * Pr
            RowEnd = self.WindowRows/self.numproc * (Pr+1)
            self.Visual[RowStart:RowEnd,:] = pypar.receive(Pr)
      
         print "Elapsed time for process %d is %f" % (self.myid, time.time() - self.start)
Example #42
 def master(self):
     numcompleted = 0
     #--- start slaves distributing the first work slot
     for i in range(0, min(self.numprocs-1, self.numworks)): 
         work = i
         slave= i+1
         pypar.send(work, destination=slave, tag=PYPAR_WORKTAG) 
         print '[MASTER ]: sent first work "%s" to node %d' %(work, slave)
 
     # dispatch the remaining work slots with a dynamic load-balancing policy:
     # the quicker a node finishes its job, the more jobs it takes
     for work in range(self.numprocs-1, self.numworks):
         result, status = pypar.receive(source=pypar.any_source, tag=PYPAR_WORKTAG, return_status=True) 
         print '[MASTER ]: received result from node %d' %(status.source, )
         #print result
         numcompleted += 1
         pypar.send(work, destination=status.source, tag=PYPAR_WORKTAG)
         if self.debug: print '[MASTER ]: sent work "%s" to node %d' %(work, status.source)
         
         self.work.handleWorkResult(result, status)
     
     # all works have been dispatched out
     print '[MASTER ]: ToDo : %d' %self.numworks
     print '[MASTER ]: Done : %d' %numcompleted
     
     # I still have to take into account the remaining completions
     while (numcompleted < self.numworks): 
         result, status = pypar.receive(source=pypar.any_source, tag=PYPAR_WORKTAG, return_status=True) 
         print '[MASTER ]: received (final) result from node %d' % (status.source, )
         print result
         numcompleted += 1
         print '[MASTER ]: %d completed' %numcompleted
         
         self.work.handleWorkResult(result, status)
         
     print '[MASTER ]: about to terminate slaves'
 
     # Tell slaves to stop working
     for i in range(1, self.numprocs): 
         pypar.send('#', destination=i, tag=PYPAR_DIETAG) 
         if self.debug: print '[MASTER ]: sent DIETAG to node %d' %(i,)
Example #43
def gather_dict(subdict, indexes):
    """
    Receive a dictionary from the children where the values are 1d arrays
    and the arrays are chunks of the whole dictionary.

    :param indexes: The indexes into the whole array.
    :param subdict: The dictionary of 1d arrays to subset.
    :returns: whole array
    """
    if not STATE.is_parallel:
        return subdict
    else:
        import pypar    # pylint: disable=W0404

    # Note, putting dictionary back sequentially
    if STATE.rank == 0:
        whole = {}
        array_len = indexes[-1]  # highest index in node 0 array
        all_indexes = [[]]  # Empty list for processor 0
        for pro in range(1, STATE.size):
            temp_indexes = pypar.receive(pro)
            all_indexes.append(temp_indexes)
            if temp_indexes[-1] > array_len:
                array_len = temp_indexes[-1]
        # Create the whole dictionary, filled with rank 0 info
        for key in subdict.keys():
            # Work-out the shape of arrays
            array_shape = list(subdict[key].shape)
            array_shape[0] = array_len + 1
            whole[key] = numpy.empty(tuple(array_shape), subdict[key].dtype)
            whole[key][indexes, ...] = subdict[key]
        for pro in range(1, STATE.size):
            subdict = pypar.receive(pro)
            for key in whole.keys():
                whole[key][all_indexes[pro], ...] = subdict[key]
        return whole
    else:
        pypar.send(indexes, 0)
        pypar.send(subdict, 0)
Example #44
def gather_dict(subdict, indexes):
    """
    Receive a dictionary from the children where the values are 1d arrays
    and the arrays are chunks of the whole dictionary.

    :param indexes: The indexes into the whole array.
    :param subdict: The dictionary of 1d arrays to subset.
    :returns: whole array
    """
    if not STATE.is_parallel:
        return subdict
    else:
        import pypar  # pylint: disable=W0404

    # Note, putting dictionary back sequentially
    if STATE.rank == 0:
        whole = {}
        array_len = indexes[-1]  # highest index in node 0 array
        all_indexes = [[]]  # Empty list for processor 0
        for pro in range(1, STATE.size):
            temp_indexes = pypar.receive(pro)
            all_indexes.append(temp_indexes)
            if temp_indexes[-1] > array_len:
                array_len = temp_indexes[-1]
        # Create the whole dictionary, filled with rank 0 info
        for key in list(subdict.keys()):
            # Work-out the shape of arrays
            array_shape = list(subdict[key].shape)
            array_shape[0] = array_len + 1
            whole[key] = numpy.empty(tuple(array_shape), subdict[key].dtype)
            whole[key][indexes, ...] = subdict[key]
        for pro in range(1, STATE.size):
            subdict = pypar.receive(pro)
            for key in list(whole.keys()):
                whole[key][all_indexes[pro], ...] = subdict[key]
        return whole
    else:
        pypar.send(indexes, 0)
        pypar.send(subdict, 0)
Example #45
 def _get_statistic(self, source, statistic):
     if self.is_master:
         self.work_locally = True
         statistic_by_station = statistician.DatabaseStatistician._get_statistic(self, source, statistic)
         self.work_locally = False
         for sender in range(1, pypar.size()):
             statistic_by_station.update(pypar.receive(sender))
     else:
         if len(self.stations_local) > 0:
             statistic_by_station = statistician.DatabaseStatistician._get_statistic(self, source, statistic)
         else:
             statistic_by_station = {}
         pypar.send(statistic_by_station, root)
     return statistic_by_station
Example #46
	def loadSkew(self, numberTasks):
	
		'''Computes the skew in the number of tasks to be processed by each node
		
		The skew is the standard deviation of the task number across the nodes'''				
										
		if Environment.isParallel:
			import pypar
			if self.isRoot():
				taskDistribution = [numberTasks]
				for i in range(1, pypar.size()):
					numberTasks = pypar.receive(i)
					taskDistribution.append(numberTasks)
				
				mean = reduce(operator.add, taskDistribution)
				mean = mean/float(len(taskDistribution))
				
				#Std. dev
				stdev = 0
				for el in taskDistribution:
					stdev = stdev + math.pow((el - mean), 2)
					
				skew = stdev/float(len(taskDistribution))
				skew = math.sqrt(skew)	
				
				for i in range(1, pypar.size()):
					pypar.send(skew, i)
				
			else:
				#Send the fragment
				pypar.send(numberTasks, 0)
				#Retrieve the skew
				skew = pypar.receive(0)
		else:
			skew = 0								
										
		return skew
Example #47
	def combineDictionary(self, dictFragment):
	
		'''Combines a set of dictionary fragments from each processor into one dictionary'''

		if Environment.isParallel:
			import pypar
			if self.isRoot():
				completeDict = dictFragment
				for i in range(1, pypar.size()):
					fragment = pypar.receive(i)
					completeDict.update(fragment)
				
				#Send the combined dictionary
				for i in range(1, pypar.size()):
					pypar.send(completeDict, i)
			else:
				#Send the fragment
				pypar.send(dictFragment, 0)
				#Retrieve the combined dictionary
				completeDict = pypar.receive(0)
		else:
			completeDict = dictFragment
		
		return completeDict			
Example #48
    def grad_func(x0, *grad_args):
        sys.stdout.flush()
        x0 = numpy.asarray(x0)

        # Generate a list of parameter sets to evaluate the function at
        x_todo = [x0]
        for ii in range(len(x0)):
            eps = numpy.zeros(len(x0), numpy.float_)
            eps[ii] = epsilon
            x_todo.append(x0 + eps)

        # Break that list apart into work for each node
        x_by_node = []
        for node_ii in range(num_procs):
            x_by_node.append(x_todo[node_ii::num_procs])

        # Master sends orders to all other nodes.
        command = 'eval_func_over_list(func_str, x_todo, *grad_args)'
        for node_ii in range(1, num_procs):
            x_this_node = x_by_node[node_ii]
            arguments = {'func_str': func_str,
                         'x_todo': x_this_node}
            if send_grad_args:
                arguments['grad_args'] = grad_args
            pypar.send((command, arguments), node_ii)
        sys.stdout.flush()

        # This will hold the function evaluations done by each node.
        vals_by_node = []
        # The master node does its share of the work now
        vals_by_node.append(eval_func_over_list(func_str, x_by_node[0], 
                                                *grad_args))
        # Now receive the work done by each of the other nodes
        for node_ii in range(1, num_procs):
            vals_by_node.append(pypar.receive(node_ii))

        # Reform the function value list that's broken apart by node.
        func_evals = numpy.zeros(len(x0)+1, numpy.float_)
        for node_ii,vals_this_node in enumerate(vals_by_node):
            func_evals[node_ii::num_procs] = vals_this_node

        # Now calculate the gradient
        grad = numpy.zeros(len(x0), numpy.float_)
        f0 = func_evals[0]
        for ii,func_val in enumerate(func_evals[1:]):
            grad[ii] = (func_val - f0)/epsilon

        return grad
Example #49
def collect_arr(arr):
    """
    A useful collection routine for pypar.
    If you are using pypar to parallelize the set of nested loops and fill
    the resulting array, you usually need to combine the resulting array
    from several mpi threads. In that case you can just execute
    res=collect_arr(res)
    and it will add the arrays from all the threads and store the result on
    thread number 0.
    """
    if pypar.rank() > 0:
        pypar.send(arr, 0)
    else:
        for i in range(1, pypar.size()):
            arr = arr + pypar.receive(i)
    return arr
Example #50
	def broadcast(self, data, process):
	
		'''Broadcasts data from process to all other nodes'''
		
		if Environment.isParallel:
			import pypar
			if self.rank() == process:
				#NOTE: pypar broadcast won't work even when setting vanilla
				#It always returns a message truncated error.
				for i in range(pypar.size()):
					if i != self.rank():
						pypar.send(data, i)
			else:	
				data = pypar.receive(process)
		
		return data
Example #51
def collect_arr(arr):
    """
    A useful collection routine for pypar.
    If you are using pypar to parallelize the set of nested loops and fill
    the resulting array, you usually need to combine the resulting array
    from several mpi threads. In that case you can just execute
    res=collect_arr(res)
    and it will add the arrays from all the threads and store the result on
    thread number 0.
    """
    if pypar.rank() > 0:
        pypar.send(arr, 0)
    else:
        for i in range(1, pypar.size()):
            arr = arr + pypar.receive(i)
    return arr
Example #52
    def grad_func(x0, *grad_args):
        sys.stdout.flush()
        x0 = numpy.asarray(x0)

        # Generate a list of parameter sets to evaluate the function at
        x_todo = [x0]
        for ii in range(len(x0)):
            eps = numpy.zeros(len(x0), numpy.float_)
            eps[ii] = epsilon
            x_todo.append(x0 + eps)

        # Break that list apart into work for each node
        x_by_node = []
        for node_ii in range(num_procs):
            x_by_node.append(x_todo[node_ii::num_procs])

        # Master sends orders to all other nodes.
        command = 'eval_func_over_list(func_str, x_todo, *grad_args)'
        for node_ii in range(1, num_procs):
            x_this_node = x_by_node[node_ii]
            arguments = {'func_str': func_str, 'x_todo': x_this_node}
            if send_grad_args:
                arguments['grad_args'] = grad_args
            pypar.send((command, arguments), node_ii)
        sys.stdout.flush()

        # This will hold the function evaluations done by each node.
        vals_by_node = []
        # The master node does its share of the work now
        vals_by_node.append(
            eval_func_over_list(func_str, x_by_node[0], *grad_args))
        # Now receive the work done by each of the other nodes
        for node_ii in range(1, num_procs):
            vals_by_node.append(pypar.receive(node_ii))

        # Reform the function value list that's broken apart by node.
        func_evals = numpy.zeros(len(x0) + 1, numpy.float_)
        for node_ii, vals_this_node in enumerate(vals_by_node):
            func_evals[node_ii::num_procs] = vals_this_node

        # Now calculate the gradient
        grad = numpy.zeros(len(x0), numpy.float_)
        f0 = func_evals[0]
        for ii, func_val in enumerate(func_evals[1:]):
            grad[ii] = (func_val - f0) / epsilon

        return grad
Example #53
 def slave(self):
     if self.debug: print '[SLAVE %d]: I am processor %d of %d on node %s' % (self.myid, self.myid, self.numprocs, self.node)
     if self.debug: print '[SLAVE %d]: Entering work loop' % (self.myid,)
     while True:
         result, status = pypar.receive(source=0, tag=pypar.any_tag, return_status=True) 
         print '[SLAVE %d]: received work with tag %d from node %d'\
                   %(self.myid, status.tag, status.source)
        
         if (status.tag == PYPAR_DIETAG):
             print '[SLAVE %d]: received termination from node %d' % (self.myid, 0)
             return
         else:
             worknum = result
             if self.debug: print '[SLAVE %d]: work number is %s' % (self.myid, worknum)
             myresult = self.work.calcWorkResult(worknum)
             pypar.send(myresult, destination=0)
             if self.debug: print '[SLAVE %d]: sent result to node %d' % (self.myid, 0)
Example #54
 def calc_num_blocks(self):
     """
     pre-req: calc_lo_hi has been calculated - and only calculated once!
     """
     if self.is_parallel is True:
         import pypar
         # print "synchronise self.rank", self.rank
         if self.rank == 0:
             calc_num_blocks = self._make_block_file
             for source in range(1, self.size):
                 # print "waiting.."
                 received = pypar.receive(source)
                 # print "received", received
                 calc_num_blocks += received
             return calc_num_blocks
         else:
             # print "sending from ", self.rank
             pypar.send(self._make_block_file, 0)
Example #55
def collected(iterable):
    """
    Collect iterables back to the master.
    """
    P = pp.size()
    if P == 1:
        for el in iterable:
            yield el
    else:
        if pp.rank() == 0:
            results = list(iterable)
            for p in range(1, P):
                pres = pp.receive(p)
                results.extend(pres)
            for el in results:
                yield el
        else:
            pp.send(list(iterable), 0)
Example #56
 def calc_num_blocks(self):
     """
     pre-req: calc_lo_hi has been calculated - and only calculated once!
     """
     if self.is_parallel is True:
         import pypar
         # print "synchronise self.rank", self.rank
         if self.rank == 0:
             calc_num_blocks = self._make_block_file
             for source in range(1, self.size):
                 # print "waiting.."
                 received = pypar.receive(source)
                 # print "received", received
                 calc_num_blocks += received
             return calc_num_blocks
         else:
             # print "sending from ", self.rank
             pypar.send(self._make_block_file, 0)
Example #57
def ensemble_trajs(net, times, ensemble):
    """
    Return a list of trajectories evaluated at times for all parameter sets
    in ensemble.
    """
    traj_set = []
    elems_assigned = [ensemble[node::num_procs] for node in range(num_procs)]
    for worker in range(1, num_procs):
        command = 'Ensembles.few_ensemble_trajs(net, times, elements)'
        args = {'net': net, 'times': times, 'elements': elems_assigned[worker]}
        pypar.send((command, args), worker)

    traj_set = few_ensemble_trajs(net, times, elems_assigned[0])

    for worker in range(1, num_procs):
        traj_set.extend(pypar.receive(worker))

    return traj_set