# Dispatches a fixed number of tasks to computational nodes
# mpirun -n 4 python 03-dispatch.py

from pyopus.parallel.cooperative import cOS
from pyopus.parallel.mpi import MPI
from funclib import jobGenerator, jobProcessor, jobCollector

if __name__=='__main__':
	# Set up MPI as the virtual machine backing cOS task dispatch
	cOS.setVM(MPI())

	# Dispatch 100 jobs; each job calls jobProcessor(value) on a remote node.
	# FIX: the original used Python 2 xrange() alongside Python 3 print();
	# range() works on both and removes the NameError under Python 3.
	results=cOS.dispatch(
		jobList=((jobProcessor, [value]) for value in range(100)),
		remote=True
	)

	print("Results: "+str(results))

	# Finish, need to do this if MPI is used
	cOS.finalize()
			# Tail of a collector coroutine whose header is outside this view:
			# stores the best objective value of one run into the 3-D result array.
			# Indexing: (function index, population-size index, run index) — matches
			# the zeros(...) allocation below. TODO confirm against the full collector.
			finalF[iFunc][iPopSize][iRun]=fBest
	# NOTE(review): bare except silently ends collection on ANY error, not just
	# GeneratorExit — consider narrowing the exception type.
	except:
		print "Finished"

if __name__=='__main__':
	# Use MPI as the cOS virtual machine; workers start in the current directory
	cOS.setVM(MPI(startupDir=os.getcwd()))

	# Prepare results storage: one slot per (function, population size, run)
	finalF=zeros((len(funcIndicesList), len(popSizeList), nRun))

	# Dispatch jobs; the collector fills finalF as results arrive
	cOS.dispatch(
		jobList=jobGenerator(),
		collector=jobCollector(finalF),
		remote=True
	)

	# Prepare function names and dimensions by instantiating each benchmark problem
	names=[]
	dims=[]
	for i in funcIndicesList:
		prob=glbc.GlobalBCsuite[i]()
		names.append(prob.name)
		dims.append(prob.n)

	# Store summary (dict literal continues beyond this chunk)
	summary={
		'funcIndices': funcIndicesList,
		'names': names,
	# Continuation of a call that starts outside this view (presumably building
	# a random individual's circuit matrix — TODO confirm against full file).
	createRandomBigCircuitMatrix(copy(createRandomValueVector())))
if globalVars.insertAdam:
	# Seed slot 0 of the population with a hand-designed reference circuit
	hotGen.pool[
		0] = AE.adam  #Insert an already designed circuit to optimise.
	hotGen.pool[
		0].fullRedundancyMatrix = fullRedundancyBigCircuitMatrix(
			AE.adam.BigCircuitMatrix)
	print "Adam-circuit inserted into population."
raw_input("...initial population created.")

#---EVALUATE & SORT INITIAL POPULATION---#
print "EVALUATING GENERATION %d" % generationNum
stw0 = time()  # wall-clock start of the parallel evaluation
# One job per individual; each job runs PROBLEM(individual, generation, index, True)
# on a remote node via the cooperative MPI scheduler.
results = cOS.dispatch(jobList=((PROBLEM, [hotGen.pool[i], generationNum, i, True]) for i in range(0, len(hotGen.pool))), remote=True)
results = np.array(results)
#Put together objects and objectivesScores
# Column 0 of results holds the objective scores; copy each back onto its individual
for i in range(0, len(hotGen.pool)):
	hotGen.pool[i].objectivesScore = np.transpose(results[:, 0])[i]
stw1 = time()
print "Evaluation of initial population lasted for %f s" % (stw1 - stw0)
#Sort population (non-dominated sorting for multi-objective ranking)
staSort = time()
faster_nondominated_sort(hotGen)
endSort = time()
print "Fast_nondominated_sort finished in %.2f s." % (endSort - staSort)
# Dispatches tasks to computational nodes until specified result is reached or exceeded
# This example also demonstrates the use of a collector.
# mpirun -n 4 python 04-dyndispatch.py

from pyopus.parallel.cooperative import cOS
from pyopus.parallel.mpi import MPI
from funclib import dynJobGenerator, jobProcessor, jobCollector

if __name__ == '__main__':
	# MPI backs the cooperative scheduler's virtual machine
	cOS.setVM(MPI())

	# Collected results accumulate here
	results = []

	# Jobs start at 0 and increase by 1; the collector halts dispatch
	# once a result of 150 or more arrives.
	producer = dynJobGenerator(start=0, step=1)
	consumer = jobCollector(results, stopAtResult=150)
	cOS.dispatch(jobList=producer, collector=consumer, remote=True)

	print("Results: " + str(results))

	# Shut down the VM; required whenever MPI is in use
	cOS.finalize()
		# Branch continuation (the `if` and data[0..1] unpacking are outside this
		# view): restore a previously saved run from the unpickled `data` tuple.
		# Assumed layout — TODO confirm against the saving code:
		#   data[2]=best scores, data[3]=result, data[4]=index of best individual,
		#   data[5]=data dir name (unused), data[6]=matrix size, data[7]=population
		#   size, data[8]=average scores.
		bestScoresList = data[2]
		result = data[3]
		bestI = data[4]
		#datadirname = data[5]
		BigMatrixSize = data[6]
		POP_SIZE = data[7]
		averageScoresList = data[8]
		#generations.append(generation)
		# Work on a deep copy so the restored generation is never mutated in place
		hotGen = deepcopy(generation)
		raw_input("...old population resurrected.")
	else:
		# No saved run: build a fresh random population in parallel,
		# one remote job per individual.
		print "Creating initial population. Press any to proceed..."
		NEWindividuals = cOS.dispatch(
			jobList=((dispatchRandomCircuitObjectGeneration, [i])
				for i in range(0, POP_SIZE)),
			remote=True)
		for i in range(0, POP_SIZE):
			hotGen.add_individual(NEWindividuals[i])
		if insertAdam:
			# Seed slot 0 with a hand-designed reference circuit
			hotGen.pool[
				0] = AE.adam  #Insert an already designed circuit to optimise.
			hotGen.pool[
				0].fullRedundancyMatrix = fullRedundancyBigCircuitMatrix(
					AE.adam.BigCircuitMatrix)
			print "Adam-circuit inserted into population."
		raw_input("...initial population created.")
		#---CREATE INITIAL POPULATION---#

#---EVALUATE & SORT INITIAL POPULATION---#
# Dispatches tasks to computational nodes until specified result is reached or exceeded # This example also demonstrates the use of a collector. # mpirun -n 4 python 04-dyndispatch.py from pyopus.parallel.cooperative import cOS from pyopus.parallel.mpi import MPI from funclib import dynJobGenerator, jobProcessor, jobCollector if __name__=='__main__': # Set up MPI cOS.setVM(MPI()) # This list will be filled with results results=[] cOS.dispatch( jobList=dynJobGenerator(start=0, step=1), # Start at 0, increase by one collector=jobCollector(results, stopAtResult=150), remote=True ) print("Results: "+str(results)) # Finish, need to do this if MPI is used cOS.finalize()