Example #1
from pyopus.parallel.mpi import MPI


def jobProcessor(value):
    # Identify the MPI host and task slot this job runs on
    hostID = MPI.hostID()
    taskID = MPI.taskID()

    print("Processing " + str(value) + " on " + str(hostID) + " " +
          str(taskID))
    return 2 * value
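
A minimal sketch of how a job function like jobProcessor can be farmed out to MPI slots with the pyopus cooperative scheduler. The cOS module path and the cOS.setVM()/cOS.dispatch()/cOS.finalize() calls are assumptions based on the pyopus cooperative multitasking API, and the job count of 100 is an arbitrary choice.

from pyopus.parallel.cooperative import cOS  # assumed module path
from pyopus.parallel.mpi import MPI

if __name__ == '__main__':
    # Hand the MPI virtual machine to the cooperative scheduler (assumed API)
    cOS.setVM(MPI(debug=0))

    # A generator of (function, args) jobs; jobProcessor is the function
    # defined above and is assumed to live in a module the workers can import
    jobGen = ((jobProcessor, [value]) for value in range(100))

    # Dispatch jobs to free slots; results are assumed to come back
    # in the order in which the jobs were generated
    results = cOS.dispatch(jobList=jobGen, remote=True)
    print("Results: " + str(results))

    # Shut down the scheduler and the MPI virtual machine
    cOS.finalize()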
Example #2
from pyopus.parallel.mpi import MPI
# Cooperative multitasking scheduler; module path assumed from the pyopus demos
from pyopus.parallel.cooperative import cOS


def printMsgMPI(msg, n):
    hostID = MPI.hostID()
    taskID = MPI.taskID()

    # Yield after every printed line so other cooperative tasks can run
    for ii in range(n):
        print("h=" + str(hostID) + " t=" + str(taskID) + ": " + msg + " : " + str(ii))
        cOS.Yield()
    return n
Example #3
from pyopus.parallel.mpi import MPI
# Cooperative multitasking scheduler; module path assumed from the pyopus demos
from pyopus.parallel.cooperative import cOS


def printMsgMPI(msg, n):
    hostID = MPI.hostID()
    taskID = MPI.taskID()

    # Yield after every printed line so other cooperative tasks can run
    for ii in range(n):
        print("h=" + str(hostID) + " t=" + str(taskID) + ": " + msg + " : " +
              str(ii))
        cOS.Yield()
    return n
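
A minimal sketch of running two printMsgMPI tasks concurrently so that their cOS.Yield() calls interleave the output. The cOS.Spawn()/cOS.Join() names, their signatures, and the assumption that Join() returns a dictionary mapping finished task IDs to return values are taken from the pyopus cooperative multitasking API as assumptions, not confirmed details.

from pyopus.parallel.cooperative import cOS  # assumed module path
from pyopus.parallel.mpi import MPI

if __name__ == '__main__':
    # Hand the MPI virtual machine to the cooperative scheduler (assumed API)
    cOS.setVM(MPI(debug=0))

    # Spawn two tasks on remote slots (Spawn signature assumed);
    # printMsgMPI as defined above, assumed importable by the workers
    tidA = cOS.Spawn(printMsgMPI, args=["Hello A", 10], remote=True)
    tidB = cOS.Spawn(printMsgMPI, args=["Hello B", 15], remote=True)

    # Join() is assumed to block until at least one spawned task finishes
    # and to return a dictionary mapping task IDs to return values
    running = set([tidA, tidB])
    while running:
        finished = cOS.Join()
        running -= set(finished.keys())

    cOS.finalize()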
Example #4
import random

from numpy import zeros

from pyopus.parallel.mpi import MPI
# Optimizer imports; module paths assumed from the pyopus demos
from pyopus.optimizer.de import DifferentialEvolution
from pyopus.optimizer.base import CostCollector


def deRun(prob, popSize, runIndex, maxiter=75000, maxGen=1500, w=0.5, pc=0.3):
    hostID = MPI.hostID()
    taskID = MPI.taskID()
    print(str(hostID) + " " + str(taskID) +
          (" evaluating %s, run=%2d, popsize=%3d" %
           (prob.name, runIndex + 1, popSize)))

    # Reseed the random generator so parallel runs do not repeat the same sequence
    random.seed()
    opt = DifferentialEvolution(
        prob, prob.xl, prob.xh, debug=0, maxiter=maxiter,
        maxGen=maxGen, populationSize=popSize, w=w, pc=pc
    )
    # Record the cost function value at every evaluation
    cc = CostCollector()
    opt.installPlugin(cc)
    opt.reset(zeros(len(prob.xl)))
    opt.run()
    cc.finalize()

    return (opt.f, cc.fval)
Example #5
import random

from numpy import zeros

from pyopus.parallel.mpi import MPI
# Optimizer imports; module paths assumed from the pyopus demos
from pyopus.optimizer.de import DifferentialEvolution
from pyopus.optimizer.base import CostCollector


def deRun(prob, popSize, runIndex, maxiter=75000, maxGen=1500, w=0.5, pc=0.3):
    hostID = MPI.hostID()
    taskID = MPI.taskID()
    print(str(hostID) + " " + str(taskID) + (
        " evaluating %s, run=%2d, popsize=%3d" %
        (prob.name, runIndex + 1, popSize)))

    # Reseed the random generator so parallel runs do not repeat the same sequence
    random.seed()
    opt = DifferentialEvolution(prob,
                                prob.xl,
                                prob.xh,
                                debug=0,
                                maxiter=maxiter,
                                maxGen=maxGen,
                                populationSize=popSize,
                                w=w,
                                pc=pc)
    # Record the cost function value at every evaluation
    cc = CostCollector()
    opt.installPlugin(cc)
    opt.reset(zeros(len(prob.xl)))
    opt.run()
    cc.finalize()

    return (opt.f, cc.fval)
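
A minimal sketch of dispatching several deRun calls in parallel. SphereProblem is a hypothetical stand-in for a pyopus test problem (any callable cost function with name, xl and xh attributes works for deRun as written); cOS.setVM()/cOS.dispatch()/cOS.finalize() are the same assumed API as in the earlier dispatch sketch, and in real use these definitions would live in a module the worker processes can import.

from numpy import zeros
from pyopus.parallel.cooperative import cOS  # assumed module path
from pyopus.parallel.mpi import MPI


class SphereProblem(object):
    # Hypothetical stand-in for a pyopus test problem: a callable cost
    # function exposing the name/xl/xh attributes that deRun() uses
    name = "sphere"
    xl = zeros(5) - 10.0
    xh = zeros(5) + 10.0

    def __call__(self, x):
        return (x ** 2).sum()


if __name__ == '__main__':
    cOS.setVM(MPI(debug=0))  # assumed cOS API, as in the earlier sketch

    prob = SphereProblem()
    # One job per (population size, run index) combination
    jobs = (
        (deRun, [prob, popSize, runIndex])
        for popSize in [20, 40]
        for runIndex in range(5)
    )

    # Each job returns (final cost, per-evaluation cost history)
    results = cOS.dispatch(jobList=jobs, remote=True)

    cOS.finalize()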
Example #6
# Print statistics

import sys

# Starting a task with mpirun starts multiple identical processes.
# If MPI is imported then the main program is executed only at slot 0.
# If not, all slots execute the main program.
from pyopus.parallel.mpi import MPI as VM

if __name__ == '__main__':
    vm = VM(debug=2)

    # Print info
    print("---- Master")
    print("Host ID   : " + str(vm.hostID()))
    print("Task ID   : " + str(vm.taskID()))
    print("Parent ID : " + str(vm.parentTaskID()))

    # Print hosts and processes
    print("---- Hosts and tasks\n" + vm.formatSpawnerConfig() + "----")

    # Print process slot info
    print("Total process slots: " + str(vm.slots()))
    print("Free process slots : " + str(vm.freeSlots()))

    vm.finalize()
Example #7
from pyopus.parallel.mpi import MPI


def jobProcessor(value):
    # Identify the MPI host and task slot this job runs on
    hostID = MPI.hostID()
    taskID = MPI.taskID()

    print("Processing " + str(value) + " on " + str(hostID) + " " + str(taskID))
    return 2 * value
Example #8
# Print statistics

import sys

# Starting a task with mpirun starts multiple identical processes.
# If MPI is imported then the main program is executed only at slot 0.
# If not, all slots execute the main program.
from pyopus.parallel.mpi import MPI as VM

if __name__ == '__main__':
    vm = VM(debug=2)

    # Print info
    print("---- Master")
    print("Host ID   : " + str(vm.hostID()))
    print("Task ID   : " + str(vm.taskID()))
    print("Parent ID : " + str(vm.parentTaskID()))

    # Print hosts and processes
    print("---- Hosts and tasks\n" + vm.formatSpawnerConfig() + "----")

    # Print process slot info
    print("Total process slots: " + str(vm.slots()))
    print("Free process slots : " + str(vm.freeSlots()))

    vm.finalize()
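
A short sketch of how the report above might be used in practice: dispatch work remotely only when free slots are actually available, otherwise let the jobs run locally. The decision rule is purely illustrative, and cOS.setVM()/cOS.dispatch()/cOS.finalize(), including the idea that remote=False runs jobs as local cooperative tasks, are the same assumed API as in the dispatch sketch after Example #1.

from pyopus.parallel.cooperative import cOS  # assumed module path
from pyopus.parallel.mpi import MPI as VM


def square(x):
    # Trivial illustrative job
    return x * x


if __name__ == '__main__':
    vm = VM(debug=0)

    # Illustrative policy: go remote only if at least one worker slot is free
    remote = vm.freeSlots() > 0

    # Hand the already-constructed VM to the scheduler (assumed cOS API)
    cOS.setVM(vm)
    results = cOS.dispatch(
        jobList=((square, [ii]) for ii in range(10)),
        remote=remote
    )
    print("Results: " + str(results))

    cOS.finalize()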