Example #1
def jobProcessor(value):
    hostID = MPI.hostID()
    taskID = MPI.taskID()

    print("Processing " + str(value) + " on " + str(hostID) + " " +
          str(taskID))
    return 2 * value
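
jobProcessor() is the worker half of a master/worker setup: the master hands out values and collects the doubled results. A minimal sketch of the corresponding master program, assuming the (callable, argument-list) job tuple format for cOS.dispatch() and that jobProcessor lives in an importable funclib module:

from pyopus.parallel.cooperative import cOS
from pyopus.parallel.mpi import MPI
from funclib import jobProcessor

if __name__ == '__main__':
    # Use MPI as the parallel backend of the cooperative scheduler
    cOS.setVM(MPI())

    # One job per value; the (callable, argument list) tuple format is an assumption
    jobs = [(jobProcessor, [value]) for value in range(10)]

    # Run the jobs on workers and collect their return values
    results = cOS.dispatch(jobList=jobs, remote=True)
    print(results)

    # Shut down the virtual machine
    cOS.finalize()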
Example #2
def printMsgMPI(msg, n):
	hostID=MPI.hostID()
	taskID=MPI.taskID()
	
	for ii in range(n):
		print("h="+str(hostID)+" t="+str(taskID)+": "+msg+" : "+str(ii))
		cOS.Yield()
	return n
Example #3
def printMsgMPI(msg, n):
    hostID = MPI.hostID()
    taskID = MPI.taskID()

    for ii in range(n):
        print("h=" + str(hostID) + " t=" + str(taskID) + ": " + msg + " : " +
              str(ii))
        cOS.Yield()
    return n
Example #4
def deRun(prob, popSize, runIndex, maxiter=75000, maxGen=1500, w=0.5, pc=0.3):
	hostID=MPI.hostID()
	taskID=MPI.taskID()
	print(str(hostID)+" "+str(taskID)+(" evaluating %s, run=%2d, popsize=%3d" % (prob.name, runIndex+1, popSize)))
	
	random.seed()
	opt=DifferentialEvolution(
		prob, prob.xl, prob.xh, debug=0, maxiter=maxiter, 
		maxGen=maxGen, populationSize=popSize, w=w, pc=pc
	)
	cc=CostCollector()
	opt.installPlugin(cc)
	opt.reset(zeros(len(prob.xl)))
	opt.run()
	cc.finalize()
	
	return (opt.f, cc.fval)
	
Example #5
def deRun(prob, popSize, runIndex, maxiter=75000, maxGen=1500, w=0.5, pc=0.3):
    hostID = MPI.hostID()
    taskID = MPI.taskID()
    print(str(hostID) + " " + str(taskID) + (
        " evaluating %s, run=%2d, popsize=%3d" %
        (prob.name, runIndex + 1, popSize)))

    random.seed()
    opt = DifferentialEvolution(prob,
                                prob.xl,
                                prob.xh,
                                debug=0,
                                maxiter=maxiter,
                                maxGen=maxGen,
                                populationSize=popSize,
                                w=w,
                                pc=pc)
    cc = CostCollector()
    opt.installPlugin(cc)
    opt.reset(zeros(len(prob.xl)))
    opt.run()
    cc.finalize()

    return (opt.f, cc.fval)
Example #6
# Measures the message delay and average transfer speed for messages of various sizes

import sys
from pyopus.parallel.mpi import MPI as VM
from pyopus.parallel.base import MsgTaskExit, MsgTaskResult
import funclib
import os, time
import numpy as np

if __name__ == '__main__':
    # Set work directory on worker to be the same as on the spawner.
    vm = VM(startupDir=os.getcwd(), debug=1)

    # Get hosts, find a non-local host
    myHostID = vm.hostID()

    # Find a remote host
    for hostID in vm.hosts():
        if hostID != myHostID:
            break

    # See if we have at least one remote host.
    if hostID == myHostID:
        print("\nWarning. Measuring local communication speed.")

    # Prepare data sizes
    dataSizes = [0, 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000]

    # Spawn bounceBack()
    taskIDs = vm.spawnFunction(funclib.bounceBack,
                               kwargs={'vm': vm},
                               targetList=[hostID],
                               count=1)
Example #7
			
			print "Received func=%2d, run=%2d, popsize=%3d "
			
			if writeFhist:
				fp=open("fhist_f%d_p%d_r%d.pck" % (funcIndicesList[iFunc], popSizeList[iPopSize], iRun), "wb")
				dump(fHistory, fp, protocol=-1)
				fp.close()
			
			finalF[iFunc][iPopSize][iRun]=fBest
	except:
		print "Finished"
	
	
	
if __name__=='__main__':
	cOS.setVM(MPI(startupDir=os.getcwd()))
	
	# Prepare results storage
	finalF=zeros((len(funcIndicesList), len(popSizeList), nRun))
	
	# Dispatch jobs
	cOS.dispatch(
		jobList=jobGenerator(), 
		collector=jobCollector(finalF), 
		remote=True 
	)
	
	# Prepare function names
	names=[]
	dims=[]
	for i in funcIndicesList:
Example #8
# Asynchronous spawning of multiple tasks

import sys
from pyopus.parallel.mpi import MPI as VM
	
from pyopus.parallel.base import MsgTaskExit, MsgTaskResult

import funclib
import os

if __name__=='__main__':
	vm=VM(startupDir=os.getcwd(), debug=0)
	
	# Prepare expressions
	exprList=["1+1", "5*5", "bla*bla", "2**7"]
	
	# Create expression to taskID map, initialize values to None
	expr2taskID=dict.fromkeys(exprList)
	
	# Spawn evaluators that send MsgTaskResult messages with return value (sendBack=True). 
	taskIDList=[]
	taskCount=0
	for expr in exprList:
		print("Spawning evaluator for: "+expr)
		taskIDs=vm.spawnFunction(funclib.pyEvaluator, kwargs={'vm': vm, 'expr': expr}, count=1, sendBack=True)
		if len(taskIDs)>0:
			# Spawn OK
			taskIDList.extend(taskIDs)
			expr2taskID[expr]=taskIDs[0]
			taskCount+=1
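
After the spawning loop the results still have to be picked up from the message queue. A minimal sketch of that collection loop, assuming receiveMessage() returns a (sourceTaskID, message) pair and that MsgTaskResult exposes the return value as returnValue (both the return shape and the attribute name are assumptions):

	# Hypothetical collection loop; message unpacking and attribute names are assumptions.
	resultsReceived=0
	while resultsReceived<taskCount:
		recv=vm.receiveMessage()
		if recv is None:
			continue
		(srcID, msg)=recv
		if isinstance(msg, MsgTaskResult):
			print("Result from "+str(srcID)+": "+str(msg.returnValue))
			resultsReceived+=1
		elif isinstance(msg, MsgTaskExit):
			print("Task "+str(srcID)+" exited.")
	
	vm.finalize()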
Example #9
    shutil.copy2(
        '../_MAIN_work/globalVars.py', datadirname +
        '/globalVars.py')  #copy current globalVars script for logging reasons
    shutil.copy2('../_MAIN_work/scoreFunctions.py',
                 datadirname + '/scoreFunctions.py')
    output = open("data.pkl", "wb")
    os.chdir("../_MAIN_work")

    # Set up MPI for parallel computing

    cOS.setVM(
        MPI(
            mirrorMap={
                'models.inc': '.',
                'topdc.cir': '.',
                'topdc_psrr.cir': '.',
                'mosmm.inc': '.',
                'cmos180n.lib': '.',
                #'testTopCirc_hspice_AF.cir':'.',
                #'testTopCirc_hspice_AF_outimped.cir':'.'
            }))

    generationNum = 0
    optimizedIndivids = []  #PSADE parameter-optimized individuals

    hotGen = population()  #hot generation (current)
    oldGen = population()  #previous generation (old)

    currentBestScore = np.inf
    bestScoresList = []
    averageScoresList = []
Example #10
# Demonstrates mirrored storage cleanup

import sys
from pyopus.parallel.mpi import MPI as VM

import funclib
import os

if __name__ == '__main__':
    vm = VM(debug=2)

    # Clean up local storage on all machines in the virtual machine.
    print("\nCleaning up.")
    vm.clearLocalStorage(timeout=6.0)

    vm.finalize()
Example #11
# Print statistics

import sys

# Starting a task with mpirun starts multiple identical processes. 
# If MPI is imported then the main program is executed only at slot 0. 
# If not, all slots execute the main program. 
from pyopus.parallel.mpi import MPI as VM

if __name__=='__main__':
	vm=VM(debug=2)
	
	# Print info
	print("---- Master")
	print("Host ID   : "+str(vm.hostID()))
	print("Task ID   : "+str(vm.taskID()))
	print("Parent ID : "+str(vm.parentTaskID()))
	
	# Print hosts and processes
	print("---- Hosts and tasks\n"+vm.formatSpawnerConfig()+"----")
	
	# Print process slot info
	print("Total process slots: "+str(vm.slots()))
	print("Free process slots : "+str(vm.freeSlots()))
	
	vm.finalize()
	
Example #12
# Spawns a remote task, handles messages (collects results, detects exit)

import sys
from pyopus.parallel.mpi import MPI as VM
# By default stdout is forwarded to the mpirun terminal

import funclib 
from pyopus.parallel.base import MsgTaskExit, MsgTaskResult
import os, time

if __name__=='__main__':
	# Startup dir must be the same as the one where funclib is located 
	# so we can import it (funclib is not in PYTHONPATH). 
	# MPI guarantees this by default, while PVM does not. 
	vm=VM(startupDir=os.getcwd(), debug=2)
	
	# Get host list. 
	hostIDs=vm.hosts()
	initialFreeSlots=vm.freeSlots()
	print("Hosts: ")
	for hostID in hostIDs:
		print("  "+str(hostID))
	print("Free slots: "+str(initialFreeSlots))
	
	# Spawn 2 tasks anywhere, send vm as argument with name 'vm'.  
	# The spawned function must be defined in an importable module outside the main .py file. 
	print("\nSpawning 2 tasks, anywhere.")
	taskIDs=vm.spawnFunction(funclib.hello, kwargs={'vm': vm}, count=2)
	print("Spawned: ")
	for task in taskIDs:
		print "  ", str(task)
Example #13
# Demonstrates file mirroring

import sys
from pyopus.parallel.mpi import MPI as VM
	
import funclib
import os

if __name__=='__main__':
	# Startup dir must contain funclib so we can import it on a worker 
	# (funclib is not in PYTHONPATH). 
	# Mirror the spawner's current dir to the worker's local storage. 
	# The startup dir is by default the created local storage dir. 
	vm=VM(mirrorMap={'*':'.'}, debug=2)
	
	# Spawn 1 task anywhere, send vm as argument with name 'vm'.  
	# The spawned function must be defined in an importable module outside the main .py file. 
	# Print some status information and local storage layout. 
	print("\nSpawning task.")
	taskIDs=vm.spawnFunction(funclib.helloLs, kwargs={'vm': vm}, count=1)
	print(taskIDs)
	print("Spawned: "+str(taskIDs[0]))
	print("Collecting stdout ...")
	
	# Wait for a message, e.g. TaskExit
	vm.receiveMessage()
	
	vm.finalize()
Example #14
# Print statistics

import sys

# Starting a task with mpirun starts multiple identical processes.
# If MPI is imported then the main program is executed only at slot 0.
# If not, all slots execute the main program.
from pyopus.parallel.mpi import MPI as VM

if __name__ == '__main__':
    vm = VM(debug=2)

    # Print info
    print("---- Master")
    print("Host ID   : " + str(vm.hostID()))
    print("Task ID   : " + str(vm.taskID()))
    print("Parent ID : " + str(vm.parentTaskID()))

    # Print hosts and processes
    print("---- Hosts and tasks\n" + vm.formatSpawnerConfig() + "----")

    # Print process slot info
    print("Total process slots: " + str(vm.slots()))
    print("Free process slots : " + str(vm.freeSlots()))

    vm.finalize()
Example #15
# Spawns a remote task, handles messages (collects results, detects exit)

import sys
from pyopus.parallel.mpi import MPI as VM
# By default stdout is forwarded to the mpirun terminal

import funclib
from pyopus.parallel.base import MsgTaskExit, MsgTaskResult
import os, time

if __name__ == '__main__':
    # Startup dir must be the same as the one where funclib is located
    # so we can import it (funclib is not in PYTHONPATH).
    # MPI guarantees this by default, while PVM does not.
    vm = VM(startupDir=os.getcwd(), debug=2)

    # Get host list.
    hostIDs = vm.hosts()
    initialFreeSlots = vm.freeSlots()
    print("Hosts: ")
    for hostID in hostIDs:
        print("  " + str(hostID))
    print("Free slots: " + str(initialFreeSlots))

    # Spawn 2 tasks anywhere, send vm as argument with name 'vm'.
    # The spawned function must be defined in an importable module outside the main .py file.
    print("\nSpawning 2 tasks, anywhere.")
    taskIDs = vm.spawnFunction(funclib.hello, kwargs={'vm': vm}, count=2)
    print("Spawned: ")
    for task in taskIDs:
        print "  ", str(task)
Example #16
    atDesign = {
        'dif_l': 6.816424e-07,
        'dif_w': 3.332037e-06,
        'nl_l': 2.655088e-06,
        'nl_w': 4.977226e-05,
        'nm_l': 3.665018e-06,
        'nm_w': 7.507191e-05,
        'pm_l': 1.487570e-06,
        'pm_w0': 2.871096e-05,
        'pm_w1': 2.871096e-05,
        'pm_w2': 6.389441e-05,
        'pm_w3': 8.310102e-05,
    }

    # Prepare parallel environment
    cOS.setVM(MPI(mirrorMap={'*': '.'}))

    # 3-sigma target yield
    yt = YieldTargeting(
        designParams,
        statParams,
        opParams,
        heads,
        analyses,
        measures,
        variables=variables,
        beta=3.0,
        wcSpecs=wcList,
        # Comment out to use default initial point (lo+hi)/2
        # initial=atDesign,
        initialNominalDesign=True,
Example #17
# Measures the message delay and average transfer speed for messages of various sizes 

import sys
from pyopus.parallel.mpi import MPI as VM
from pyopus.parallel.base import MsgTaskExit, MsgTaskResult
import funclib
import os, time
import numpy as np

if __name__=='__main__':
	# Set work directory on worker to be the same as on the spawner. 
	vm=VM(startupDir=os.getcwd(), debug=1)
	
	# Get hosts, find a non-local host
	myHostID=vm.hostID()
	
	# Find a remote host
	for hostID in vm.hosts():
		if hostID!=myHostID:
			break
	
	# See if we have at least one remote host. 
	if hostID==myHostID:
		print("\nWarning. Measuring local communication speed.")
	
	# Prepare data sizes
	dataSizes=[0, 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000]
	
	# Spawn bounceBack()
	taskIDs=vm.spawnFunction(funclib.bounceBack, kwargs={'vm': vm}, targetList=[hostID], count=1)
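
The listing stops after the spawn; a minimal sketch of the round-trip timing loop that could follow, assuming sendMessage(taskID, data) / receiveMessage() echo semantics for bounceBack() (the call signatures and the payload construction are assumptions):

	# Hypothetical timing loop; sendMessage()/receiveMessage() usage is an assumption.
	if len(taskIDs)>0:
		bounceTask=taskIDs[0]
		for dataSize in dataSizes:
			data=np.zeros(dataSize, dtype='b')	# dataSize bytes of payload
			t0=time.time()
			vm.sendMessage(bounceTask, data)	# send payload to the echo task
			vm.receiveMessage()			# wait for the echoed copy
			dt=time.time()-t0
			print("size=%9d B  round trip=%9.6f s" % (dataSize, dt))
	
	vm.finalize()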
	
Example #18
def jobProcessor(value):
	hostID=MPI.hostID()
	taskID=MPI.taskID()
	
	print("Processing "+str(value)+ " on "+ str(hostID)+" "+str(taskID))
	return 2*value
Example #19
from pyopus.parallel.cooperative import cOS
from pyopus.parallel.mpi import MPI
from funclib import jobProcessor

# Result at which we stop
stopAtResult = 150

# Minimal and maximal number of parallel tasks
# The maximal number of parallel tasks can be infinite (set maxTasks to None)
minTasks = 1
maxTasks = 1000

if __name__ == '__main__':
    # Set up MPI
    cOS.setVM(MPI())

    # This list will hold the jobs (values that are doubled)
    jobs = []

    # This list will be filled with results
    results = []

    # Stop the loop
    stop = False

    # Running task status storage
    running = {}

    # Job index of next job
    atJob = 0
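
The listing stops before the main loop. A minimal sketch of how the asynchronous loop could continue, assuming cOS.Spawn()/cOS.Join() with the keyword arguments shown below (their exact signatures are assumptions; the minTasks/free-slot bookkeeping is left out for brevity):

    # Hypothetical asynchronous loop; cOS.Spawn()/cOS.Join() signatures are assumptions.
    while not stop or len(running) > 0:
        # Submit new jobs while there is room and we were not told to stop
        while not stop and len(running) < maxTasks:
            jobs.append(atJob)
            tid = cOS.Spawn(jobProcessor, args=[atJob], remote=True, block=True)
            running[tid] = atJob
            atJob += 1

        # Wait for at least one task to finish and collect its result
        for tid, result in cOS.Join(block=True).items():
            del running[tid]
            results.append(result)
            # Stop submitting new jobs once the target result is reached
            if result >= stopAtResult:
                stop = True

    print("Results: " + str(results))
    cOS.finalize()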
Example #20
# Demonstrates mirrored storage cleanup

import sys
from pyopus.parallel.mpi import MPI as VM
	
import funclib
import os

if __name__=='__main__':
	vm=VM(debug=2)
	
	# Clean up local storage on all machines in the virtual machine. 
	print("\nCleaning up.")
	vm.clearLocalStorage(timeout=6.0)

	vm.finalize()