Example #1
	atDesign={
		'dif_l':    2.170269e-06,
		'dif_w':    4.396577e-06,
		'load_l':    2.800742e-06,
		'load_w':    6.228898e-05,
		'mirr_l':    3.378882e-07,
		'mirr_ld':    2.072842e-06,
		'mirr_w':    5.311666e-05,
		'mirr_wd':    1.979695e-06,
		'mirr_wo':    4.983195e-05,
		'out_l':    6.257517e-07,
		'out_w':    3.441845e-05,
		'r_out':    1.402169e+05
	}

	# Prepare parallel environment
	cOS.setVM(MPI(mirrorMap={'*':'.'}))
	
	# 3-sigma target yield
	yt=YieldTargeting(
		designParams, statParams, opParams, 
		heads, analyses, measures, variables=variables, 
		beta=3.0, wcSpecs=wcList, 
		# Comment out to use default initial point (lo+hi)/2
		# initial=atDesign, 
		initialNominalDesign=True, 
		# Norms for measures with zero goal
		norms={ 'area': 100e-12, 'vgs_drv': 1e-3, 'vds_drv':1e-3 }, 
		tradeoffs=1e-6, # Tradeoff optimization weight, can be overridden in *CbdOptions
		stopWhenAllSatisfied=True, 
		# Initial nominal optimization
		initialCbdOptions={ 
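The example is truncated at this point. As a rough sketch of how such a setup is typically finished off, assuming the options dict above is closed: the YieldTargeting object being callable and cOS.finalize() follow pyopus usage, but the exact result handling here is an assumption, not part of the original.

	# ... remaining options truncated in the original example ...
	)

	# Run yield targeting; assumption: the object is callable and
	# returns the optimized design parameters
	atOpt=yt()

	# Shut down the parallel environment
	cOS.finalize()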
Example #2
from pyopus.parallel.cooperative import cOS
from pyopus.parallel.mpi import MPI
from funclib import jobProcessor

# Result at which we stop
stopAtResult = 150

# Minimal and maximal number of parallel tasks
# The maximal number of parallel tasks can be infinite (set maxTasks to None)
minTasks = 1
maxTasks = 1000

if __name__ == '__main__':
    # Set up MPI
    cOS.setVM(MPI())

    # This list will hold the jobs (values to be doubled)
    jobs = []

    # This list will be filled with results
    results = []

    # Flag that stops the main loop
    stop = False

    # Running task status storage
    running = {}

    # Index of the next job to be submitted
    atJob = 0
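The example stops before its main loop. Below is a minimal sketch of the dynamic spawn/join loop that the variables above are prepared for, assuming jobProcessor takes the job value as its only argument and returns the doubled value; the stopping rule is a simplification and the maxTasks=None case is not handled here.

    # Main loop: keep spawning tasks and collecting results until stopped
    while not stop or len(running) > 0:
        # Spawn new tasks while there is room and we are not stopping
        while not stop and len(running) < maxTasks:
            jobs.append(atJob)
            tid = cOS.Spawn(jobProcessor, args=[atJob], remote=True)
            running[tid] = atJob
            atJob += 1

        # Wait for at least one task to finish and collect its result
        retval = cOS.Join()
        for tid, result in retval.items():
            results.append(result)
            del running[tid]
            # Stop submitting new jobs once the target result is reached
            if result >= stopAtResult:
                stop = True

    # Shut down the parallel environment
    cOS.finalize()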
Example #3
# Spawns tasks locally and remotely; run this example as
#  mpirun -n 4 python 02-remote.py

from pyopus.parallel.cooperative import cOS
from pyopus.parallel.mpi import MPI
from funclib import printMsgMPI

if __name__=='__main__':
	# Set up MPI
	cOS.setVM(MPI())

	# Spawn two tasks (locally)
	tidA=cOS.Spawn(printMsgMPI, kwargs={'msg': 'Hello A', 'n': 10})
	tidB=cOS.Spawn(printMsgMPI, kwargs={'msg': 'Hello B', 'n': 20})

	# Spawn two remote tasks
	tidC=cOS.Spawn(printMsgMPI, kwargs={'msg': 'Hello C', 'n': 15}, remote=True)
	tidD=cOS.Spawn(printMsgMPI, kwargs={'msg': 'Hello D', 'n': 18}, remote=True)

	# IDs of running tasks
	running=set([tidA,tidB,tidC,tidD])

	# Wait for all tasks to finish
	while len(running)>0:
		# Wait for any task
		retval=cOS.Join()
		# Wait for specified tasks
		# retval=cOS.Join(running)
		
		# Remove IDs of finished tasks
		for tid in retval.keys():
			# Discard the finished task's ID from the running set
			running.discard(tid)
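Once the loop exits, all tasks have finished. pyopus examples conventionally shut down the virtual machine at this point; assuming that convention applies here:

	# Shut down the parallel environment
	cOS.finalize()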
Example #4
    atDesign = {
        'dif_l': 6.816424e-07,
        'dif_w': 3.332037e-06,
        'nl_l': 2.655088e-06,
        'nl_w': 4.977226e-05,
        'nm_l': 3.665018e-06,
        'nm_w': 7.507191e-05,
        'pm_l': 1.487570e-06,
        'pm_w0': 2.871096e-05,
        'pm_w1': 2.871096e-05,
        'pm_w2': 6.389441e-05,
        'pm_w3': 8.310102e-05,
    }

    # Prepare parallel environment
    cOS.setVM(MPI(mirrorMap={'*': '.'}))

    # 3-sigma target yield
    yt = YieldTargeting(
        designParams,
        statParams,
        opParams,
        heads,
        analyses,
        measures,
        variables=variables,
        beta=3.0,
        wcSpecs=wcList,
        # Comment out to use default initial point (lo+hi)/2
        # initial=atDesign,
        initialNominalDesign=True,
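beta=3.0 requests a three-sigma worst-case distance; under a normal approximation this corresponds to a target yield of Φ(3) ≈ 99.87%. A quick illustrative check (not part of the example):

from scipy.stats import norm

# Yield implied by a worst-case distance of beta sigma (single specification)
print(norm.cdf(3.0))  # 0.9986501...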
Example #5
File: depop.py  Project: ustaros-ai/pyopus
			
			print "Received func=%2d, run=%2d, popsize=%3d "
			
			if writeFhist:
				fp=open("fhist_f%d_p%d_r%d.pck" % (funcIndicesList[iFunc], popSizeList[iPopSize], iRun), "wb")
				dump(fHistory, fp, protocol=-1)
				fp.close()
			
			finalF[iFunc][iPopSize][iRun]=fBest
	except:
		print("Finished")
	
	
	
if __name__=='__main__':
	cOS.setVM(MPI(startupDir=os.getcwd()))
	
	# Prepare results storage
	finalF=zeros((len(funcIndicesList), len(popSizeList), nRun))
	
	# Dispatch jobs
	cOS.dispatch(
		jobList=jobGenerator(), 
		collector=jobCollector(finalF), 
		remote=True 
	)
	
	# Prepare function names
	names=[]
	dims=[]
	for i in funcIndicesList:
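The example is truncated here, and the jobGenerator and jobCollector passed to cOS.dispatch are not shown. As a sketch of their general shape, assuming pyopus's dispatch protocol (jobs given as (callable, args) tuples, and the collector being a coroutine that receives (jobIndex, jobResult) pairs), with runOneJob as a hypothetical worker function:

def jobGenerator():
	# One job per (test function, population size, run) combination
	for iFunc in range(len(funcIndicesList)):
		for iPopSize in range(len(popSizeList)):
			for iRun in range(nRun):
				yield (runOneJob, [iFunc, iPopSize, iRun])

def jobCollector(finalF):
	# Coroutine: receives (jobIndex, jobResult) pairs from cOS.dispatch
	try:
		while True:
			index, (iFunc, iPopSize, iRun, fBest) = yield
			finalF[iFunc][iPopSize][iRun] = fBest
	except GeneratorExit:
		print("Finished")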
Example #6
    # Copy the current globalVars and scoreFunctions scripts for logging purposes
    shutil.copy2('../_MAIN_work/globalVars.py', datadirname + '/globalVars.py')
    shutil.copy2('../_MAIN_work/scoreFunctions.py', datadirname + '/scoreFunctions.py')
    output = open("data.pkl", "wb")
    os.chdir("../_MAIN_work")

    # Set up MPI for parallel computing

    cOS.setVM(
        MPI(
            mirrorMap={
                'models.inc': '.',
                'topdc.cir': '.',
                'topdc_psrr.cir': '.',
                'mosmm.inc': '.',
                'cmos180n.lib': '.',
                #'testTopCirc_hspice_AF.cir':'.',
                #'testTopCirc_hspice_AF_outimped.cir':'.'
            }))

    generationNum = 0
    optimizedIndivids = []  # PSADE parameter-optimized individuals

    hotGen = population()  # hot generation (current)
    oldGen = population()  # previous generation (old)

    currentBestScore = np.inf  # np.Inf alias was removed in NumPy 2.0
    bestScoresList = []
    averageScoresList = []
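The example opens data.pkl but is cut off before anything is written to it. A sketch of a typical tail, assuming the collected score lists are what eventually gets pickled (the payload and the pickle import are assumptions):

    # ... evolutionary loop (truncated in the original example) ...

    # Save collected statistics and clean up (payload is an assumption)
    pickle.dump({'best': bestScoresList, 'average': averageScoresList}, output, protocol=-1)
    output.close()
    cOS.finalize()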