Example #1
 def __init__(self, testName, outputPathBase=None,
         basePath=None, nproc=1, timeout=None):
     if basePath is None:
         # Since we expect this class to be used directly,
         #  get the calling path 1 level up
         basePath = credo.utils.getCallingPath(1)
     if outputPathBase is None:
         outputPathBase = os.path.join("output", testName)
     SysTest.__init__(self, "SciBenchmark", testName, basePath, 
         outputPathBase, nproc, timeout)
     # In the case of SciBenchmarks, we will auto-create the 
     # ModelSuite here, since it's up to the user to configure 
     # this rather than being done automatically in getSuite().
     self.mSuite = ModelSuite(outputPathBase=self.outputPathBase)
Example #2
 def test_generateRuns_izip(self):
     mSuite = ModelSuite(os.path.join("output", "genSuiteTest"),
         templateMRun=self.mRun1)
     mSuite.addVariant("depthVary", self.stgI1)
     mSuite.addVariant("ZVary", self.stgI2)
     mSuite.generateRuns(itertools.izip)
     self.assertEqual(len(mSuite.runs),
         min(len(self.yRange), len(self.zRange)))
     # These are indices into lists above, created manually for testing
     expIndices = [(0,0),(1,1)]
     for ii, expIndexTuple in enumerate(expIndices):
         yIndex, zIndex = expIndexTuple
         self.assertEqual(mSuite.runs[ii].paramOverrides['minY'],
             self.yRange[yIndex])
         self.assertEqual(mSuite.runs[ii].paramOverrides['maxZ'],
             self.zRange[zIndex])
         self.assertEqual(mSuite.runs[ii].outputPath,
             os.path.join("output", "genSuiteTest",
                 mSuite.subOutputPathGenFunc(mSuite.runs[ii],
                 mSuite.modelVariants, expIndexTuple, ii)))
Example #3
import os
import copy
import credo.jobrunner
from credo.modelrun import ModelRun, SimParams
from credo.modelsuite import ModelSuite

jobRunner = credo.jobrunner.defaultRunner()

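# Build matching 10-run suites for the standard Arrhenius model and the
# Ppc-based variant, so their results can be compared after running.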
outPathBase = os.path.join('output','PPC_Compare')
if not os.path.exists(outPathBase):
    os.makedirs(outPathBase)

defParams = SimParams(nsteps=2)
stdRun = ModelRun("Arrhenius-normal",
    os.path.join('..','..', 'Underworld', 'InputFiles', 'Arrhenius.xml'),
    simParams=defParams)
ppcRun = ModelRun("Arrhenius-ppc", "Arrhenius.xml",
    basePath=os.path.join("Ppc_Testing","udw_inputfiles"),
    simParams=defParams)

stdSuite = ModelSuite(os.path.join(outPathBase, "arrBasic"))
ppcSuite = ModelSuite(os.path.join(os.getcwd(), outPathBase, "arrPIC"))

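# Give each of the ten runs its own zero-padded output sub-directory.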
for ii in range(10):
    stdRun.outputPath = os.path.join(stdSuite.outputPathBase, "%.5d" % ii)
    ppcRun.outputPath = os.path.join(ppcSuite.outputPathBase, "%.5d" % ii)
    stdSuite.addRun(copy.deepcopy(stdRun))
    ppcSuite.addRun(copy.deepcopy(ppcRun))

stdResults = jobRunner.runSuite(stdSuite)
ppcResults = jobRunner.runSuite(ppcSuite)

#-----------------------------

cpuRegs = []
cpuPPCs = []
Example #4
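# Compare the standard Arrhenius model against the Ppc-based setup over ten
# short runs each.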
import os
import copy
import credo.jobrunner
from credo.modelrun import ModelRun, SimParams
from credo.modelsuite import ModelSuite

jobRunner = credo.jobrunner.defaultRunner()

outPathBase = os.path.join('output', 'PPC_Compare')
if not os.path.exists(outPathBase):
    os.makedirs(outPathBase)

defParams = SimParams(nsteps=2)
stdRun = ModelRun("Arrhenius-normal",
                  os.path.join('..', '..', 'Underworld', 'InputFiles',
                               'Arrhenius.xml'),
                  simParams=defParams)
ppcRun = ModelRun("Arrhenius-ppc",
                  "Arrhenius.xml",
                  basePath=os.path.join("Ppc_Testing", "udw_inputfiles"),
                  simParams=defParams)

stdSuite = ModelSuite(os.path.join(outPathBase, "arrBasic"))
ppcSuite = ModelSuite(os.path.join(os.getcwd(), outPathBase, "arrPIC"))

for ii in range(10):
    stdRun.outputPath = os.path.join(stdSuite.outputPathBase, "%.5d" % ii)
    ppcRun.outputPath = os.path.join(ppcSuite.outputPathBase, "%.5d" % ii)
    stdSuite.addRun(copy.deepcopy(stdRun))
    ppcSuite.addRun(copy.deepcopy(ppcRun))

stdResults = jobRunner.runSuite(stdSuite)
ppcResults = jobRunner.runSuite(ppcSuite)

#-----------------------------

cpuRegs = []
cpuPPCs = []
Example #5
import os
from credo.modelrun import ModelRun, SimParams
from credo.modelsuite import ModelSuite, StgXMLVariant
import credo.modelsuite as msuite
import credo.jobrunner
from credo.analysis import modelplots

import credo_rayTaySuite

elRes = 32
stoptime = 2500.0
mRun = ModelRun("RayTay-basic", "RayleighTaylorBenchmark.xml",
    simParams=SimParams(stoptime=stoptime, nsteps=-1, dumpevery=3))
mRun.paramOverrides={"elementResI":elRes, "elementResJ":elRes}
mSuite = ModelSuite("output/raytay-suite-simple-%dx%d-%d_t" %\
    (elRes, elRes, stoptime),
    templateMRun=mRun)
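# Vary the light layer's initial perturbation amplitude from 0.02 to 0.20
# in steps of 0.02.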
ampRange = [0.02 + x * 0.02 for x in range(10)]
mSuite.addVariant("initPerturbation",
    StgXMLVariant("components.lightLayerShape.amplitude", ampRange))

mSuite.subOutputPathGenFunc = msuite.getSubdir_RunIndexAndText
mSuite.generateRuns()

if __name__ == "__main__":
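    # Set postProcFromExisting to True to read results already on disk
    # instead of re-running the suite.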
    postProcFromExisting = False
    if not postProcFromExisting:
        jobRunner = credo.jobrunner.defaultRunner()
        mResults = jobRunner.runSuite(mSuite)
    else:
        mResults = mSuite.readResultsFromPath(mSuite.runs[0].basePath)
Example #6
import os
from credo.modelrun import ModelRun, SimParams
from credo.modelsuite import ModelSuite, StgXMLVariant
import credo.modelsuite as msuite
import credo.jobrunner
from credo.analysis import modelplots
import credo.reporting.standardReports as sReps
from credo.reporting import getGenerators

elRes = 16
stoptime = 600.0
mRun = ModelRun("RayTay-basic", "RayleighTaylorBenchmark.xml",
    simParams=SimParams(stoptime=stoptime, nsteps=-1, dumpevery=3))
mRun.paramOverrides={"elementResI":elRes, "elementResJ":elRes}

mSuite = ModelSuite("output/raytay-suite-%dx%d-%d_t" % (elRes, elRes, stoptime),
    templateMRun=mRun)
gravRange = [0.7 + x * 0.1 for x in range(4)]
mSuite.addVariant("gravity", StgXMLVariant("gravity", gravRange))
ampRange = [0.02, 0.04, 0.07]
mSuite.addVariant("initPerturbation",
    StgXMLVariant("components.lightLayerShape.amplitude", ampRange))

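# With no generator argument, generateRuns() should expand the full product of
# the two variant ranges (4 gravity values x 3 amplitudes = 12 runs).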
mSuite.generateRuns()

def reportResults(mSuite, mResults):
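    # Print the variant parameter values associated with each result.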
    indicesIter = msuite.getVariantIndicesIter(mSuite.modelVariants,
        mSuite.iterGen)
    varNameDicts = msuite.getVariantNameDicts(mSuite.modelVariants, indicesIter)
    for resI, mRes in enumerate(mResults):
        print "Post-process result %d: with" % resI,
        print ", ".join(["%s=%g" % item for item in varNameDicts[resI].iteritems()])
Example #7
 def test_generateRuns_product(self):
     mSuite = ModelSuite(os.path.join("output", "genSuiteTest"),
         templateMRun=self.mRun1)
     #TODO: since currently mVariants implemented as a dict, the order
     # these are added doesn't currently matter.
     mSuite.addVariant("depthVary", self.stgI1)
     mSuite.addVariant("ZVary", self.stgI2)
     mSuite.addVariant("scaleTests", self.jobI1)
     mSuite.generateRuns(msuite.product)
     self.assertEqual(len(mSuite.runs),
         len(self.yRange) * len(self.zRange) * len(self.procRange))
     # These are indices into lists above, created manually for testing
     # TODO: below is an experimentally-determined order - bad!
     expIndices = list(msuite.product(
         range(len(self.procRange)),
         range(len(self.yRange)), 
         range(len(self.zRange))
         ))
     for ii, expIndexTuple in enumerate(expIndices):
         pIndex, yIndex, zIndex = expIndexTuple
         self.assertEqual(mSuite.runs[ii].paramOverrides['minY'],
             self.yRange[yIndex])
         self.assertEqual(mSuite.runs[ii].paramOverrides['maxZ'],
             self.zRange[zIndex])
         self.assertEqual(mSuite.runs[ii].jobParams['nproc'],
             self.procRange[pIndex])
         self.assertEqual(mSuite.runs[ii].outputPath,
             os.path.join("output", "genSuiteTest",
                 mSuite.subOutputPathGenFunc(mSuite.runs[ii],
                     mSuite.modelVariants, expIndexTuple, ii)))
     # Now test regenerating produces correct length again
     mSuite.generateRuns()
     self.assertEqual(len(mSuite.runs),
         len(self.yRange) * len(self.zRange) * len(self.procRange))
Example #8
    def test_generateRuns_customSubdirs(self):
        mSuite = ModelSuite(os.path.join("output", "genSuiteTest"),
            templateMRun=self.mRun1)
        mSuite.addVariant("depthVary", self.stgI1)
        mSuite.addVariant("ZVary", self.stgI2)

        mSuite.subOutputPathGenFunc = msuite.getSubdir_RunIndex
        mSuite.generateRuns(itertools.izip)
        self.assertEqual(len(mSuite.runs),
            min(len(self.yRange), len(self.zRange)))
        for runI in range(len(mSuite.runs)):
            # This should just be a very simple output path based on
            #  run index
            self.assertEqual(mSuite.runs[runI].outputPath,
                os.path.join("output", "genSuiteTest",
                    msuite.getSubdir_RunIndex(None, None, None, runI)))

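        # Regenerate with the parameter-value-based naming function and check
        # the output paths are updated to match.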
        mSuite.subOutputPathGenFunc = msuite.getSubdir_TextParamVals
        mSuite.generateRuns(itertools.izip)
        expIndices = [(0,0),(1,1)]
        for runI, expIndexTuple in enumerate(expIndices):
            self.assertEqual(mSuite.runs[runI].outputPath,
                os.path.join("output", "genSuiteTest",
                    msuite.getSubdir_TextParamVals(mSuite.runs[runI],
                    mSuite.modelVariants, expIndexTuple, runI)))
Example #9
import os
import itertools
import credo.modelsuite as msuite
from credo.modelrun import ModelRun, SimParams
# JobParamVariant is assumed to be importable from credo.modelsuite,
# alongside ModelSuite.
from credo.modelsuite import ModelSuite, JobParamVariant
import credo.analysis.modelplots as modelplots

elResSweep = [32, 64, 96, 128, 196, 256]
defParams = SimParams(nsteps=5)
mRun = ModelRun("RayTayOverturn", "RayleighTaylorBenchmark.xml",
    simParams=defParams)

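# Build one suite per element resolution; each suite varies the processor
# count so parallel speedup can be measured.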
mSuites = []
for elRes in elResSweep:
    mRun.paramOverrides["elementResI"] = elRes
    mRun.paramOverrides["elementResJ"] = elRes
    outPathBase = os.path.join('output','perfScalingTest', 'res%04d' % elRes)
    if not os.path.exists(outPathBase):
        os.makedirs(outPathBase)

    mSuite = ModelSuite(outPathBase, templateMRun=mRun)
    mSuite.elRes = elRes
    mSuite.procRange = [1, 2, 4, 8]
    mSuite.addVariant("nproc", JobParamVariant("nproc", mSuite.procRange))
    mSuite.subOutputPathGenFunc = msuite.getSubdir_RunIndexAndText
    mSuite.generateRuns(iterGen=itertools.izip)
    mSuites.append(mSuite)

#-----------------------------

def reporting(mSuite, mResults):
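    # Summarise parallel speedup for this resolution and open a CSV file for
    # per-run performance data.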
    print "With element res %d" % mSuite.elRes
    speedups = modelplots.getSpeedups(mSuite.runs, mResults)
    print "Speedups were: %s" % speedups
    csvName = os.path.join(mSuite.outputPathBase, "runPerfInfo.csv")
    csvFile = open(csvName, "wb")
Example #10
 def genSuite(self):
     # an empty suite
     self.mSuite = ModelSuite(outputPathBase=self.outputPathBase)
     return self.mSuite
Example #11
import os
import csv
import itertools
from credo.modelrun import ModelRun, SimParams
import credo.modelsuite as msuite
from credo.modelsuite import ModelSuite, StgXMLVariant
import credo.jobrunner
import credo.analysis.modelplots as modelplots

outPathBase = os.path.join('output','resScalingTest')
if not os.path.exists(outPathBase):
    os.makedirs(outPathBase)

defParams = SimParams(nsteps=2)
mRun = ModelRun("RayTayOverturn", "RayleighTaylorBenchmark.xml",
    simParams=defParams)

mSuite = ModelSuite(outPathBase, templateMRun=mRun)
mSuite.resRange = [8, 16, 32, 64, 96, 128, 150, 180, 256]
mSuite.addVariant("elResI", StgXMLVariant("elementResI", mSuite.resRange))
mSuite.addVariant("elResJ", StgXMLVariant("elementResJ", mSuite.resRange))
# TODO: would be nice to have a simpler subpath modifier
mSuite.subOutputPathGenFunc = msuite.getSubdir_RunIndexAndText
mSuite.generateRuns(iterGen=itertools.izip)

#-----------------------------

def reporting(mSuite, mResults):
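    # Report time-per-element for each run and open a CSV file for per-run
    # performance data.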
    timePerEls = modelplots.getTimePerEls(mSuite.runs, mResults)
    print "Time per els were: %s" % timePerEls
    csvName = os.path.join(outPathBase, "runPerfInfo.csv")
    csvFile = open(csvName, "wb")
    wtr = csv.writer(csvFile)