Example #1
    def test_generateRuns_customSubdirs(self):        
        mSuite = ModelSuite(os.path.join("output","genSuiteTest"),
            templateMRun = self.mRun1)
        mSuite.addVariant("depthVary", self.stgI1)
        mSuite.addVariant("ZVary", self.stgI2)

        mSuite.subOutputPathGenFunc = msuite.getSubdir_RunIndex
        mSuite.generateRuns(itertools.izip)
        self.assertEqual(len(mSuite.runs),
            min(len(self.yRange), len(self.zRange)))
        for runI in range(len(mSuite.runs)):
            # This should just be a very simple output path based on
            #  run index
            self.assertEqual(mSuite.runs[runI].outputPath,
                os.path.join("output", "genSuiteTest",
                    msuite.getSubdir_RunIndex(None, None, None, runI)))

        mSuite.subOutputPathGenFunc = msuite.getSubdir_TextParamVals
        mSuite.generateRuns(itertools.izip)
        expIndices = [(0,0),(1,1)]
        for runI, expIndexTuple in enumerate(expIndices):
            self.assertEqual(mSuite.runs[runI].outputPath,
                os.path.join("output", "genSuiteTest",
                    msuite.getSubdir_TextParamVals(mSuite.runs[runI],
                    mSuite.modelVariants, expIndexTuple, runI)))
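The assertions above depend only on the hook's call signature: each generator function receives the model run, the suite's modelVariants, the tuple of per-variant parameter indices, and the run index, and returns a sub-directory name. As a minimal sketch (an assumption for illustration, not CREDO's actual ModelSuite code), the hook would be applied to each run roughly like this:

import os

def applySubOutputPath(suite, run, paramIndexTuple, runIndex):
    # Hypothetical helper: build the run's outputPath from the suite's
    # base path plus whatever sub-directory name the hook returns.
    subDir = suite.subOutputPathGenFunc(run, suite.modelVariants,
        paramIndexTuple, runIndex)
    run.outputPath = os.path.join(suite.outputPathBase, subDir)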
Example #2
    def test_generateRuns_izip(self):
        mSuite = ModelSuite(os.path.join("output","genSuiteTest"),
            templateMRun = self.mRun1)
        mSuite.addVariant("depthVary", self.stgI1)
        mSuite.addVariant("ZVary", self.stgI2)
        mSuite.generateRuns(itertools.izip)
        self.assertEqual(len(mSuite.runs),
            min(len(self.yRange), len(self.zRange)))
        # These are indices into lists above, created manually for testing
        expIndices = [(0,0),(1,1)]
        for ii, expIndexTuple in enumerate(expIndices):
            yIndex, zIndex = expIndexTuple
            self.assertEqual(mSuite.runs[ii].paramOverrides['minY'],
                self.yRange[yIndex])
            self.assertEqual(mSuite.runs[ii].paramOverrides['maxZ'],
                self.zRange[zIndex])
            self.assertEqual(mSuite.runs[ii].outputPath,
                os.path.join("output", "genSuiteTest",
                    mSuite.subOutputPathGenFunc(mSuite.runs[ii],
                    mSuite.modelVariants, expIndexTuple, ii)))
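The min() in the run-count assertion follows directly from izip semantics: izip stops at its shortest input, pairing variant values positionally. A standalone check (with hypothetical range lengths) reproduces the expIndices used above:

import itertools

yRange = [1.0, 2.0, 3.0]   # hypothetical values, 3 entries
zRange = [10.0, 20.0]      # hypothetical values, 2 entries
indexTuples = list(itertools.izip(range(len(yRange)), range(len(zRange))))
print indexTuples          # -> [(0, 0), (1, 1)], i.e. min(3, 2) == 2 runs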
Example #3
    def test_generateRuns_product(self):
        mSuite = ModelSuite(os.path.join("output","genSuiteTest"),
            templateMRun = self.mRun1)
        #TODO: since currently mVariants implemented as a dict, the order
        # these are added doesn't currently matter.
        mSuite.addVariant("depthVary", self.stgI1)
        mSuite.addVariant("ZVary", self.stgI2)
        mSuite.addVariant("scaleTests", self.jobI1)
        mSuite.generateRuns(msuite.product)
        self.assertEqual(len(mSuite.runs),
            len(self.yRange) * len(self.zRange) * len(self.procRange))
        # These are indices into lists above, created manually for testing
        # TODO: below is an experimentally-determined order - bad!
        expIndices = list(msuite.product(
            range(len(self.procRange)),
            range(len(self.yRange)),
            range(len(self.zRange))
            ))
        for ii, expIndexTuple in enumerate(expIndices):
            pIndex, yIndex, zIndex = expIndexTuple
            self.assertEqual(mSuite.runs[ii].paramOverrides['minY'],
                self.yRange[yIndex])
            self.assertEqual(mSuite.runs[ii].paramOverrides['maxZ'],
                self.zRange[zIndex])
            self.assertEqual(mSuite.runs[ii].jobParams['nproc'],
                self.procRange[pIndex])
            self.assertEqual(mSuite.runs[ii].outputPath,
                os.path.join("output", "genSuiteTest",
                    mSuite.subOutputPathGenFunc(mSuite.runs[ii],
                        mSuite.modelVariants, expIndexTuple, ii)))
        # Now test regenerating produces correct length again
        mSuite.generateRuns()
        self.assertEqual(len(mSuite.runs),
            len(self.yRange) * len(self.zRange) * len(self.procRange))
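By contrast, msuite.product (assumed here to behave like itertools.product, which the expIndices computation above also relies on) enumerates the full cross product, so the run count is the product of the three range lengths. A standalone check with hypothetical range lengths:

import itertools

procRange = [1, 2]               # hypothetical values, 2 entries
yRange = [1.0, 2.0]              # hypothetical values, 2 entries
zRange = [10.0, 20.0, 30.0]      # hypothetical values, 3 entries
expIndices = list(itertools.product(
    range(len(procRange)), range(len(yRange)), range(len(zRange))))
print len(expIndices)            # -> 2 * 2 * 3 = 12 runs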
Example #4
import os

import credo.modelsuite as msuite
from credo.modelrun import ModelRun, SimParams
from credo.modelsuite import ModelSuite, StgXMLVariant
import credo.jobrunner
from credo.analysis import modelplots

import credo_rayTaySuite

elRes=32
stoptime=2500.0
mRun = ModelRun("RayTay-basic", "RayleighTaylorBenchmark.xml",
    simParams=SimParams(stoptime=stoptime, nsteps=-1, dumpevery=3))
mRun.paramOverrides={"elementResI":elRes, "elementResJ":elRes}
mSuite = ModelSuite("output/raytay-suite-simple-%dx%d-%d_t" %\
    (elRes, elRes, stoptime),
    templateMRun=mRun)
ampRange = [0.02 + x * 0.02 for x in range(10)]
mSuite.addVariant("initPerturbation",
    StgXMLVariant("components.lightLayerShape.amplitude", ampRange))

mSuite.subOutputPathGenFunc = msuite.getSubdir_RunIndexAndText
mSuite.generateRuns()
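# With a single variant ("initPerturbation" over ampRange), generateRuns()
# produces one run per amplitude value -- 10 runs here, each written to a
# sub-directory named by the getSubdir_RunIndexAndText hook set above.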

if __name__ == "__main__":
    postProcFromExisting = False
    if postProcFromExisting == False:
        jobRunner = credo.jobrunner.defaultRunner()
        mResults = jobRunner.runSuite(mSuite)
    else:
        mResults = mSuite.readResultsFromPath(mSuite.runs[0].basePath)
    credo_rayTaySuite.reportResults(mSuite, mResults)
    import credo.reporting.reportLab as rlRep
    rlRep.makeSuiteReport(mSuite, 
        os.path.join(mSuite.outputPathBase, "%s-report.pdf" %\
Example #5
import os
import csv
import itertools

import credo.modelsuite as msuite
from credo.modelrun import ModelRun, SimParams
from credo.modelsuite import ModelSuite, JobParamVariant
from credo.analysis import modelplots

elResSweep = [16, 32, 64]   # hypothetical sweep values -- assumption, not from the original script

defParams = SimParams(nsteps=5)
mRun = ModelRun("RayTayOverturn", "RayleighTaylorBenchmark.xml",
    simParams=defParams)

mSuites = []
for elRes in elResSweep:
    mRun.paramOverrides["elementResI"] = elRes
    mRun.paramOverrides["elementResJ"] = elRes
    outPathBase = os.path.join('output','perfScalingTest', 'res%04d' % elRes)
    if not os.path.exists(outPathBase):
        os.makedirs(outPathBase)

    mSuite = ModelSuite(outPathBase, templateMRun=mRun)
    mSuite.elRes = elRes
    mSuite.procRange = [1, 2, 4, 8]
    mSuite.addVariant("nproc", JobParamVariant("nproc", mSuite.procRange))
    mSuite.subOutputPathGenFunc = msuite.getSubdir_RunIndexAndText
    mSuite.generateRuns(iterGen=itertools.izip)
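    # One suite per element resolution: with the single "nproc" variant and
    # itertools.izip, generateRuns() yields len(procRange) runs (1, 2, 4 and
    # 8 processors), all under this resolution's output path.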
    mSuites.append(mSuite)

#-----------------------------

def reporting(mSuite, mResults):
    print "With element res %d" % mSuite.elRes
    speedups = modelplots.getSpeedups(mSuite.runs, mResults)
    print "Speedups were: %s" % speedups
    csvName = os.path.join(mSuite.outputPathBase, "runPerfInfo.csv")
    csvFile = open(csvName, "wb")
    wtr = csv.writer(csvFile)
    wtr.writerow(["Run", "nproc", "walltime (sec)", "max mem (MB)", "speedup"])
    for runI, mRes in enumerate(mResults):
Example #6
import os
import csv
import itertools

import credo.modelsuite as msuite
from credo.modelrun import ModelRun, SimParams
from credo.modelsuite import ModelSuite, StgXMLVariant
import credo.jobrunner
import credo.analysis.modelplots as modelplots

outPathBase = os.path.join('output','resScalingTest')
if not os.path.exists(outPathBase):
    os.makedirs(outPathBase)

defParams = SimParams(nsteps=2)
mRun = ModelRun("RayTayOverturn", "RayleighTaylorBenchmark.xml",
    simParams=defParams)

mSuite = ModelSuite(outPathBase, templateMRun=mRun)
mSuite.resRange = [8, 16, 32, 64, 96, 128, 150, 180, 256]
mSuite.addVariant("elResI", StgXMLVariant("elementResI", mSuite.resRange))
mSuite.addVariant("elResJ", StgXMLVariant("elementResJ", mSuite.resRange))
# TODO: would be nice to have a simpler subpath modifier
mSuite.subOutputPathGenFunc = msuite.getSubdir_RunIndexAndText
mSuite.generateRuns(iterGen=itertools.izip)
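# Because both variants iterate over the same resRange and runs are paired
# with itertools.izip, elementResI and elementResJ stay matched (8x8, 16x16,
# ...), giving len(resRange) square-resolution runs rather than a cross
# product.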

#-----------------------------

def reporting(mSuite, mResults):
    timePerEls = modelplots.getTimePerEls(mSuite.runs, mResults)
    print "Time per els were: %s" % timePerEls
    csvName = os.path.join(outPathBase, "runPerfInfo.csv")
    csvFile = open(csvName, "wb")
    wtr = csv.writer(csvFile)
    wtr.writerow(["Run", "res(els)", "walltime (sec)", "max mem (MB)", 
        "Time(sec)/element"])