Example no. 1
def setUp(self):
    self.basedir = os.path.realpath(tempfile.mkdtemp())
    self.mRun1 = ModelRun("testRun1", ["Input1.xml"], "./output/tr1")
    self.yRange = [-16000, -10000, -3000]
    self.zRange = [10000, 11000]
    self.procRange = [1, 2, 4, 8]
    self.stgI1 = StgXMLVariant("minY", self.yRange)
    self.stgI2 = StgXMLVariant("maxZ", self.zRange)
    self.jobI1 = JobParamVariant("nproc", self.procRange)
    self.varDict = {"Y": self.stgI1, "Z": self.stgI2}
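
# A likely companion to the setUp above (an assumption, not shown here):
# a tearDown that removes the temporary directory created by tempfile.mkdtemp(),
# as is standard unittest practice.
def tearDown(self):
    import shutil
    shutil.rmtree(self.basedir)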
# Assumed import paths (standard CREDO layout) for the classes used below:
import itertools
from credo.modelrun import ModelRun, SimParams
from credo.modelsuite import StgXMLVariant
import credo.modelsuite as msuite
from credo.systest import SysTestSuite, SciBenchmarkTest

import thermalConvPostProc
from credo.analysis import modelplots
import credo.reporting.standardReports as sReps
from credo.reporting import getGenerators

testSuite = SysTestSuite("Underworld", "SciBench-ThermalConv-NonDim")
sciBTest = SciBenchmarkTest("ThermalConvNonDimBenchmark")
sciBTest.description = """Tests that Underworld's Thermal Convection capability
 accords well with the known Blankenbach (1989) results (non-dimensional)"""
testSuite.sysTests.append(sciBTest)
## Configuration parameters for overall benchmark
RaRange = [1.0e4, 1.0e5, 1.0e6]
elRes = 32
## Configure the model and suite to use for this benchmark
mRun = ModelRun("ThermalConvection_NonDim",
    "ThermalConvectionBenchmark_NonDim.xml",
    simParams=SimParams(dumpevery=100, nsteps=int(2e6)))
    
# Set to resolution needed to pass expected ranges instead of the default 12x12
# Running a suite of models for this Benchmark for 3 Rayleigh Numbers:
# 10^4, 10^5, 10^6
mRun.paramOverrides['elementResI'] = elRes
mRun.paramOverrides['elementResJ'] = elRes
mRun.paramOverrides["components.temperatureContours.interval"] = 0.1
sciBTest.mSuite.templateMRun=mRun
sciBTest.mSuite.addVariant("Ra", StgXMLVariant("Ra", RaRange))
sciBTest.mSuite.addVariant("gravity", StgXMLVariant("gravity", RaRange))
#Generate the actual suite runs
sciBTest.mSuite.subOutputPathGenFunc = msuite.getSubdir_TextParamVals
sciBTest.mSuite.generateRuns(iterGen=itertools.izip)
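
# Passing itertools.izip as the iterator generator is intended to pair the two
# variants' value lists one-to-one (3 runs here) rather than form a full cross
# product (which itertools.product would give: 9 runs). A standalone
# illustration of the two iterators in plain Python 2, independent of CREDO:
import itertools
RaRange = [1.0e4, 1.0e5, 1.0e6]
print list(itertools.izip(RaRange, RaRange))     # 3 paired value tuples
print list(itertools.product(RaRange, RaRange))  # 9 tuples, for contrast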
# Assumed import paths (standard CREDO layout) for the classes used below:
import itertools
from credo.modelrun import ModelRun, SimParams
from credo.modelsuite import StgXMLVariant
import credo.modelsuite as msuite
from credo.systest import SysTestSuite, SciBenchmarkTest

from credo.analysis import modelplots
import thermalConvPostProc
import credo.reporting.standardReports as sReps
from credo.reporting import getGenerators

testSuite = SysTestSuite("Underworld", "SciBench-ThermalConv-Dim")
sciBTest = SciBenchmarkTest("ThermalConvDimBenchmark")
sciBTest.description = """Tests that Underworld's Thermal Convection capability
 accords well with the known Blankenbach (1989) results (dimensional)"""
testSuite.sysTests.append(sciBTest)
# Running a suite of models for this Benchmark for 3 Rayleigh Numbers:
# 10^4, 10^5, 10^6 - scaling factor is 10^-12 for Vrms and 10^-3 for Nusselt
RaRange = [1.0e6, 1.0e5, 1.0e4] 
## Configure the model and suite to use for this benchmark
mRun = ModelRun("ThermalConvectionDimensionalBenchmark", 
    "ThermalConvectionBenchmark_Dim.xml",
    simParams=SimParams(dumpevery=100, nsteps=5000),
    nproc=2)
mRun.paramOverrides["steadySteps"] = 100
mRun.paramOverrides["tolerance"] = 0.0005
#Visualisation customisation.
mRun.paramOverrides["components.temperatureContours.interval"] = 100
sciBTest.mSuite.templateMRun=mRun
viscRange = [2.5e17, 2.5e18, 2.5e19]
sciBTest.mSuite.addVariant("viscosity",
    StgXMLVariant("components.viscosity.eta0", viscRange))
sciBTest.mSuite.subOutputPathGenFunc = msuite.getSubdir_TextParamVals
sciBTest.mSuite.generateRuns(iterGen=itertools.izip)
## Configure tests to apply
# These are the ranges of Vrms and Nusselt Numbers from the Blankenbach
# paper (1989)
# For Ra = 10^4, 10^5, 10^6, rough ranges from Blankenbach paper
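
# Purely as an illustration of the per-Ra range checks described above (the
# bounds and the observed value below are dummy placeholders, NOT Blankenbach
# values), a plain-Python sketch of such a check:
def withinRange(observed, bounds):
    lower, upper = bounds
    return lower <= observed <= upper

expectedVrmsRanges = {1.0e4: (0.0, 1.0), 1.0e5: (0.0, 1.0), 1.0e6: (0.0, 1.0)}
assert withinRange(0.5, expectedVrmsRanges[1.0e4])  # 0.5 is a dummy observed Vrms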
#! /usr/bin/env python
import os, copy
import csv
from credo.modelrun import ModelRun, SimParams
from credo.modelsuite import ModelSuite
import credo.jobrunner

jobRunner = credo.jobrunner.defaultRunner()

outPathBase = os.path.join('output','PPC_Compare')
if not os.path.exists(outPathBase):
    os.makedirs(outPathBase)

defParams = SimParams(nsteps=2)
stdRun = ModelRun("Arrhenius-normal",
    os.path.join('..','..', 'Underworld', 'InputFiles', 'Arrhenius.xml'),
    simParams=defParams)
ppcRun = ModelRun("Arrhenius-ppc", "Arrhenius.xml",
    basePath=os.path.join("Ppc_Testing","udw_inputfiles"),
    simParams=defParams)

stdSuite = ModelSuite(os.path.join(outPathBase, "arrBasic"))
ppcSuite = ModelSuite(os.path.join(os.getcwd(), outPathBase, "arrPIC"))

for ii in range(10):
    stdRun.outputPath = os.path.join(stdSuite.outputPathBase, "%.5d" % ii)
    ppcRun.outputPath = os.path.join(ppcSuite.outputPathBase, "%.5d" % ii)
    stdSuite.addRun(copy.deepcopy(stdRun))
    ppcSuite.addRun(copy.deepcopy(ppcRun))

stdResults = jobRunner.runSuite(stdSuite)
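
# For reference, the loop above gives each of the ten runs its own zero-padded
# subdirectory under the suite's base path; a quick standalone check of the
# naming pattern:
import os
for ii in range(3):
    print os.path.join("output", "PPC_Compare", "arrBasic", "%.5d" % ii)
# -> e.g. output/PPC_Compare/arrBasic/00000, .../00001, .../00002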
Example no. 5
#! /usr/bin/env python
import os, copy
import csv
from credo.modelrun import ModelRun, SimParams
from credo.modelsuite import ModelSuite
import credo.jobrunner

jobRunner = credo.jobrunner.defaultRunner()

outPathBase = os.path.join('output', 'PPC_Compare')
if not os.path.exists(outPathBase):
    os.makedirs(outPathBase)

defParams = SimParams(nsteps=2)
stdRun = ModelRun("Arrhenius-normal",
                  os.path.join('..', '..', 'Underworld', 'InputFiles',
                               'Arrhenius.xml'),
                  simParams=defParams)
ppcRun = ModelRun("Arrhenius-ppc",
                  "Arrhenius.xml",
                  basePath=os.path.join("Ppc_Testing", "udw_inputfiles"),
                  simParams=defParams)

stdSuite = ModelSuite(os.path.join(outPathBase, "arrBasic"))
ppcSuite = ModelSuite(os.path.join(os.getcwd(), outPathBase, "arrPIC"))

for ii in range(10):
    stdRun.outputPath = os.path.join(stdSuite.outputPathBase, "%.5d" % ii)
    ppcRun.outputPath = os.path.join(ppcSuite.outputPathBase, "%.5d" % ii)
    stdSuite.addRun(copy.deepcopy(stdRun))
    ppcSuite.addRun(copy.deepcopy(ppcRun))
# Assumed imports (standard library plus CREDO) for this excerpt:
import os, sys, getopt
from credo.modelrun import ModelRun, SimParams

# This is where we create the key data structure, the mRun. It stores info
# about the directories used, the timestep, and the fields checkpointed, and
# is used by the APIs to access that info.

opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])

#For now just copy all args as input files
inputFiles = args
modelName, ext = os.path.splitext(args[0])
modelName += "-referenceTest"
# Do the following live on an options struct?
outputPath = 'output'+os.sep+modelName
expectedPath = 'expected'+os.sep+modelName
nproc=1

mRun = ModelRun(modelName, inputFiles, outputPath, nproc=nproc)

# TODO: responsibility of SystemTest class?
createReference = False
# For a reference test, these are standard fields to be tested.
standardFields = ['VelocityField','PressureField']
runSteps=10

if createReference:
    mRun.outputPath = expectedPath
    mRun.simParams = SimParams(nsteps=runSteps, cpevery=runSteps, dumpevery=0)
    mRun.cpFields = standardFields
else:
    mRun.simParams = SimParams(nsteps=runSteps, cpevery=0, dumpevery=0)
    fTests = mRun.analysis['fieldTests']
    fTests.testTimestep = runSteps
Example no. 7
# Assumed imports (standard library plus CREDO) for this excerpt:
import os, sys, getopt
import credo
import credo.modelrun
import credo.modelresult
from credo.modelrun import ModelRun, SimParams

opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])

#For now just copy all args as input files
inputFiles = args
modelName, ext = os.path.splitext(args[0])
nproc = 1

# For a restart test, these are standard fields to be tested.
modelName += "-restartTest"
outputPath = 'output' + os.sep + modelName
standardFields = ['VelocityField', 'PressureField']
runSteps = 20
assert runSteps % 2 == 0

print "Initial run:"
mRun = ModelRun(modelName + "-initial", inputFiles, outputPath, nproc=nproc)
initialOutputPath = outputPath + os.sep + "initial"
mRun.outputPath = initialOutputPath
mRun.simParams = SimParams(nsteps=runSteps, cpevery=runSteps / 2, dumpevery=0)
mRun.cpFields = standardFields

mRun.writeInfoXML()
credo.prepareOutputLogDirs(mRun.outputPath, mRun.logPath)
# This will run the model, and also save basic results (e.g. walltime)
analysisXML = mRun.analysisXMLGen()
results = credo.modelrun.runModel(mRun)
credo.modelresult.writeModelResultsXML(results, path=mRun.outputPath)

print "Restart run:"
mRun.name = modelName + "-restart"
mRun.outputPath = outputPath + os.sep + "restart"
# PPC benchmark
# CAN ONLY BE RUN IN SERIAL!
 
import operator
import os
import itertools
from credo.modelrun import ModelRun, SimParams
from credo.modelsuite import ModelSuite, StgXMLVariant
import credo.modelsuite as msuite
import credo.jobrunner
import credo.reporting.standardReports as sReps
from credo.reporting import getGenerators
from thermalConvPostProc import *

mRun = ModelRun("PPCBenchmark", "PPCTest/ExtensionPPC.xml",
    "output/PPCBenchmark",
    simParams=SimParams(stoptime=0.625),
	nproc=1)
	
# Set to resolution needed to pass expected ranges
mRun.paramOverrides['elementResI'] = 64
mRun.paramOverrides['elementResJ'] = 32
	
def testPPC(mRun, mResult):
    #------------------------------------
    # Tests whether the PPC scaling works
    # accurately by comparing the analytic
    # length of extension (i.e. 10 km) with
    # the actual final length.
    # The final length is 2.625, given by the
    # actual length of extension * scaling coefficient:
    # 1.0e4 * 6.25e-5 = 0.625, which added to the
    # initial length gives 2.625 total.
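
# A standalone check of the arithmetic in the comment above (the initial
# domain length of 2.0 is an assumption inferred from the stated 2.625 total):
extensionLength = 1.0e4    # analytic extension: 10 km, in metres
scalingCoeff = 6.25e-5     # scaling coefficient quoted in the comment above
initialLength = 2.0        # assumed initial (scaled) domain length
assert abs(initialLength + extensionLength * scalingCoeff - 2.625) < 1e-9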
#! /usr/bin/env python
from datetime import timedelta
from credo.modelrun import ModelRun, SimParams
from credo.modelrun import JobParams
import credo.jobrunner.pbsjobrunner

mRun = ModelRun("RayTay-basicBenchmark", "RayleighTaylorBenchmark.xml",
    "output/raytay-scibench-credo-basic-pbs",
    simParams=SimParams(stoptime=20.0),
    paramOverrides={"gravity":1},
    inputFilePath="Underworld/InputFiles")
    #solverOpts="myOpts.opt")
mRun.jobParams = JobParams(nproc=1, maxRunTime=timedelta(minutes=10), pollInterval=50)
#jobName MUST be the SAME as the model name for the time being
mRun.jobParams['PBS'] = {'jobNameLine':'#PBS -N RayTay-basicBenchmark'}
#edit this list to include modules req'd on your chosen machine.
mRun.jobParams['PBS']['sourcefiles'] = ['/usr/local/Modules/default/init/bash']
mRun.jobParams['PBS']['modules'] = ['underworld/dev']
mRun.writeInfoXML()
jobRunner = credo.jobrunner.pbsjobrunner.PBSJobRunner()
mResult = jobRunner.runModel(mRun)
mResult.writeRecordXML()

#-----------------------------

mResult.readFrequentOutput()
# This plot doesn't work perhaps due to non interactive mode?
#mResult.freqOutput.plotOverTime('Vrms', depName='Time', show=True, path=mResult.outputPath)

maxVal, maxTimeStep = mResult.freqOutput.getMax('Vrms')
maxTime = mResult.freqOutput.getValueAtStep('Time', maxTimeStep)
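
# A small assumed continuation, mirroring the reporting style used in the
# other examples in this collection: print the peak Vrms and when it occurred.
print "Maximum Vrms was %g at step %d (time %g)" % (maxVal, maxTimeStep, maxTime)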
#! /usr/bin/env python
import os
from credo.modelrun import ModelRun, SimParams
from credo.modelsuite import ModelSuite, StgXMLVariant
import credo.modelsuite as msuite
import credo.jobrunner
from credo.analysis import modelplots

import credo_rayTaySuite

elRes=32
stoptime=2500.0
mRun = ModelRun("RayTay-basic", "RayleighTaylorBenchmark.xml",
    simParams=SimParams(stoptime=stoptime, nsteps=-1, dumpevery=3))
mRun.paramOverrides={"elementResI":elRes, "elementResJ":elRes}
mSuite = ModelSuite("output/raytay-suite-simple-%dx%d-%d_t" %\
    (elRes, elRes, stoptime),
    templateMRun=mRun)
ampRange = [0.02 + x * 0.02 for x in range(10)]
mSuite.addVariant("initPerturbation",
    StgXMLVariant("components.lightLayerShape.amplitude", ampRange))

mSuite.subOutputPathGenFunc = msuite.getSubdir_RunIndexAndText
mSuite.generateRuns()

if __name__ == "__main__":
    postProcFromExisting = False
    if postProcFromExisting == False:
        jobRunner = credo.jobrunner.defaultRunner()
        mResults = jobRunner.runSuite(mSuite)
    else:
Example no. 11
# Assumed imports (standard library plus CREDO) for this excerpt:
import os, sys, getopt
from credo.modelrun import ModelRun, SimParams

# This is where we create the key data structure, the mRun. It stores info
# about the directories used, the timestep, and the fields checkpointed, and
# is used by the APIs to access that info.

opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])

#For now just copy all args as input files
inputFiles = args
modelName, ext = os.path.splitext(args[0])
modelName += "-referenceTest"
# Do the following live on an options struct?
outputPath = 'output' + os.sep + modelName
expectedPath = 'expected' + os.sep + modelName
nproc = 1

mRun = ModelRun(modelName, inputFiles, outputPath, nproc=nproc)

# TODO: responsibility of SystemTest class?
createReference = False
# For a reference test, these are standard fields to be tested.
standardFields = ['VelocityField', 'PressureField']
runSteps = 10

if createReference:
    mRun.outputPath = expectedPath
    mRun.simParams = SimParams(nsteps=runSteps, cpevery=runSteps, dumpevery=0)
    mRun.cpFields = standardFields
else:
    mRun.simParams = SimParams(nsteps=runSteps, cpevery=0, dumpevery=0)
    fTests = mRun.analysis['fieldTests']
    fTests.testTimestep = runSteps

# Assumed imports (standard library plus CREDO) for this excerpt:
import os, sys, getopt
import credo
import credo.modelrun
import credo.modelresult
from credo.modelrun import ModelRun, SimParams

opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])

# For now just copy all args as input files
inputFiles = args
modelName, ext = os.path.splitext(args[0])
nproc = 1

# For a restart test, these are standard fields to be tested.
modelName += "-restartTest"
outputPath = "output" + os.sep + modelName
standardFields = ["VelocityField", "PressureField"]
runSteps = 20
assert runSteps % 2 == 0

print "Initial run:"
mRun = ModelRun(modelName + "-initial", inputFiles, outputPath, nproc=nproc)
initialOutputPath = outputPath + os.sep + "initial"
mRun.outputPath = initialOutputPath
mRun.simParams = SimParams(nsteps=runSteps, cpevery=runSteps / 2, dumpevery=0)
mRun.cpFields = standardFields

mRun.writeInfoXML()
credo.prepareOutputLogDirs(mRun.outputPath, mRun.logPath)
# This will run the model, and also save basic results (e.g. walltime)
analysisXML = mRun.analysisXMLGen()
results = credo.modelrun.runModel(mRun)
credo.modelresult.writeModelResultsXML(results, path=mRun.outputPath)

print "Restart run:"
mRun.name = modelName + "-restart"
mRun.outputPath = outputPath + os.sep + "restart"
#! /usr/bin/env python
import os, copy
import csv
import itertools
import credo.modelsuite as msuite
from credo.modelrun import ModelRun, SimParams
from credo.modelsuite import ModelSuite, JobParamVariant
import credo.jobrunner
import credo.analysis.modelplots as modelplots

elResSweep = [32, 64, 96, 128, 196, 256]
defParams = SimParams(nsteps=5)
mRun = ModelRun("RayTayOverturn", "RayleighTaylorBenchmark.xml",
    simParams=defParams)

mSuites = []
for elRes in elResSweep:
    mRun.paramOverrides["elementResI"] = elRes
    mRun.paramOverrides["elementResJ"] = elRes
    outPathBase = os.path.join('output','perfScalingTest', 'res%04d' % elRes)
    if not os.path.exists(outPathBase):
        os.makedirs(outPathBase)

    mSuite = ModelSuite(outPathBase, templateMRun=mRun)
    mSuite.elRes = elRes
    mSuite.procRange = [1, 2, 4, 8]
    mSuite.addVariant("nproc", JobParamVariant("nproc", mSuite.procRange))
    mSuite.subOutputPathGenFunc = msuite.getSubdir_RunIndexAndText
    mSuite.generateRuns(iterGen=itertools.izip)
    mSuites.append(mSuite)
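
# A minimal assumed continuation: run each resolution's suite with the default
# job runner, as the other examples in this collection do.
jobRunner = credo.jobrunner.defaultRunner()
allResults = [jobRunner.runSuite(mSuite) for mSuite in mSuites]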
#!/usr/bin/env python
import credo.jobrunner
from credo.modelrun import ModelRun
from credo.analysis.fields import FieldComparisonOp, FieldComparisonList

mRun = ModelRun("CosineHillRotate", "CosineHillRotateBC.xml", "output/rotateAnalysis" )
mRun.writeInfoXML()

jobRunner = credo.jobrunner.defaultRunner()
mRes = jobRunner.runModel(mRun)
mRes.writeRecordXML()

#-----------------------------
# Post-process
fieldComps = FieldComparisonList()
# Should there be an interface to get from result too?
fieldComps.readFromStgXML(mRun.modelInputFiles, mRun.basePath)
fieldComps.postRun(mRun, mRun.basePath)
fCompResults = fieldComps.getAllResults(mRes)

for res in fCompResults:
    print "Error for field '%s' was %s" % (res.fieldName, res.dofErrors)
    res.plotOverTime(show=True, path=mRes.outputPath)

mRes.readFrequentOutput()
mRes.freqOutput.plotOverTime('Time', show=True, path=mRes.outputPath)

maxTime, maxTimeStep = mRes.freqOutput.getMax('Time')
print "Maximum value of time was %f, at step %d" % (maxTime, maxTimeStep)