Example #1
    def genSuite(self):
        """See base class :meth:`~credo.systest.api.SysTest.genSuite`.

        For this test, this method will create a suite containing two
        model runs: one to initially run the requested Model and save the
        results, and a second to restart mid-way through, so that the
        results can be compared at the end."""
        # Initial run
        initRun = self._createDefaultModelRun(self.testName + "-initial",
                                              self.initialOutputPath)
        initRun.simParams = SimParams(nsteps=self.fullRunSteps,
                                      cpevery=self.fullRunSteps // 2,
                                      dumpevery=0)
        initRun.cpFields = self.fieldsToTest
        self.mSuite.addRun(initRun, "Do the initial full run and checkpoint"\
            " solutions.")
        # Restart run
        resRun = self._createDefaultModelRun(self.testName + "-restart",
                                             self.restartOutputPath)
        resRun.simParams = SimParams(nsteps=self.fullRunSteps // 2,
                                     cpevery=0,
                                     dumpevery=0,
                                     restartstep=self.fullRunSteps // 2)
        resRun.cpReadPath = self.initialOutputPath
        self.resRunI = self.mSuite.addRun(
            resRun,
            "Do the restart run and check results at end match initial.")
Example #2
    def regenerateFixture(self, jobRunner):
        '''Do a run to create the reference images to use.'''

        print("Running the model for % steps creating reference images every %d"\
            " steps, and saving in dir '%s'" % \
            (self.runSteps, self.compareEvery, self.expectedSolnPath))
        mRun = self._createDefaultModelRun(self.testName + "-createReference",
                                           self.expectedSolnPath)
        mRun.simParams = SimParams(nsteps=self.runSteps,
                                   cpevery=0,
                                   dumpevery=self.compareEvery)
        for imageComp in self.imageComps.values():
            imageComp.attachOps(mRun)
        mRun.writeInfoXML()
        result = jobRunner.runModel(mRun)
        # Now check the required images were actually created
        for imageComp in self.imageComps.values():
            refImageFilename = os.path.join(self.expectedSolnPath,
                                            imageComp.imageFilename)
            if not os.path.exists(refImageFilename):
                raise api.SysTestSetupError("After running the model to"\
                    " generate reference image '%s', the image file doesn't"\
                    " exist. Check that your Model's XML is set up to"\
                    " generate the image correctly, and/or that the image"\
                    " filename you specified in your test is correct."\
                    % refImageFilename)
        result.writeRecordXML()
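The post-run existence check above generalizes beyond images; here is a
minimal self-contained sketch (plain Python, not CREDO API) of the same
fail-loudly pattern for any set of expected output files:

import os

def checkExpectedFiles(dirPath, filenames):
    # The same sanity check regenerateFixture performs for its reference
    # images: raise an error listing any expected file that is missing.
    missing = [fname for fname in filenames
               if not os.path.exists(os.path.join(dirPath, fname))]
    if missing:
        raise RuntimeError("Missing expected output files in '%s': %s"
                           % (dirPath, ", ".join(missing)))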
Example #3
    def regenerateFixture(self, jobRunner):
        '''Do a run to create the reference solution to use.'''
        resParams = ("elementResI", "elementResJ", "elementResK")
        ffile = stgxml.createFlattenedXML(self.inputFiles)
        xmlDoc = etree.parse(ffile)
        stgRoot = xmlDoc.getroot()
        origRes = [0] * 3
        for ii, resParam in enumerate(resParams):
            origRes[ii] = stgxml.getParamValue(stgRoot, resParam, int)
        os.remove(ffile)
        highRes = [int(self.highResRatio * res) for res in origRes]

        print "Running the model to create a high-res reference solution "\
            " after %d steps and with res %g times the original, "\
            " and saving in dir '%s'" % \
            (self.runSteps, self.highResRatio, self.expectedSolnPath)
        mRun = self._createDefaultModelRun(self.testName + "-createReference",
                                           self.expectedSolnPath)
        mRun.simParams = SimParams(nsteps=self.runSteps,
                                   cpevery=self.runSteps,
                                   dumpevery=0)
        for ii, resParam in enumerate(resParams):
            mRun.paramOverrides[resParam] = highRes[ii]
        mRun.cpFields = self.fieldsToTest
        mRun.writeInfoXML()
        mRun.analysisXMLGen()
        result = jobRunner.runModel(mRun)
        # It's conceivable this could be useful, if we store results about
        # e.g. solver solution times etc.
        result.writeRecordXML()
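A standalone sketch of the resolution scaling used above, with made-up
element counts: each elementRes parameter read from the flattened XML is
multiplied by highResRatio and truncated back to an int, since mesh
resolutions must be whole numbers.

origRes = [32, 32, 16]   # e.g. elementResI/J/K as read from the model XML
highResRatio = 1.5
highRes = [int(highResRatio * res) for res in origRes]
assert highRes == [48, 48, 24]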
Example #4
    def genSuite(self):
        """See base class :meth:`~credo.systest.api.SysTest.genSuite`.

        For this test, just a single model run is needed, to run
        the model and compare against the reference solution."""
        mRun = self._createDefaultModelRun(
            self.testName, os.path.join(self.outputPathBase, "testRun"))
        mRun.simParams = SimParams(nsteps=self.runSteps,
                                   cpevery=0,
                                   dumpevery=0)
        self.mSuite.addRun(mRun, "Run the model, and check results against "\
            "previously generated reference solution.")
Example #5
    def regenerateFixture(self, jobRunner):
        '''Do a run to create the reference solution to use.'''

        print "Running the model to create a reference solution after %d"\
            " steps, and saving in dir '%s'" % \
            (self.runSteps, self.expectedSolnPath)
        mRun = self._createDefaultModelRun(self.testName + "-createReference",
                                           self.expectedSolnPath)
        mRun.simParams = SimParams(nsteps=self.runSteps,
                                   cpevery=self.runSteps,
                                   dumpevery=0)
        mRun.cpFields = self.fieldsToTest
        mRun.writeInfoXML()
        result = jobRunner.runModel(mRun)
        # It's conceivable this could be useful, if we store results about
        # e.g. solver solution times etc.
        result.writeRecordXML()
Example #6
#! /usr/bin/env python
import os, copy
import csv
from credo.modelrun import ModelRun, SimParams
from credo.modelsuite import ModelSuite
import credo.jobrunner

jobRunner = credo.jobrunner.defaultRunner()

outPathBase = os.path.join('output', 'PPC_Compare')
if not os.path.exists(outPathBase):
    os.makedirs(outPathBase)

defParams = SimParams(nsteps=2)
stdRun = ModelRun("Arrhenius-normal",
                  os.path.join('..', '..', 'Underworld', 'InputFiles',
                               'Arrhenius.xml'),
                  simParams=defParams)
ppcRun = ModelRun("Arrhenius-ppc",
                  "Arrhenius.xml",
                  basePath=os.path.join("Ppc_Testing", "udw_inputfiles"),
                  simParams=defParams)

stdSuite = ModelSuite(os.path.join(outPathBase, "arrBasic"))
ppcSuite = ModelSuite(os.path.join(os.getcwd(), outPathBase, "arrPIC"))

for ii in range(10):
    stdRun.outputPath = os.path.join(stdSuite.outputPathBase, "%.5d" % ii)
    ppcRun.outputPath = os.path.join(ppcSuite.outputPathBase, "%.5d" % ii)
    stdSuite.addRun(copy.deepcopy(stdRun))
    ppcSuite.addRun(copy.deepcopy(ppcRun))
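The script above only builds the two suites; a hedged continuation,
assuming (as CREDO's ModelSuite suggests) that added runs are kept in a
runs list, could execute them with the same runModel call used in the
other examples:

# Hypothetical continuation: execute every queued run and collect the
# results for a later PPC-vs-standard comparison. Assumes ModelSuite
# exposes its runs as a 'runs' attribute.
stdResults = [jobRunner.runModel(run) for run in stdSuite.runs]
ppcResults = [jobRunner.runModel(run) for run in ppcSuite.runs]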
Example #7
inputFiles = args
modelName, ext = os.path.splitext(args[0])
nproc = 1

# For a restart test, these are standard fields to be tested.
modelName += "-restartTest"
outputPath = 'output' + os.sep + modelName
standardFields = ['VelocityField', 'PressureField']
runSteps = 20
assert runSteps % 2 == 0

print "Initial run:"
mRun = ModelRun(modelName + "-initial", inputFiles, outputPath, nproc=nproc)
initialOutputPath = outputPath + os.sep + "initial"
mRun.outputPath = initialOutputPath
mRun.simParams = SimParams(nsteps=runSteps, cpevery=runSteps // 2, dumpevery=0)
mRun.cpFields = standardFields

mRun.writeInfoXML()
credo.prepareOutputLogDirs(mRun.outputPath, mRun.logPath)
analysisXML = mRun.analysisXMLGen()
# This will run the model, and also save basic results (e.g. walltime)
results = credo.modelrun.runModel(mRun)
credo.modelresult.writeModelResultsXML(results, path=mRun.outputPath)

print "Restart run:"
mRun.name = modelName + "-restart"
mRun.outputPath = outputPath + os.sep + "restart"
mRun.cpReadPath = initialOutputPath
# Note we could modify existing SimParams rather than create new, but below is
# probably easier
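The listing breaks off after this comment; a hedged completion of the
restart parameters, modeled directly on the restart run in Example #1,
might read:

# Hypothetical completion, mirroring Example #1's restart run: restart
# halfway through, reading the checkpoint written by the initial run.
mRun.simParams = SimParams(nsteps=runSteps // 2, cpevery=0, dumpevery=0,
                           restartstep=runSteps // 2)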
Example #8
# Do the following live on an options struct?
outputPath = 'output' + os.sep + modelName
expectedPath = 'expected' + os.sep + modelName
nproc = 1

mRun = ModelRun(modelName, inputFiles, outputPath, nproc=nproc)

# TODO: responsibility of SystemTest class?
createReference = False
# For a reference test, these are standard fields to be tested.
standardFields = ['VelocityField', 'PressureField']
runSteps = 10

if createReference:
    mRun.outputPath = expectedPath
    mRun.simParams = SimParams(nsteps=runSteps, cpevery=runSteps, dumpevery=0)
    mRun.cpFields = standardFields
else:
    mRun.simParams = SimParams(nsteps=runSteps, cpevery=0, dumpevery=0)
    fTests = mRun.analysis['fieldTests']
    fTests.testTimestep = runSteps
    fTests.useReference = True
    fTests.referencePath = expectedPath
    defFieldTol = 1e-2
    for fieldName in standardFields:
        fTests.add(FieldTest(fieldName, tol=defFieldTol))

mRun.writeInfoXML()

# This will generate an additional XML to require StGermain/Underworld to do
# any requested extra analysis (eg compare fields), and run for the
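The snippet is cut off mid-comment; based on the matching run sequence in
Example #7, a hedged completion might be (the comment's ending and the
exact calls are assumptions):

# ...requested number of timesteps.
analysisXML = mRun.analysisXMLGen()
results = credo.modelrun.runModel(mRun)
credo.modelresult.writeModelResultsXML(results, path=mRun.outputPath)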