# Analytic-test driver: run the model, then compare its fields against
# the analytic solution and record the results.
# NOTE(review): relies on names bound elsewhere in the file (mRun, mrun,
# mres, credo, os, defaultFieldTol, customOpts) — confirm the imports.

# For analytic test, assume the user has specified what fields to analyse
# in the XML. (In the background this will generate the flattened XML
# for the model.)
fTests = mRun.analysis['fieldTests']
fTests.readFromStgXML(mRun.modelInputFiles)
# Set all field tolerances at once. Of course, should allow this to
# be over-ridden.
fTests.setAllTols(defaultFieldTol)
mRun.writeModelRunXML()
# This will generate an additional XML to require StGermain/Underworld
# to do any requested extra analysis (eg compare fields), and run
# for the appropriate number of timesteps etc.
# NOTE(review): `mrun` (module) vs `mRun` (instance) — presumably the
# credo.modelrun module imported as `mrun`; verify against the imports.
mRun.analysisXML = mrun.analysisXMLGen(mRun)
credo.prepareOutputLogDirs(mRun.outputPath, mRun.logPath)
# This will run the model, and also save basic results (e.g. walltime).
results = mrun.runModel(mRun, customOpts)
# TODO: First step necessary since currently convergence files saved
# in directory of run, may be better handled within the runModel.
credo.moveConvergenceResults(os.getcwd(), mRun.outputPath)
results.fieldResults = fTests.testConvergence(mRun.outputPath)
# NOTE(review): `mres` is presumably credo.modelresult imported as an
# alias — confirm.
mres.writeModelResultsXML(results, path=mRun.outputPath)
# Now do any required post-processing, depending on type of script.
credo.cleanupOutputLogDirs(mRun.outputPath, mRun.logPath)
# Reference-test driver: run the model for a fixed number of steps and
# compare chosen fields against previously saved reference fields.
# NOTE(review): relies on names bound elsewhere in the file (mRun, fTests,
# credo, os, FieldTest, runSteps, expectedPath, standardFields,
# createReference) — confirm the imports.

fTests.testTimestep = runSteps
fTests.useReference = True
fTests.referencePath = expectedPath
defFieldTol = 1e-2
# Register a tolerance-checked comparison for each standard field.
for fieldName in standardFields:
    fTests.add(FieldTest(fieldName, tol=defFieldTol))
mRun.writeInfoXML()
# This will generate an additional XML to require StGermain/Underworld to do
# any requested extra analysis (eg compare fields), and run for the
# appropriate number of timesteps etc.
mRun.analysisXMLGen()
credo.prepareOutputLogDirs(mRun.outputPath, mRun.logPath)
# This will run the model, and also save basic results (e.g. walltime).
results = credo.modelrun.runModel(mRun)
# When generating the reference data itself there is nothing to compare
# against, so skip the convergence checks.
# NOTE(review): suite boundaries of this `if` reconstructed from context —
# confirm whether writeModelResultsXML belongs inside it.
if not createReference:
    # TODO: This step necessary since currently convergence files saved
    # in directory of run, may be better handled within the runModel.
    credo.moveConvergenceResults(os.getcwd(), mRun.outputPath)
    # Fixed: was bare `outputPath` (undefined); every sibling call uses
    # mRun.outputPath.
    results.fieldResults = fTests.testConvergence(mRun.outputPath)
credo.modelresult.writeModelResultsXML(results, path=mRun.outputPath)
# Now do any required post-processing, depending on type of script.
credo.cleanupOutputLogDirs(mRun.outputPath, mRun.logPath)