def customReporting(sciBTest, mResults):
    # This gets called immediately after the models have been run.
    # First set up overall analysis images
    modelplots.plotOverAllRuns(mResults, 'Vrms', depName='Time',
        path=sciBTest.outputPathBase)
    sciBTest.mSuite.analysisImages = ["Vrms-multiRunTimeSeries.png"]
    # Now specific per-run images
    fStep = mResults[0].freqOutput.finalStep()
    dEvery = sciBTest.mSuite.runs[0].simParams.dumpevery
    lastImgStep = fStep // dEvery * dEvery
    vrmsPeakTime = sciBTest.testComps[0]['VRMS of first diapir'].actualTime
    vrmsPeakStep = mResults[0].freqOutput.getClosest('Time', vrmsPeakTime)[1]
    vrmsPeakImgStep = int(round(vrmsPeakStep / float(dEvery))) * dEvery
    vrmsPeakImgStep = min(vrmsPeakImgStep, lastImgStep)
    # Create an empty list of images to display
    sciBTest.mSuite.modelImagesToDisplay = [[] for runI in
        range(len(sciBTest.mSuite.runs))]
    # Choose which model timestep images to display: note that here we're
    # programmatically choosing to show the peak VRMS timestep.
    sciBTest.mSuite.modelImagesToDisplay[0] = [
        (0, "initial state"),
        (vrmsPeakImgStep, "near first VRMS peak at t=%f" % vrmsPeakTime),
        (lastImgStep, "")]
    # Ask the CREDO reporting API for report generators for PDF (ReportLab)
    # and RST (reStructuredText) output, and create a standard science
    # benchmark report with each.
    for rGen in getGenerators(["RST", "ReportLab"], sciBTest.outputPathBase):
        sReps.makeSciBenchReport(sciBTest, mResults, rGen,
            os.path.join(sciBTest.outputPathBase, "%s-report.%s" %
                (sciBTest.testName, rGen.stdExt)), imgPerRow=2)
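
# The step-rounding logic above (fStep // dEvery * dEvery, plus rounding the
# peak step to the nearest multiple of dumpevery and clamping) recurs
# throughout these reporting functions. A minimal standalone sketch of that
# logic, assuming integer steps and a positive dump interval; the helper
# names are hypothetical, not part of the CREDO API:
def nearestDumpStepBelow(step, dumpEvery):
    """Largest dumped step not exceeding `step`."""
    return (step // dumpEvery) * dumpEvery

def nearestDumpStepRounded(step, dumpEvery, lastDumpStep):
    """Dumped step closest to `step`, clamped to the final dumped step."""
    rounded = int(round(step / float(dumpEvery))) * dumpEvery
    return min(rounded, lastDumpStep)

# e.g. nearestDumpStepBelow(157, 10) == 150, and
#      nearestDumpStepRounded(157, 10, 150) == 150 (160, clamped to 150).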
def customReport_VRMS_Nusselt(sciBTest, mResults):
    # Some custom output generation and analysis
    vrmsTCs, vrmsResults = sciBTest.getTCRes("VRMS vs Blankenbach")
    nusseltTCs, nusseltResults = sciBTest.getTCRes("Nusselt vs Blankenbach")
    vrmsActuals = [tc.actualVal for tc in vrmsTCs]
    nusseltActuals = [tc.actualVal for tc in nusseltTCs]
    # TODO: useful if below values available on modelResults automatically.
    for mRes in mResults:
        mRes.readFrequentOutput()
    nSteps = [mRes.freqOutput.finalStep() for mRes in mResults]
    # Plotting / CSV writing
    thermalConvPostProc.plotResultsVsBlankenbach(BBRa,
        BBVrmsMin, BBVrmsMax, BBNusseltMin, BBNusseltMax,
        {"UW Actual": vrmsActuals}, {"UW Actual": nusseltActuals},
        os.path.join(sciBTest.outputPathBase, "VrmsAndNusseltValues.png"))
    observables = {'Vrms': vrmsActuals, 'Vrms Passed': vrmsResults,
        'Nusselt': nusseltActuals, 'Nusselt Passed': nusseltResults,
        'nSteps': nSteps}
    msuite.writeInputsOutputsToCSV(sciBTest.mSuite, observables,
        "thermalDimBMResults.csv")
    modelplots.plotOverAllRuns(mResults, 'Nusselt',
        path=sciBTest.outputPathBase)
    modelplots.plotOverAllRuns(mResults, 'Vrms',
        path=sciBTest.outputPathBase)
    # TODO: modularise the below
    import plotCpuTimesAllRuns as plotCpus
    plotCpus.plotAllRuns(sciBTest.outputPathBase)
    sciBTest.mSuite.analysisImages = [
        'VrmsAndNusseltValues.png',
        'Nusselt-multiRunTimeSeries.png',
        'Vrms-multiRunTimeSeries.png',
        'cpuTimePerStep.png']
    sciBTest.mSuite.modelImagesToDisplay = [[] for runI in
        range(len(sciBTest.mSuite.runs))]
    lastImgSteps = []
    for finalStep, mRun in zip(nSteps, sciBTest.mSuite.runs):
        simParams = mRun.getSimParams()
        lastImgSteps.append(simParams.nearestDumpStep(finalStep, finalStep))
    sciBTest.mSuite.modelImagesToDisplay[0] = [
        (0, ""), (700, ""), (lastImgSteps[0], "")]
    sciBTest.mSuite.modelImagesToDisplay[1] = [
        (0, ""), (800, ""), (lastImgSteps[1], "")]
    sciBTest.mSuite.modelImagesToDisplay[2] = [
        (0, ""), (400, ""), (lastImgSteps[2], "")]
    for rGen in getGenerators(["RST", "ReportLab"], sciBTest.outputPathBase):
        sReps.makeSciBenchReport(sciBTest, mResults, rGen,
            os.path.join(sciBTest.outputPathBase, "%s-report.%s" %
                (sciBTest.testName, rGen.stdExt)), imgPerRow=3)
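
# The three hard-coded modelImagesToDisplay assignments above only work for a
# suite of exactly three runs. A sketch of a generic alternative, assuming
# the same (step, caption) tuple convention; midSteps is a hypothetical
# stand-in for the hand-picked middle steps (700, 800, 400):
def chooseImagesPerRun(mSuite, lastImgSteps, midSteps):
    for runI in range(len(mSuite.runs)):
        mSuite.modelImagesToDisplay[runI] = [
            (0, ""),                      # initial state
            (midSteps[runI], ""),         # a representative mid-run dump
            (lastImgSteps[runI], "")]     # final dumped step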
def customReport_VRMS(sciBTest, mResults):
    for mRes in mResults:
        mRes.readFrequentOutput()
    # Some custom output generation and analysis
    fSteps = [mResult.freqOutput.finalStep() for mResult in mResults]
    vrmsTCs, vrmsResults = sciBTest.getTCRes("VRMS Max")
    vrmsActuals = [tc.actualVal for tc in vrmsTCs]
    # Now specific per-run images
    dEvery = sciBTest.mSuite.runs[0].simParams.dumpevery
    lastImgSteps = [fStep // dEvery * dEvery for fStep in fSteps]
    # Find the timestep nearest each VRMS peak
    vrmsPeakTimes = [tc.actualTime for tc in vrmsTCs]
    vrmsPeakSteps = [mRes.freqOutput.getClosest('Time', peakTime)[1]
        for mRes, peakTime in zip(mResults, vrmsPeakTimes)]
    # TODO: useful if below values available on modelResults automatically.
    # Plotting
    modelplots.plotOverAllRuns(mResults, 'Vrms', depName='Time',
        path=sciBTest.outputPathBase)
    modelplots.plotOverAllRuns(mResults, 'Entrainment', depName='Time',
        path=sciBTest.outputPathBase)
    # TODO: modularise the below
    import plotCpuTimesAllRuns as plotCpus
    plotCpus.plotAllRuns(sciBTest.outputPathBase)
    sciBTest.mSuite.analysisImages = [
        'Vrms-multiRunTimeSeries.png',
        'Entrainment-multiRunTimeSeries.png',
        'cpuTimePerStep.png']
    sciBTest.mSuite.modelImagesToDisplay = [[] for runI in
        range(len(sciBTest.mSuite.runs))]
    lastImgSteps = []
    vrmsPeakImgSteps = []
    for runI, mRun in enumerate(sciBTest.mSuite.runs):
        finalStep = fSteps[runI]
        simParams = mRun.getSimParams()
        lastImgSteps.append(simParams.nearestDumpStep(finalStep, finalStep))
        vrmsPeakImgSteps.append(simParams.nearestDumpStep(
            vrmsPeakSteps[runI], finalStep))
    for resI, mResult in enumerate(mResults):
        simParams = sciBTest.mSuite.runs[resI].getSimParams()
        qtrStep = simParams.nearestDumpStep(fSteps[resI] // 4, fSteps[resI])
        halfStep = simParams.nearestDumpStep(fSteps[resI] // 2, fSteps[resI])
        qtrTime = mResult.freqOutput.getValueAtStep("Time", qtrStep)
        halfTime = mResult.freqOutput.getValueAtStep("Time", halfStep)
        # time at the final displayed dump step
        finalTime = mResult.freqOutput.getValueAtStep("Time",
            lastImgSteps[resI])
        sciBTest.mSuite.modelImagesToDisplay[resI] = [
            (0, ""),
            (vrmsPeakImgSteps[resI], "VRMS Peak, t=%f" % vrmsPeakTimes[resI]),
            (qtrStep, "t=%f" % qtrTime),
            (halfStep, "t=%f" % halfTime),
            (lastImgSteps[resI], "Final, t=%f" % finalTime)]
    for rGen in getGenerators(["RST", "ReportLab"], sciBTest.outputPathBase):
        sReps.makeSciBenchReport(sciBTest, mResults, rGen,
            os.path.join(sciBTest.outputPathBase, "%s-report.%s" %
                (sciBTest.testName, rGen.stdExt)), imgPerRow=3)
def reportResults(mSuite, mResults):
    indicesIter = msuite.getVariantIndicesIter(mSuite.modelVariants,
        mSuite.iterGen)
    varNameDicts = msuite.getVariantNameDicts(mSuite.modelVariants,
        indicesIter)
    for resI, mRes in enumerate(mResults):
        print("Post-process result %d: with" % resI,
            ", ".join(["%s=%g" % item for item in varNameDicts[resI].items()]))
        mRes.readFrequentOutput()
        mRes.freqOutput.plotOverTime('Vrms', depName='Time',
            path=mRes.outputPath)
        maxVal, maxTimeStep = mRes.freqOutput.getMax('Vrms')
        maxTime = mRes.freqOutput.getValueAtStep('Time', maxTimeStep)
        print("\tMaximum value of Vrms was %f, at time %.2f" %
            (maxVal, maxTime))
    modelplots.plotOverAllRuns(mResults, 'Vrms', depName='Time',
        path=mSuite.outputPathBase)
    mSuite.analysisImages = ['Vrms-multiRunTimeSeries.png']
    mSuite.modelImagesToDisplay = [[] for runI in range(len(mSuite.runs))]
    for runI, mRun in enumerate(mSuite.runs):
        mRes = mResults[runI]
        fStep = mRes.freqOutput.finalStep()
        fTime = mRes.freqOutput.getValueAtStep('Time', fStep)
        dEvery = mRun.simParams.dumpevery
        lastImgStep = fStep // dEvery * dEvery
        vrmsMax, vrmsPeakStep = mRes.freqOutput.getMax('Vrms')
        vrmsMaxTime = mRes.freqOutput.getValueAtStep('Time', vrmsPeakStep)
        vrmsPeakImgStep = mRun.simParams.nearestDumpStep(fStep, vrmsPeakStep)
        vrmsPeakImgTime = mRes.freqOutput.getValueAtStep('Time',
            vrmsPeakImgStep)
        mSuite.modelImagesToDisplay[runI] = [
            (0, "initial"),
            (vrmsPeakImgStep, "t=%f, near VRMS peak %g (at t=%g)" %
                (vrmsPeakImgTime, vrmsMax, vrmsMaxTime)),
            (mRun.simParams.nearestDumpStep(fStep, int(fStep * .5)), "1/2"),
            (mRun.simParams.nearestDumpStep(fStep, int(fStep * .75)), "3/4"),
            (lastImgStep, "final state (t=%f)" % fTime)]
    for rGen in getGenerators(["RST", "ReportLab"], mSuite.outputPathBase):
        sReps.makeSuiteReport(mSuite, mResults, rGen,
            os.path.join(mSuite.outputPathBase, "%s-report.%s" %
                ("RayTaySuite-examples", rGen.stdExt)), imgPerRow=3)
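
# A sketch of how reportResults might be driven, assuming a ModelSuite has
# already been configured as mSuite, and that the default CREDO job runner's
# runSuite() returns the per-run results list (check the exact signature
# against your CREDO version):
if __name__ == "__main__":
    jobRunner = credo.jobrunner.defaultRunner()
    mResults = jobRunner.runSuite(mSuite)
    reportResults(mSuite, mResults)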
def customReporting(sciBTest, mResults):
    # Plotting / CSV writing
    for mRes in mResults:
        mRes.readFrequentOutput()
    vrmsTCs, vrmsResults = sciBTest.getTCRes("Scaled VRMS")
    recSteps = [mRes.freqOutput.finalStep() for mRes in mResults]
    vrmsActuals = [mRes.freqOutput.getValueAtStep("Vrms", ns)
        for mRes, ns in zip(mResults, recSteps)]
    nusseltActuals = [mRes.freqOutput.getValueAtStep("Nusselt", ns)
        for mRes, ns in zip(mResults, recSteps)]
    for mRes in mResults:
        mRes.freqOutput.plotOverTime('Vrms', depName='Time', show=False,
            path=mRes.outputPath)
        mRes.freqOutput.plotOverTime('Nusselt', depName='Time', show=False,
            path=mRes.outputPath)
    observables = {'Vrms': vrmsActuals, 'Vrms Pass': vrmsResults,
        'Nusselt': nusseltActuals, 'nSteps': recSteps,
        'ScalingFac': vrmsScalingFactors}
    msuite.writeInputsOutputsToCSV(sciBTest.mSuite, observables,
        "scalingResults.csv")
    # For this benchmark we want to show the VRMS and Nusselt images
    # generated in _each run_, so no suite-level analysis images.
    sciBTest.mSuite.analysisImages = None
    #sciBTest.mSuite.analysisImages = [
    #    'VrmsAndNusseltValues.png',
    #    'Nusselt-multiRunTimeSeries.png',
    #    'Vrms-multiRunTimeSeries.png']
    sciBTest.mSuite.modelImagesToDisplay = [[] for runI in
        range(len(sciBTest.mSuite.runs))]
    lastImgSteps = []
    for finalStep, mRun in zip(recSteps, sciBTest.mSuite.runs):
        simParams = mRun.getSimParams()
        lastImgSteps.append(simParams.nearestDumpStep(finalStep, finalStep))
    for runI in range(len(mResults)):
        sciBTest.mSuite.modelImagesToDisplay[runI] = [
            (0, "initial"), (lastImgSteps[runI], "final")]
    for rGen in getGenerators(["RST", "ReportLab"], sciBTest.outputPathBase):
        sReps.makeSciBenchReport(sciBTest, mResults, rGen,
            os.path.join(sciBTest.outputPathBase, "%s-report.%s" %
                (sciBTest.testName, rGen.stdExt)), imgPerRow=2)
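
# For reference, the observables dict above maps column names to per-run
# value lists. A minimal sketch of the kind of CSV a writer like
# msuite.writeInputsOutputsToCSV produces from it, assuming equal-length
# lists and ignoring the per-run input parameters the real function also
# records (the helper name is hypothetical):
import csv

def writeObservablesToCSV(observables, filename):
    colNames = sorted(observables.keys())
    nRuns = len(observables[colNames[0]])
    with open(filename, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(colNames)            # header row of column names
        for runI in range(nRuns):            # one row per model run
            writer.writerow([observables[c][runI] for c in colNames])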
plt.plot(r, var / scale[field_name], 'k--', label=sim, zorder=1)
plt.xlabel('radius (m)')
plt.ylabel(field_name + ' (' + unit[field_name] + ')')
plt.title(' '.join((model_name, 'comparison with', tc_name)))
img_filename_base = '_'.join(
    (model_name, tc_name, 'comparison', field_name))
img_filename_base = img_filename_base.replace(' ', '_')
img_filename = os.path.join(problem1_test.mSuite.runs[run_index].basePath,
                            problem1_test.mSuite.outputPathBase,
                            img_filename_base)
plt.legend(loc='upper left')
plt.tight_layout(pad=3.)
plt.savefig(img_filename + '.png', dpi=300)
plt.savefig(img_filename + '.pdf')
plt.clf()
problem1_test.mSuite.analysisImages.append(img_filename + '.png')

# generate report:
for rGen in getGenerators(["RST"], problem1_test.outputPathBase):
    report_filename = os.path.join(
        problem1_test.outputPathBase,
        "%s-report.%s" % (problem1_test.testName, rGen.stdExt))
    sReps.makeSciBenchReport(problem1_test, mResults, rGen, report_filename)
    html_filename = os.path.join(
        problem1_test.outputPathBase,
        "%s-report.%s" % (problem1_test.testName, 'html'))
    html = publish_file(source_path=report_filename,
                        destination_path=html_filename,
                        writer_name="html")
title = run_name.replace('single', 'single porosity').replace('minc', 'MINC')
plt.plot(var[:n], z, '-', label='Waiwera ' + title)
var = AUTOUGH2_result[run_name].getFieldAtOutputIndex(field_name, -1) / scale
plt.plot(var[:n], z, 's', label='AUTOUGH2 ' + title)
plt.xlabel(field_name + ' (' + unit + ')')
plt.ylabel('z (m)')
plt.title(' '.join(['Final', field_name.lower(), 'profile']))
img_filename_base = '_'.join((model_name, field_name))
img_filename_base = img_filename_base.replace(' ', '_')
img_filename = os.path.join(minc_column_test.mSuite.runs[0].basePath,
                            minc_column_test.mSuite.outputPathBase,
                            img_filename_base)
plt.legend(loc='best')
plt.tight_layout(pad=3.)
plt.savefig(img_filename + '.png', dpi=300)
plt.savefig(img_filename + '.pdf')
plt.clf()
minc_column_test.mSuite.analysisImages.append(img_filename + '.png')

# generate report:
for rGen in getGenerators(["RST"], minc_column_test.outputPathBase):
    report_filename = os.path.join(
        minc_column_test.outputPathBase,
        "%s-report.%s" % (minc_column_test.testName, rGen.stdExt))
    sReps.makeSciBenchReport(minc_column_test, mResults, rGen,
                             report_filename)
    html_filename = os.path.join(
        minc_column_test.outputPathBase,
        "%s-report.%s" % (minc_column_test.testName, 'html'))
    html = publish_file(source_path=report_filename,
                        destination_path=html_filename,
                        writer_name="html")
plt.xlabel('time (years)')
plt.ylabel(field_name + ' (' + unit + ')')
plt.legend(loc='best')
plt.title(' '.join((field_name, 'history at production well')))
img_filename_base = '_'.join(
    (model_name, run_name, 'history', field_name))
img_filename_base = img_filename_base.replace(' ', '_')
img_filename = os.path.join(
    minc_production_test.mSuite.runs[run_index].basePath,
    minc_production_test.mSuite.outputPathBase,
    img_filename_base)
plt.tight_layout(pad=3.)
plt.savefig(img_filename + '.png', dpi=300)
plt.savefig(img_filename + '.pdf')
plt.clf()
minc_production_test.mSuite.analysisImages.append(img_filename + '.png')

# generate report:
for rGen in getGenerators(["RST"], minc_production_test.outputPathBase):
    report_filename = os.path.join(
        minc_production_test.outputPathBase,
        "%s-report.%s" % (minc_production_test.testName, rGen.stdExt))
    sReps.makeSciBenchReport(minc_production_test, mResults, rGen,
                             report_filename)
    html_filename = os.path.join(
        minc_production_test.outputPathBase,
        "%s-report.%s" % (minc_production_test.testName, 'html'))
    html = publish_file(source_path=report_filename,
                        destination_path=html_filename,
                        writer_name="html")
# drop duplicate legend entries, keeping the first occurrence of each label:
i = 0
while i < len(labels):
    if labels[i] in labels[:i]:
        del labels[i]
        del handles[i]
    else:
        i += 1
plt.legend(handles, labels, loc='best')
# time labels:
plt.text(0.013, 0.5, "0.01 days")
plt.text(0.03, 0.65, "0.06 days")
plt.text(0.06, 0.73, "0.11 days")
plt.tight_layout(pad=3.)
plt.savefig(img_filename + '.png', dpi=300)
plt.savefig(img_filename + '.pdf')
plt.clf()
infiltration_test.mSuite.analysisImages.append(img_filename + '.png')

# generate report:
for rGen in getGenerators(["RST"], infiltration_test.outputPathBase):
    report_filename = os.path.join(
        infiltration_test.outputPathBase,
        "%s-report.%s" % (infiltration_test.testName, rGen.stdExt))
    sReps.makeSciBenchReport(infiltration_test, mResults, rGen,
                             report_filename)
    html_filename = os.path.join(
        infiltration_test.outputPathBase,
        "%s-report.%s" % (infiltration_test.testName, 'html'))
    html = publish_file(source_path=report_filename,
                        destination_path=html_filename,
                        writer_name="html")
r = result.getCoordinates()
var = result.getFieldAtOutputIndex(field_name, outputIndex)
plt.semilogx(r, var / field_scale[field_name], symbol[sim], label=sim)
plt.xlabel('radius (m)')
plt.ylabel(field_name + ' (' + field_unit[field_name] + ')')
plt.title(field_name)
img_filename_base = '_'.join((model_name, tc_name, 'comparison', field_name))
img_filename_base = img_filename_base.replace(' ', '_')
img_filename = os.path.join(heat_pipe_test.mSuite.runs[run_index].basePath,
                            heat_pipe_test.mSuite.outputPathBase,
                            img_filename_base)
plt.xlim([min_radius, max_radius])
plt.legend(loc='center right')
plt.tight_layout(pad=3.)
plt.savefig(img_filename + '.png', dpi=300)
plt.savefig(img_filename + '.pdf')
plt.clf()
heat_pipe_test.mSuite.analysisImages.append(img_filename + '.png')

# generate report:
for rGen in getGenerators(["RST"], heat_pipe_test.outputPathBase):
    report_filename = os.path.join(
        heat_pipe_test.outputPathBase,
        "%s-report.%s" % (heat_pipe_test.testName, rGen.stdExt))
    sReps.makeSciBenchReport(heat_pipe_test, mResults, rGen, report_filename)
    html_filename = os.path.join(
        heat_pipe_test.outputPathBase,
        "%s-report.%s" % (heat_pipe_test.testName, 'html'))
    html = publish_file(source_path=report_filename,
                        destination_path=html_filename,
                        writer_name="html")
"temp": ("Temperature", "Temperature relative error", "$^\circ$C"), "vapsat": ("Vapour saturation", "Vapour saturation relative error", ""), } for i, tc_name in enumerate(names.keys()): field_name, title, unit = names[tc_name] var = np.array(sciBTest.testComps[0][tc_name].fieldErrors[field_name]) var = np.hstack([atmvals, var]) geo.slice_plot(slc, var, title, unit, plt=plt) img_filename = os.path.join(sciBTest.mSuite.runs[0].basePath, sciBTest.mSuite.outputPathBase, ("%i.png" % i)) plt.savefig(img_filename, dpi=None, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format=None, transparent=False) plt.clf() sciBTest.mSuite.analysisImages.append(img_filename) # --------------------------------------------------------------------------- # report generation for rGen in getGenerators(["RST"], sciBTest.outputPathBase): sReps.makeSciBenchReport( sciBTest, mResults, rGen, os.path.join(sciBTest.outputPathBase, "%s-report.%s" % (sciBTest.testName, rGen.stdExt))) print("NOTE: use 'rst2html xxx-report.rst > xxx-report.html' to generate html")
plt.xlabel('time (s)')
plt.ylabel(field_name + ' (' + unit[field_name] + ')')
plt.legend(loc='best')
plt.title(' '.join((run_name, field_name.lower())))
img_filename_base = '_'.join(
    (model_name, run_name, tc_name, field_name))
img_filename_base = img_filename_base.replace(' ', '_')
img_filename = os.path.join(
    deliverability_test.mSuite.runs[run_index].basePath,
    deliverability_test.mSuite.outputPathBase,
    img_filename_base + '.png')
plt.tight_layout(pad=3.)
plt.savefig(img_filename)
plt.clf()
deliverability_test.mSuite.analysisImages.append(img_filename)

# generate report:
for rGen in getGenerators(["RST"], deliverability_test.outputPathBase):
    report_filename = os.path.join(
        deliverability_test.outputPathBase,
        "%s-report.%s" % (deliverability_test.testName, rGen.stdExt))
    sReps.makeSciBenchReport(deliverability_test, mResults, rGen,
                             report_filename)
    html_filename = os.path.join(
        deliverability_test.outputPathBase,
        "%s-report.%s" % (deliverability_test.testName, 'html'))
    html = publish_file(source_path=report_filename,
                        destination_path=html_filename,
                        writer_name="html")
suite to run a report and generate images.

Example created:- 2011/04/12, PatrickSunter"""

import os
import credo.jobrunner
import credo.reporting as rep
import credo.reporting.standardReports as sReps
import testAll_lowres

ts = testAll_lowres.suite()
sysTest = ts.sysTests[0]

# TODO: really should post-proc from existing, but this isn't working
# perfectly yet for basic sys tests.
jobRunner = credo.jobrunner.defaultRunner()
sysTest.setupTest()
testResult, mResults = sysTest.runTest(jobRunner,
    postProcFromExisting=False, createReports=True)

# Dodgy test report configuration
sysTest.mSuite.analysisImages = []
sysTest.mSuite.modelImagesToDisplay = None

for rGen in rep.getGenerators(["RST", "ReportLab"], sysTest.outputPathBase):
    sReps.makeSciBenchReport(sysTest, mResults, rGen,
        os.path.join(sysTest.outputPathBase, "%s-report.%s" %
            (sysTest.testName, rGen.stdExt)), imgPerRow=2)
def customReporting(sciBTest, mResults):
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    stress = {}
    recDeviatoricStress = {}
    recDeviatoricStressTCs = {}
    recDeviatoricStressRes = {}
    stressTCs = {}
    stressRes = {}
    for fComp in ["XX", "XY", "YY"]:
        recDeviatoricStressTCs[fComp], recDeviatoricStressRes[fComp] = \
            sciBTest.getTCRes("recoveredDeviatoricStress-%s" % fComp)
        stressTCs[fComp], stressRes[fComp] = sciBTest.getTCRes(
            "Stress-%s" % fComp)
        recDeviatoricStress[fComp] = [tc.actualVal
            for tc in recDeviatoricStressTCs[fComp]]
        stress[fComp] = [tc.actualVal for tc in stressTCs[fComp]]
    inIter = msuite.getVariantIndicesIter(sciBTest.mSuite.modelVariants,
        iterStrategy)
    varDicts = msuite.getVariantNameDicts(sciBTest.mSuite.modelVariants,
        inIter)
    for resI, mRes in enumerate(mResults):
        print("Post-process result %d" % resI, "with variants applied of:")
        print(varDicts[resI])
        print("Value of recDeviatoricStress(%s) is (%s)\n stress(%s) is (%s)."
            % (", ".join(fCompMap),
               ", ".join(["%g" % recDeviatoricStress[c][resI]
                   for c in fCompMap]),
               ", ".join(fCompMap),
               ", ".join(["%g" % stress[c][resI] for c in fCompMap])))
    # TODO:
    #plotDeviatoricStressVsAnalytic(maxStressXX, maxStressYY, maxStressXY,
    #    minStressXX, minStressYY, minStressXY,
    #    {"DeviatoricStress_XX": recDeviatoricStress['XX']},
    #    {"DeviatoricStress_YY": recDeviatoricStress['YY']},
    #    {"DeviatoricStress_XY": recDeviatoricStress['XY']},
    #    {"Stress_XX": stressXX}, {"Stress_YY": stressYY},
    #    {"Stress_XY": stressXY})
    #plt.savefig(os.path.join(mSuite.outputPathBase,
    #    "RecoveredDeviatoricStressAndStressValues.png"),
    #    dpi=None, facecolor='w', edgecolor='w',
    #    orientation='portrait', papertype=None, format=None,
    #    transparent=False)
    # Save to a CSV file.
    observables = {
        'recovered DeviatoricStress XX': recDeviatoricStress['XX'],
        'recovered DeviatoricStress XX Passed': recDeviatoricStressRes['XX'],
        'recovered DeviatoricStress YY': recDeviatoricStress['YY'],
        'recovered DeviatoricStress YY Passed': recDeviatoricStressRes['YY'],
        'recovered DeviatoricStress XY': recDeviatoricStress['XY'],
        'recovered DeviatoricStress XY Passed': recDeviatoricStressRes['XY'],
        'stress XX': stress['XX'], 'stress XX Passed': stressRes['XX'],
        'stress YY': stress['YY'], 'stress YY Passed': stressRes['YY'],
        'stress XY': stress['XY'], 'stress XY Passed': stressRes['XY']}
    msuite.writeInputsOutputsToCSV(sciBTest.mSuite, observables,
        "OrthotropicTestStressValues.csv")
    sciBTest.mSuite.analysisImages = None
    sciBTest.mSuite.modelImagesToDisplay = None
    for rGen in getGenerators(["RST", "ReportLab"], sciBTest.outputPathBase):
        sReps.makeSciBenchReport(sciBTest, mResults, rGen,
            os.path.join(sciBTest.outputPathBase, "%s-report.%s" %
                (sciBTest.testName, rGen.stdExt)))
plt.plot(t, var / field_scale[field_name], 's', label='AUTOUGH2', zorder=2)
plt.xlabel('time (s)')
plt.ylabel(field_name + ' (' + field_unit[field_name] + ')')
plt.legend(loc='best')
img_filename_base = '_'.join((model_name, field_name))
img_filename_base = img_filename_base.replace(' ', '_')
img_filename = os.path.join(
    co2_one_cell_test.mSuite.runs[run_index].basePath,
    co2_one_cell_test.mSuite.outputPathBase,
    img_filename_base)
plt.tight_layout(pad=3.)
plt.savefig(img_filename + '.png', dpi=300)
plt.savefig(img_filename + '.pdf')
plt.clf()
co2_one_cell_test.mSuite.analysisImages.append(img_filename + '.png')

# generate report:
for rGen in getGenerators(["RST"], co2_one_cell_test.outputPathBase):
    report_filename = os.path.join(
        co2_one_cell_test.outputPathBase,
        "%s-report.%s" % (co2_one_cell_test.testName, rGen.stdExt))
    sReps.makeSciBenchReport(co2_one_cell_test, mResults, rGen,
                             report_filename)
    html_filename = os.path.join(
        co2_one_cell_test.outputPathBase,
        "%s-report.%s" % (co2_one_cell_test.testName, 'html'))
    html = publish_file(source_path=report_filename,
                        destination_path=html_filename,
                        writer_name="html")
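
# The report-generation block above is repeated near-verbatim across these
# scripts (problem1_test, minc_column_test, heat_pipe_test, ...). A sketch of
# a shared helper that could replace it, assuming the same getGenerators /
# makeSciBenchReport / publish_file calls used above (the helper name is
# hypothetical):
def makeReportsWithHtml(test, mResults, generatorNames=("RST",)):
    for rGen in getGenerators(list(generatorNames), test.outputPathBase):
        report_filename = os.path.join(
            test.outputPathBase,
            "%s-report.%s" % (test.testName, rGen.stdExt))
        sReps.makeSciBenchReport(test, mResults, rGen, report_filename)
    # convert the RST report to HTML via docutils:
    rst_filename = os.path.join(
        test.outputPathBase, "%s-report.rst" % test.testName)
    html_filename = os.path.join(
        test.outputPathBase, "%s-report.html" % test.testName)
    publish_file(source_path=rst_filename,
                 destination_path=html_filename, writer_name="html")

# e.g. makeReportsWithHtml(co2_one_cell_test, mResults)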