def test_aut2(self):
    """Run the coarse AUTOUGH2 (TOUGH2) model serially and wait for it.

    Builds a T2ModelRun for 'coarse.dat' under TEST_PATH, submits it through
    a non-MPI SimpleJobRunner, and blocks until the run produces a result.
    """
    model_run = T2ModelRun('test_aut2', 'coarse.dat', basePath=TEST_PATH)
    runner = SimpleJobRunner(mpi=False)
    # Submit, then block on the returned job-meta-info handle until done.
    job_info = runner.submitRun(model_run)
    run_result = runner.blockResult(model_run, job_info)
def test_waiwera(self):
    """Submit a Waiwera model run under MPI and block until it completes.

    Uses the 'test_all.exe' simulator executable with an empty filename,
    rooted at TEST_PATH, via an MPI-enabled SimpleJobRunner.
    """
    model_run = WaiweraModelRun('test_super_fruit', '',
                                simulator='test_all.exe',
                                basePath=TEST_PATH)
    runner = SimpleJobRunner(mpi=True)
    # Submit, then wait for the run's result via the job handle.
    job_info = runner.submitRun(model_run)
    run_result = runner.blockResult(model_run, job_info)
# NOTE(review): the original physical-line structure of this chunk was lost when
# the file was collapsed onto one line. The inline '#' comments embedded below
# ('# plots:', '# plot time history results...') would comment out the remainder
# of the line as written, and the body extent of each 'for' loop (e.g. whether
# problem1_test.addTestComp and the jrunner/runTest statements sit inside or
# after the loop) cannot be recovered from here — restore the real layout from
# version control before editing.
# What it appears to do (TODO confirm against the original script): for each
# digitised field, build a '<model>_<field>_r_<sim>.dat' path, wrap it in a
# DigitisedOneDFieldResult, record it in digitised_r_result, and register a
# OneDSolutionWithinTolTC comparison (2% tolerance, final output index, log
# radial coordinate up to max_radius) on problem1_test; then run the test with
# an MPI SimpleJobRunner, set up plot scale/unit/symbol tables, and start
# assembling '<model>_<field>_time_<sim>' history data filenames.
for field_name in digitised_test_fields: data_filename = '_'.join((model_name, field_name, 'r', sim)) data_filename = data_filename.lower().replace(' ', '_') data_filename = os.path.join(data_dir, data_filename + '.dat') result = DigitisedOneDFieldResult(sim, data_filename, field_name, -1) digitised_r_result[field_name, sim] = result problem1_test.addTestComp( run_index, ' '.join((sim, field_name)), OneDSolutionWithinTolTC(fieldsToTest=[field_name], defFieldTol=2.e-2, expected=result, testOutputIndex=-1, maxCoordinate=max_radius, logCoordinate=True)) jrunner = SimpleJobRunner(mpi=True) testResult, mResults = problem1_test.runTest(jrunner, createReports=True) # plots: scale = {"Pressure": 1.e5, "Temperature": 1.} unit = {"Pressure": "bar", "Temperature": "$^{\circ}$C"} symbol = {"GeoTrans": 's', "S-Cubed": 'o'} # plot time history results at r = 37.5 m: tc_name = "AUTOUGH2 history at " + obspt sim = 'analytical' data = {} for field_name in digitised_test_fields: data_filename = '_'.join((model_name, field_name, 'time', sim)) data_filename = data_filename.lower().replace(' ', '_')
# NOTE(review): this chunk begins mid-expression — 'expected = ...,
# testOutputIndex = -1))' closes an addTestComp(...) call whose opening is not
# visible here — and its indentation was destroyed by the one-line collapse, so
# the nesting of the plotting loops below cannot be recovered from this view.
# Restore the original layout from version control before editing.
# What it appears to do (TODO confirm): register two HistoryWithinTolTC
# comparisons against AUTOUGH2 reference results (field histories at
# obs_cell_index with 2% tolerance, and source histories at source_index with
# 1% tolerance), run minc_column_test via SimpleJobRunner, then for each plot
# field and each run plot Waiwera results (solid line) against every third
# AUTOUGH2 point (squares), with time converted from seconds to days and
# values divided by per-field scales.
expected = AUTOUGH2_result[run_name], testOutputIndex = -1)) minc_column_test.addTestComp(run_index, "AUTOUGH2 history", HistoryWithinTolTC(fieldsToTest = test_fields, defFieldTol = 0.02, expected = AUTOUGH2_result[run_name], testCellIndex = obs_cell_index)) minc_column_test.addTestComp(run_index, "AUTOUGH2 source", HistoryWithinTolTC(fieldsToTest = test_source_fields, defFieldTol = 0.01, expected = AUTOUGH2_result[run_name], testSourceIndex = source_index)) jrunner = SimpleJobRunner(mpi = mpi) testResult, mResults = minc_column_test.runTest(jrunner, createReports = True) day = 24. * 60. * 60. for field_name in plot_fields: scale = field_scale[field_name] unit = field_unit[field_name] for run_index, run_name in enumerate(run_names): title = run_name.replace('single', 'single porosity').replace('minc', 'MINC') t, var = minc_column_test.mSuite.resultsList[run_index].\ getFieldHistoryAtCell(field_name, obs_cell_index) plt.plot(t / day, var / scale, '-', label = 'Waiwera ' + title, zorder = 3) t, var = AUTOUGH2_result[run_name].getFieldHistoryAtCell(field_name, obs_cell_index) plt.plot(t[::3] / day, var[::3] / scale, 's', label = 'AUTOUGH2 ' + title, zorder = 2)
def test_jobrunner_mpi(self):
    """Exercise SimpleJobRunner's MPI path with a mock model run.

    Submits a MockModelRun through an MPI-enabled runner and blocks
    until a result is returned.
    """
    model_run = MockModelRun()
    runner = SimpleJobRunner(mpi=True)
    # Submit, then wait on the job handle for the run's result.
    job_info = runner.submitRun(model_run)
    run_result = runner.blockResult(model_run, job_info)
# NOTE(review): original line structure lost in the one-line collapse. The
# extent of the 'for runI, mRun in ...' loop body (whether the jrunner
# assignment and runTest call follow the loop or sit inside it) cannot be
# recovered from this view, and the trailing '#'-commented report-generation
# code would comment out everything after it on a single line. Restore the
# layout from version control before editing.
# What it appears to do (TODO confirm): describe and configure a
# SciBenchmarkTest for Mike's test problem 1 (Avdonin solution) with one
# coarse-grid T2ModelRun (25 m radial spacing), add a FieldWithinTolTC
# temperature comparison against avdonin_at_radius at the final output index
# (tolerance 1e-5), then run the test with a SimpleJobRunner and create
# reports; ReportLab/RST report generation is left commented out.
sciBTest.description = """Mike's test problem 1, Avdonin solution. Run 0 is coarse model with 25m radial spacing. """ sciBTest.mSuite.addRun( T2ModelRun("coarse", "coarse.dat", geo_filename="gcoarse_dummy.dat", basePath=os.path.dirname(os.path.realpath(__file__))), "Test problem 1, Avdonin solution, coarse grid") sciBTest.setupEmptyTestCompsList() for runI, mRun in enumerate(sciBTest.mSuite.runs): sciBTest.addTestComp( runI, "temperature", FieldWithinTolTC(fieldsToTest=["Temperature"], defFieldTol=1.0e-5, expected=avdonin_at_radius, testOutputIndex=-1)) jrunner = SimpleJobRunner() testResult, mResults = sciBTest.runTest( jrunner, # postProcFromExisting=True, createReports=True) # for rGen in getGenerators(["RST", "ReportLab"], sciBTest.outputPathBase): # sReps.makeSciBenchReport(sciBTest, mResults, rGen, # os.path.join(sciBTest.outputPathBase, "%s-report.%s" %\ # (sciBTest.testName, rGen.stdExt)))
# NOTE(review): this method's indentation was destroyed by the one-line
# collapse, so the boundaries of its three nested loops (over np, over
# perturbation, over ti) and the placement of the assertEqual / runTest
# statements cannot be recovered from this view — restore the original layout
# from version control before editing.
# What it appears to do (TODO confirm): for serial and 2-process MPI runs, and
# for perturbations below and above the tolerance, build a SciBenchmarkTest
# around a FooModelRun (rooted at ./run, created if missing), register
# FieldWithinTolTC comparisons per output time plus one against an expected
# FooModelResult at output index 2 and a HistoryWithinTolTC at cell 0, check
# the test-component counts (one run; len(output_times) + 2 components), run
# the test with a SimpleJobRunner, and assert the pass/fail status matches
# whether the perturbation was within tolerance.
def test_test(self): model_dir = './run' output_dir = './output' if not os.path.exists(model_dir): os.mkdir(model_dir) base_path = os.path.realpath(model_dir) run_index = 0 test_fields = ['foo'] tol = 0.01 for np in [1, 2]: run_name = "foo_run" mpi = np > 1 for perturbation in [0.5 * tol, 1.5 * tol]: expected_pass = perturbation < tol expected_status = 'pass' if expected_pass else 'fail' test_name = "foo_test_np_%d_%s" % (np, expected_status) test = SciBenchmarkTest(test_name, nproc=np) model_run = FooModelRun(run_name, basePath=base_path, perturbation=perturbation) model_run.jobParams['nproc'] = np test.mSuite.addRun(model_run, run_name) expected_result = FooModelResult("expected", "") test.setupEmptyTestCompsList() for ti in range(len(output_times)): test.addTestComp( run_index, "field at time index %d" % ti, FieldWithinTolTC(fieldsToTest=test_fields, defFieldTol=0.01, expected=partial(foo, t=output_times[ti]), testOutputIndex=ti)) test.addTestComp( run_index, "model result field at time index %d" % 2, FieldWithinTolTC(fieldsToTest=test_fields, defFieldTol=tol, expected=expected_result, testOutputIndex=2)) cell_index = 0 test.addTestComp( run_index, "history at cell %d" % cell_index, HistoryWithinTolTC(fieldsToTest=test_fields, defFieldTol=tol, expected=expected_result, testCellIndex=cell_index)) self.assertEqual(len(test.testComps), 1) self.assertEqual(len(test.testComps[0]), len(output_times) + 2) jrunner = SimpleJobRunner(mpi=mpi) test_result, model_results = test.runTest(jrunner, createReports=True) self.assertEqual(isinstance(test.testStatus, CREDO_PASS), expected_pass)