Code example #1
File: testTC.py  Project: waiwera/credo
    def test_fieldWithinTolTC_use_reference(self):
        res1 = MockModelResult(0.0100001)
        res2 = MockModelResult(0.0102)
        ref = MockModelResult(0.01)

        tc = FieldWithinTolTC(fieldsToTest=['a'],
                              defFieldTol=0.01,
                              fieldTols=None,
                              expected=ref,
                              testOutputIndex=-1)
        self.assertEqual(True, tc.check(res1))
        self.assertEqual(False, tc.check(res2))
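
The two assertions are consistent with a relative tolerance: against the reference value 0.01 with defFieldTol=0.01, the result 0.0100001 has a relative error of 1e-5 and passes, while 0.0102 has a relative error of 0.02 and fails. A minimal sketch of that arithmetic, assuming FieldWithinTolTC applies the tolerance as a relative error:

# Assumed relative-error check: |model - expected| / |expected| <= defFieldTol.
expected = 0.01
tol = 0.01
for model in (0.0100001, 0.0102):
    rel_err = abs(model - expected) / abs(expected)
    print(model, rel_err <= tol)   # 0.0100001 -> True, 0.0102 -> False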
Code example #2
File: testTC.py  Project: waiwera/credo
    def test_fieldWithinTolTC_use_analytic(self):
        res1 = MockModelResult(0.0100001)
        res2 = MockModelResult(0.0102)

        def analytic(pos):
            # check if .getPositions() working
            self.assertEqual(pos, (1.0, 2.0))
            return 0.01

        tc = FieldWithinTolTC(fieldsToTest=['a'],
                              defFieldTol=0.01,
                              fieldTols=None,
                              expected=analytic,
                              testOutputIndex=-1)
        self.assertEqual(True, tc.check(res1))
        self.assertEqual(False, tc.check(res2))
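
Here `expected` is a callable rather than a model result: the test comment indicates FieldWithinTolTC passes it each cell position (via .getPositions()) and compares the model value against its return value, which is how analytic solutions can be supplied. A small illustrative sketch of the same pattern; the pressure profile and field name below are invented for illustration:

import math

# Hypothetical analytic solution: called with a cell position, returns the
# expected field value at that position.
def analytic_pressure(pos):
    r = math.sqrt(pos[0]**2 + pos[1]**2)
    return 1.0e5 * math.exp(-r / 100.0)

# Used the same way as the mock tests above (constructor arguments as shown there):
# tc = FieldWithinTolTC(fieldsToTest=['Pressure'], defFieldTol=0.01,
#                       expected=analytic_pressure, testOutputIndex=-1)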
Code example #3
File: test_problem1.py  Project: jposunz/waiwera
AUTOUGH2_result = T2ModelResult("AUTOUGH2",
                                run_filename,
                                geo_filename=t2geo_filename,
                                ordering_map=map_out_atm)

problem1_test.addTestComp(
    run_index, "AUTOUGH2 history at " + obspt,
    HistoryWithinTolTC(fieldsToTest=test_fields,
                       defFieldTol=1.e-3,
                       expected=AUTOUGH2_result,
                       testCellIndex=obs_cell_index))

problem1_test.addTestComp(
    run_index, "AUTOUGH2 t = 1.e9 s",
    FieldWithinTolTC(fieldsToTest=["Temperature"],
                     defFieldTol=1.0e-4,
                     expected=AUTOUGH2_result,
                     testOutputIndex=-1))

digitised_result = {}
for sim in digitised_simulators:
    data = {}
    for field_name in digitised_test_fields:
        data_filename = '_'.join((model_name, field_name, 'time', sim))
        data_filename = data_filename.lower().replace(' ', '_')
        data_filename = os.path.join(data_dir, data_filename + '.dat')
        data[field_name, obs_cell_index] = np.loadtxt(data_filename)
    digitised_result[sim] = HistoryDataResult(sim, data)

for sim in digitised_simulators:
    problem1_test.addTestComp(
        run_index, ' '.join((sim, field_name, obspt)),
Code example #4
    run_filename = run_base_name + '.json'
    inp = json.load(open(os.path.join(base_path, run_filename)))
    if 'minc' in inp['mesh']:
        num_levels = len(inp['mesh']['minc']['geometry']['matrix']['volume'])
    else:
        num_levels = 0
    lst = t2listing(results_filename)
    lst.last()
    num_minc_cells = lst.element.num_rows - geo.num_blocks
    AUTOUGH2_result[run_name] = T2ModelResult("AUTOUGH2", results_filename,
                                               geo_filename=t2geo_filename,
                                               fieldname_map=AUTOUGH2_FIELDMAP,
                                               ordering_map=minc_level_map(
                                                   num_levels, num_minc_cells))
    minc_column_test.addTestComp(
        run_index, "AUTOUGH2",
        FieldWithinTolTC(fieldsToTest=test_fields,
                         defFieldTol=0.025,
                         expected=AUTOUGH2_result[run_name],
                         testOutputIndex=-1))

    minc_column_test.addTestComp(
        run_index, "AUTOUGH2 history",
        HistoryWithinTolTC(fieldsToTest=test_fields,
                           defFieldTol=0.02,
                           expected=AUTOUGH2_result[run_name],
                           testCellIndex=obs_cell_index))

    minc_column_test.addTestComp(
        run_index, "AUTOUGH2 source",
        HistoryWithinTolTC(fieldsToTest=test_source_fields,
                           defFieldTol=0.01,
                           expected=AUTOUGH2_result[run_name],
                           testSourceIndex=source_index))

jrunner = SimpleJobRunner(mpi=mpi)
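
Example #4 ends after constructing the job runner. Based on the pattern in examples #9 and #10 below, the presumed next step (not shown in this snippet) is to pass the runner to runTest, which executes the suite and returns the overall result plus the per-run model results:

# Presumed follow-up, mirroring the runTest calls in examples #9 and #10 below
# (not part of the original snippet).
test_result, model_results = minc_column_test.runTest(jrunner, createReports=True)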
Code example #5
model_run.jobParams['nproc'] = args.np
infiltration_test.mSuite.addRun(model_run, run_name)

infiltration_test.setupEmptyTestCompsList()

results_filename = os.path.join(model_dir, run_base_name + ".listing")
AUTOUGH2_result = T2ModelResult("AUTOUGH2",
                                results_filename,
                                geo_filename=t2geo_filename,
                                fieldname_map=AUTOUGH2_FIELDMAP,
                                ordering_map=map_out_bdy)
for output_index in output_indices:
    infiltration_test.addTestComp(
        run_index, "AUTOUGH2",
        FieldWithinTolTC(fieldsToTest=test_fields,
                         defFieldTol=1.e-4,
                         expected=AUTOUGH2_result,
                         testOutputIndex=output_index))

digitised_result = {}
digitised_times = [864, 5184, 9504]
digitised_output_index = [1, 2, 3]
xmax_all = 0.
for sim in digitised_simulators:
    for field_name in digitised_test_fields:
        for time, output_index in zip(digitised_times, digitised_output_index):
            data_filename = '_'.join([sim, str(time)])
            data_filename = data_filename.lower().replace(' ', '_')
            data_filename = os.path.join(data_dir, data_filename + '.dat')
            result = DigitisedOneDFieldResult(sim, data_filename, field_name,
                                              output_index)
            digitised_result[sim, output_index] = result
Code example #6
model_run.jobParams['nproc'] = num_procs
heat_pipe_test.mSuite.addRun(model_run, run_name)

heat_pipe_test.setupEmptyTestCompsList()
digitised_result = {}

run_base_name = model_name
run_filename = os.path.join(model_dir, run_base_name + ".listing")
AUTOUGH2_result = T2ModelResult("AUTOUGH2", run_filename,
                                fieldname_map = AUTOUGH2_FIELDMAP,
                                 geo_filename = t2geo_filename,
                                 ordering_map = map_out_bdy)

heat_pipe_test.addTestComp(run_index, "AUTOUGH2",
                           FieldWithinTolTC(fieldsToTest = test_fields,
                                            defFieldTol = 5.e-3,
                                            expected = AUTOUGH2_result,
                                            testOutputIndex = -1))

t_final = AUTOUGH2_result.getTimes()[-1]
for sim in digitised_simulators:    
    for field_name in digitised_test_fields:
        data_filename = '_'.join((model_name, field_name, sim))
        data_filename = data_filename.lower().replace(' ', '_')
        data_filename = os.path.join(data_dir, data_filename + '.dat')
        result = DigitisedOneDFieldResult(sim, data_filename, field_name, -1)
        result.data[:,0] = np.exp(result.data[:,0]) * np.sqrt(t_final)
        result.data[:,1] *= field_scale[field_name]
        digitised_result[field_name, sim] = result
        heat_pipe_test.addTestComp(run_index, ' '.join((sim, field_name)),
                                   OneDSolutionWithinTolTC(
                                       fieldsToTest=[field_name],
Code example #7
    # simulator='Waiwera.exe',  # AY_CYGWIN
    basePath=os.path.realpath(MODELDIR))
mrun_s.jobParams['nproc'] = 6

# TODO: specifying nproc in SciBenchmarkTest does not seem to work (the model
# runs' jobParams are not updated), so at the moment it is only used for the report
sciBTest = SciBenchmarkTest("CC6", nproc=mrun_s.jobParams['nproc'])
sciBTest.description = """Mike's test problem 6, CC6"""
sciBTest.mSuite.addRun(mrun_s, "Waiwera")

sciBTest.setupEmptyTestCompsList()
for runI, mRun in enumerate(sciBTest.mSuite.runs):
    sciBTest.addTestComp(
        runI, "pressu",
        FieldWithinTolTC(fieldsToTest=["Pressure"],
                         defFieldTol=1.0e-5,
                         expected=mres_t,
                         testOutputIndex=-1))
    sciBTest.addTestComp(
        runI, "temp",
        FieldWithinTolTC(fieldsToTest=["Temperature"],
                         defFieldTol=1.0e-5,
                         expected=mres_t,
                         testOutputIndex=-1))
    sciBTest.addTestComp(
        runI, "vapsat",
        FieldWithinTolTC(fieldsToTest=["Vapour saturation"],
                         defFieldTol=1.0e-5,
                         expected=mres_t,
                         testOutputIndex=-1))

jrunner = SimpleJobRunner(mpi=True)
Code example #8
deliverability_test.setupEmptyTestCompsList()

reference_result = {}
for run_index, run_name in enumerate(run_names):

    run_base_name = '_'.join((model_name, run_name))
    run_filename = os.path.join(model_dir, run_base_name + ".listing")
    reference_result[run_name] = T2ModelResult("aut2",
                                               run_filename,
                                               geo_filename=t2geo_filename,
                                               ordering_map=map_out_atm)
    deliverability_test.addTestComp(
        run_index, "final errors",
        FieldWithinTolTC(fieldsToTest=test_fields,
                         defFieldTol=5.e-3,
                         expected=reference_result[run_name],
                         testOutputIndex=-1))
    deliverability_test.addTestComp(
        run_index, "time history",
        HistoryWithinTolTC(fieldsToTest=test_fields,
                           defFieldTol=1.e-2,
                           expected=reference_result[run_name],
                           testCellIndex=4))
    deliverability_test.addTestComp(
        run_index, "source",
        HistoryWithinTolTC(fieldsToTest=test_source_fields,
                           defFieldTol=1.e-2,
                           expected=reference_result[run_name],
                           testSourceIndex=source_index))

jrunner = SimpleJobRunner(mpi=mpi)
Code example #9
sciBTest.description = """Mike's test problem 1, Avdonin solution.
Run 0 is coarse model with 25m radial spacing.
"""

sciBTest.mSuite.addRun(
    T2ModelRun("coarse",
               "coarse.dat",
               geo_filename="gcoarse_dummy.dat",
               basePath=os.path.dirname(os.path.realpath(__file__))),
    "Test problem 1, Avdonin solution, coarse grid")

sciBTest.setupEmptyTestCompsList()
for runI, mRun in enumerate(sciBTest.mSuite.runs):
    sciBTest.addTestComp(
        runI, "temperature",
        FieldWithinTolTC(fieldsToTest=["Temperature"],
                         defFieldTol=1.0e-5,
                         expected=avdonin_at_radius,
                         testOutputIndex=-1))

jrunner = SimpleJobRunner()
testResult, mResults = sciBTest.runTest(
    jrunner,
    # postProcFromExisting=True,
    createReports=True)

# for rGen in getGenerators(["RST", "ReportLab"], sciBTest.outputPathBase):
#     sReps.makeSciBenchReport(sciBTest, mResults, rGen,
#         os.path.join(sciBTest.outputPathBase, "%s-report.%s" %\
#             (sciBTest.testName, rGen.stdExt)))
Code example #10
    def test_test(self):

        model_dir = './run'
        output_dir = './output'
        if not os.path.exists(model_dir): os.mkdir(model_dir)
        base_path = os.path.realpath(model_dir)
        run_index = 0
        test_fields = ['foo']
        tol = 0.01

        for np in [1, 2]:

            run_name = "foo_run"
            mpi = np > 1

            for perturbation in [0.5 * tol, 1.5 * tol]:

                expected_pass = perturbation < tol
                expected_status = 'pass' if expected_pass else 'fail'
                test_name = "foo_test_np_%d_%s" % (np, expected_status)
                test = SciBenchmarkTest(test_name, nproc=np)
                model_run = FooModelRun(run_name,
                                        basePath=base_path,
                                        perturbation=perturbation)
                model_run.jobParams['nproc'] = np
                test.mSuite.addRun(model_run, run_name)

                expected_result = FooModelResult("expected", "")

                test.setupEmptyTestCompsList()
                for ti in range(len(output_times)):
                    test.addTestComp(
                        run_index, "field at time index %d" % ti,
                        FieldWithinTolTC(fieldsToTest=test_fields,
                                         defFieldTol=0.01,
                                         expected=partial(foo,
                                                          t=output_times[ti]),
                                         testOutputIndex=ti))

                test.addTestComp(
                    run_index, "model result field at time index %d" % 2,
                    FieldWithinTolTC(fieldsToTest=test_fields,
                                     defFieldTol=tol,
                                     expected=expected_result,
                                     testOutputIndex=2))

                cell_index = 0
                test.addTestComp(
                    run_index, "history at cell %d" % cell_index,
                    HistoryWithinTolTC(fieldsToTest=test_fields,
                                       defFieldTol=tol,
                                       expected=expected_result,
                                       testCellIndex=cell_index))

                self.assertEqual(len(test.testComps), 1)
                self.assertEqual(len(test.testComps[0]), len(output_times) + 2)

                jrunner = SimpleJobRunner(mpi=mpi)
                test_result, model_results = test.runTest(jrunner,
                                                          createReports=True)

                self.assertEqual(isinstance(test.testStatus, CREDO_PASS),
                                 expected_pass)
Code example #11
    test.mSuite.addRun(model_run, run_name)

test.setupEmptyTestCompsList()

for run_index, run_name in enumerate(run_names):

    run_base_name = '_'.join((model_name, run_name))
    run_filename = os.path.join(model_dir, run_base_name + ".listing")
    reference_result = T2ModelResult("aut2",
                                     run_filename,
                                     geo_filename=t2geo_filename,
                                     ordering_map=map_out_atm)
    test.addTestComp(
        run_index, "final errors",
        FieldWithinTolTC(fieldsToTest=test_fields,
                         defFieldTol=1.e-4,
                         expected=reference_result,
                         testOutputIndex=-1))
    test.addTestComp(
        run_index, "time history LH end",
        HistoryWithinTolTC(fieldsToTest=test_fields,
                           defFieldTol=1.e-3,
                           expected=reference_result,
                           testCellIndex=0))
    test.addTestComp(
        run_index, "time history RH end",
        HistoryWithinTolTC(fieldsToTest=test_fields,
                           defFieldTol=1.e-3,
                           expected=reference_result,
                           testCellIndex=-1))
    test.addTestComp(
        run_index, "source",