Example no. 1
0
        def check_history(result, expected, field, tol, abs_err_tol=1.0):
            """Compare the time history of `field` at cell 4 between `result`
            and `expected`, returning the pass/fail status from the test
            component. On failure, if SHOW_PLOT is set, plot both histories
            and highlight the expected-curve points whose error exceeds tol.
            """
            times_res, vals_res = result.getFieldHistoryAtCell(field, 4)
            times_exp, vals_exp = expected.getFieldHistoryAtCell(field, 4)

            comp = HistoryWithinTolTC(fieldsToTest=[field],
                                      defFieldTol=tol,
                                      fieldTols=None,
                                      expected=expected,
                                      absoluteErrorTol=abs_err_tol,
                                      testCellIndex=4,
                                      times=None)
            status = comp.check(result)

            if not status and SHOW_PLOT:
                from matplotlib import pyplot as plt
                plt.semilogx(times_res, vals_res, '-r+',
                             label=('result (%s)' % result.modelName))
                plt.semilogx(times_exp, vals_exp, '--b*',
                             label=('expected (%s)' % expected.modelName))
                plt.title('PASS = %s; %s; tol=%f' % (str(status), field, tol))
                plt.legend()
                plt.gca().relim()
                # Mark every expected-history point whose error is over
                # tolerance (fmt ' go': no line, green circle markers).
                for i, (t, err) in enumerate(zip(comp.times,
                                                 comp.fieldErrors[field])):
                    if err > tol:
                        plt.plot(t, vals_exp[i], ' go',
                                 alpha=0.5, markersize=10)
                plt.show()

            return status
Example no. 2
0
# Register test comparisons for problem 1 against a reference AUTOUGH2 run.
problem1_test.setupEmptyTestCompsList()
digitised_time_result = {}
digitised_r_result = {}

# Reference result read from the AUTOUGH2 listing file for this model.
run_base_name = model_name
run_filename = os.path.join(model_dir, run_base_name + ".listing")
AUTOUGH2_result = T2ModelResult("AUTOUGH2",
                                run_filename,
                                geo_filename=t2geo_filename,
                                ordering_map=map_out_atm)

# Time-history comparison at the cell indexed by obs_cell_index.
problem1_test.addTestComp(
    run_index, "AUTOUGH2 history at " + obspt,
    HistoryWithinTolTC(fieldsToTest=test_fields,
                       defFieldTol=1.e-3,
                       expected=AUTOUGH2_result,
                       testCellIndex=obs_cell_index))

# Temperature comparison at the last output index.
problem1_test.addTestComp(
    run_index, "AUTOUGH2 t = 1.e9 s",
    FieldWithinTolTC(fieldsToTest=["Temperature"],
                     defFieldTol=1.0e-4,
                     expected=AUTOUGH2_result,
                     testOutputIndex=-1))

# Build filenames for digitised comparison data per simulator.
# NOTE(review): the inner loop body continues beyond this excerpt.
digitised_result = {}
for sim in digitised_simulators:
    data = {}
    for field_name in digitised_test_fields:
        data_filename = '_'.join((model_name, field_name, 'time', sim))
        data_filename = data_filename.lower().replace(' ', '_')
Example no. 3
0
    # NOTE(review): this excerpt starts mid-scope — lst, geo, run_name,
    # run_index and results_filename are defined earlier in the file.
    lst.last()  # presumably positions the listing at its last output set — confirm
    num_minc_cells = lst.element.num_rows - geo.num_blocks
    # Reference AUTOUGH2 result, with cell ordering from minc_level_map.
    AUTOUGH2_result[run_name] = T2ModelResult("AUTOUGH2", results_filename,
                                              geo_filename = t2geo_filename,
                                              fieldname_map = AUTOUGH2_FIELDMAP,
                                              ordering_map = minc_level_map(num_levels,
                                                                            num_minc_cells))
    # Field comparison at the last output index.
    minc_column_test.addTestComp(run_index, "AUTOUGH2",
                      FieldWithinTolTC(fieldsToTest = test_fields,
                                       defFieldTol = 0.025,
                                       expected = AUTOUGH2_result[run_name],
                                       testOutputIndex = -1))

    # Time-history comparison at the observation cell.
    minc_column_test.addTestComp(run_index, "AUTOUGH2 history",
                          HistoryWithinTolTC(fieldsToTest = test_fields,
                                             defFieldTol = 0.02,
                                             expected = AUTOUGH2_result[run_name],
                                             testCellIndex = obs_cell_index))

    # Source history comparison at source_index.
    minc_column_test.addTestComp(run_index, "AUTOUGH2 source",
                          HistoryWithinTolTC(fieldsToTest = test_source_fields,
                                             defFieldTol = 0.01,
                                             expected = AUTOUGH2_result[run_name],
                                             testSourceIndex = source_index))

# Run the test (optionally under MPI) and generate reports.
jrunner = SimpleJobRunner(mpi = mpi)
testResult, mResults = minc_column_test.runTest(jrunner, createReports = True)

day = 24. * 60. * 60.  # seconds per day

# Plotting loop — body continues beyond this excerpt.
for field_name in plot_fields:
    scale = field_scale[field_name]
Example no. 4
0
    def test_times_mechanism(self):
        """Check how HistoryWithinTolTC selects its comparison times."""

        def history_tc(expected, times, **extra):
            # Temperature history test component at cell index 4.
            return HistoryWithinTolTC(fieldsToTest=["Temperature"],
                                      defFieldTol=10.0,
                                      fieldTols=None,
                                      expected=expected,
                                      testCellIndex=4,
                                      times=times,
                                      **extra)

        # With no times given, interpolation is forced onto the expected
        # result's times.
        comp = history_tc(self.mres3, None)
        comp.check(self.mres4)
        self.assertEqual(len(comp.times), len(self.mres3.getTimes()))

        # ... and symmetrically with the roles of the two results swapped.
        comp = history_tc(self.mres4, None)
        comp.check(self.mres3)
        self.assertEqual(len(comp.times), len(self.mres4.getTimes()))

        # Explicitly supplied times force interpolation onto those times.
        comp = history_tc(self.mres4, [0.0, 1.0, 2.0], enforceLogic=False)
        comp.check(self.mres3)
        self.assertEqual(len(comp.times), 3)

        # When comparing to an analytic solution function, the times are
        # taken from the checked result.
        def solution_fn(pos, t):
            """Dummy constant solution."""
            return 100.0

        comp = history_tc(solution_fn, None)
        comp.check(self.mres3)
        self.assertEqual(len(comp.times), len(self.mres3.getTimes()))
Example no. 5
0
             # NOTE(review): this excerpt appears whitespace-shifted by one
             # column and starts mid-loop; data, run_name and sim come from
             # enclosing code not visible here — confirm against the original.
             data[field_name, obs_cell_index[obspt]] = np.loadtxt(data_filename)
     digitised_result[run_name, sim] = HistoryDataResult(sim, data)
     # Digitised source history data at the production observation point.
     # NOTE(review): `data` is not re-initialised here, so the source entries
     # are added to the same dict as above — confirm this is intended.
     obspt = "production"
     for field_name in test_source_fields:
         data_filename = '_'.join((model_name + run_name, obspt, field_name, sim))
         data_filename = os.path.join(data_dir, data_filename.lower() + '.dat')
         data[field_name, source_index] = np.loadtxt(data_filename)
     digitised_source_result[run_name, sim] = HistoryDataResult(sim, data)
 
 # History comparisons at each observation point: against AUTOUGH2, then
 # against each digitised simulator result.
 for obspt in obs_points:
     blk_index = obs_cell_index[obspt]
     if obspt == 'total': fields = digitised_test_fields[obspt]
     else: fields = test_fields
     problem5_test.addTestComp(run_index, "AUTOUGH2 " + obspt,
                           HistoryWithinTolTC(fieldsToTest = fields,
                                              defFieldTol = 1.e-3,
                                              expected = AUTOUGH2_result[run_name],
                                              testCellIndex = blk_index))
     for sim in digitised_simulators:
         problem5_test.addTestComp(run_index, ' '.join((sim, obspt)),
                                   HistoryWithinTolTC(fieldsToTest = \
                                                      digitised_test_fields[obspt],
                                                      defFieldTol = 1.5e-2,
                                                      fieldTols = {"Steam": 5.e-2},
                                                      expected = digitised_result[run_name, sim],
                                                      testCellIndex = obs_cell_index[obspt],
                                                      orthogonalError = True))
 # Source history comparison at the production point (this call continues
 # beyond the excerpt).
 obspt = "production"
 problem5_test.addTestComp(run_index, "AUTOUGH2 " + obspt + " source",
                           HistoryWithinTolTC(fieldsToTest = test_source_fields,
                                              defFieldTol = 1.e-3,
                                              expected = AUTOUGH2_result[run_name],
Example no. 6
0
# NOTE(review): this excerpt starts mid-call — the first two lines are the
# tail of a model-run constructor begun above the excerpt.
                          simulator = 'waiwera',
                          basePath = os.path.realpath(model_dir))
model_run.jobParams['nproc'] = num_procs
problem6_test.mSuite.addRun(model_run, run_name)

problem6_test.setupEmptyTestCompsList()

# Reference result read from the AUTOUGH2 listing file for this model.
run_base_name = model_name
run_filename = os.path.join(model_dir, run_base_name + ".listing")
AUTOUGH2_result = T2ModelResult("AUTOUGH2", run_filename,
                                 geo_filename = t2geo_filename,
                                 ordering_map = map_out_atm)

# Source (well) history comparison against the reference.
problem6_test.addTestComp(run_index, "AUTOUGH2 " + obspt + ' well',
                          HistoryWithinTolTC(fieldsToTest = test_source_fields,
                                             defFieldTol = 2.e-2,
                                             expected = AUTOUGH2_result,
                                             testSourceIndex = source_index))
# Load digitised comparison data per simulator.
# NOTE(review): the second inner loop's body continues beyond this excerpt.
digitised_result = {}
digitised_source_result = {}
source_index = 0
for sim in digitised_simulators:
    data = {}
    for field_name in digitised_test_fields:
        data_filename = '_'.join((model_name, obspt, field_name, sim))
        data_filename = data_filename.lower().replace(' ', '_')
        data_filename = os.path.join(data_dir, data_filename + '.dat')
        data[field_name, obs_cell_index] = np.loadtxt(data_filename)
    digitised_result[sim] = HistoryDataResult(sim, data)
    data = {}
    for field_name in test_source_fields:
        data_filename = '_'.join((model_name, obspt, field_name, sim))
Example no. 7
0
    # NOTE(review): this excerpt starts mid-loop; model_name, run_name,
    # run_index and source_index come from the enclosing (unseen) code.
    run_base_name = '_'.join((model_name, run_name))
    run_filename = os.path.join(model_dir, run_base_name + ".listing")
    # Reference result read from an AUTOUGH2 listing file.
    reference_result[run_name] = T2ModelResult("aut2",
                                               run_filename,
                                               geo_filename=t2geo_filename,
                                               ordering_map=map_out_atm)
    # Field comparison at the last output index.
    deliverability_test.addTestComp(
        run_index, "final errors",
        FieldWithinTolTC(fieldsToTest=test_fields,
                         defFieldTol=5.e-3,
                         expected=reference_result[run_name],
                         testOutputIndex=-1))
    # Time-history comparison at cell index 4.
    deliverability_test.addTestComp(
        run_index, "time history",
        HistoryWithinTolTC(fieldsToTest=test_fields,
                           defFieldTol=1.e-2,
                           expected=reference_result[run_name],
                           testCellIndex=4))
    # Source history comparison at source_index.
    deliverability_test.addTestComp(
        run_index, "source",
        HistoryWithinTolTC(fieldsToTest=test_source_fields,
                           defFieldTol=1.e-2,
                           expected=reference_result[run_name],
                           testSourceIndex=source_index))

# Run the test (optionally under MPI) and generate reports.
jrunner = SimpleJobRunner(mpi=mpi)
testResult, mResults = deliverability_test.runTest(jrunner, createReports=True)

# plots:
x = [col.centre[0] for col in geo.columnlist]  # column centre x-coordinates

# Per-run plotting loop — body continues beyond this excerpt.
for run_index, run_name in enumerate(run_names):
Example no. 8
0
    def test_test(self):
        """End-to-end check of benchmark pass/fail behaviour.

        Runs the Foo model serially and with two processes, each with one
        perturbation below tolerance (expected to pass) and one above
        (expected to fail), and verifies the overall test status matches.
        """
        model_dir = './run'
        output_dir = './output'
        if not os.path.exists(model_dir): os.mkdir(model_dir)
        base_path = os.path.realpath(model_dir)
        run_index = 0
        test_fields = ['foo']
        tol = 0.01

        for num_procs in [1, 2]:

            run_name = "foo_run"
            use_mpi = num_procs > 1

            for perturbation in [0.5 * tol, 1.5 * tol]:

                # Below tolerance should pass; above should fail.
                should_pass = perturbation < tol
                status_word = 'pass' if should_pass else 'fail'
                test_name = "foo_test_np_%d_%s" % (num_procs, status_word)
                benchmark = SciBenchmarkTest(test_name, nproc=num_procs)
                model_run = FooModelRun(run_name,
                                        basePath=base_path,
                                        perturbation=perturbation)
                model_run.jobParams['nproc'] = num_procs
                benchmark.mSuite.addRun(model_run, run_name)

                expected_result = FooModelResult("expected", "")

                benchmark.setupEmptyTestCompsList()
                # One field comparison against the analytic solution per
                # output time...
                for ti in range(len(output_times)):
                    benchmark.addTestComp(
                        run_index, "field at time index %d" % ti,
                        FieldWithinTolTC(fieldsToTest=test_fields,
                                         defFieldTol=0.01,
                                         expected=partial(foo,
                                                          t=output_times[ti]),
                                         testOutputIndex=ti))

                # ... plus one against the expected model result...
                benchmark.addTestComp(
                    run_index, "model result field at time index %d" % 2,
                    FieldWithinTolTC(fieldsToTest=test_fields,
                                     defFieldTol=tol,
                                     expected=expected_result,
                                     testOutputIndex=2))

                # ... and one history comparison at a single cell.
                cell_index = 0
                benchmark.addTestComp(
                    run_index, "history at cell %d" % cell_index,
                    HistoryWithinTolTC(fieldsToTest=test_fields,
                                       defFieldTol=tol,
                                       expected=expected_result,
                                       testCellIndex=cell_index))

                self.assertEqual(len(benchmark.testComps), 1)
                self.assertEqual(len(benchmark.testComps[0]),
                                 len(output_times) + 2)

                job_runner = SimpleJobRunner(mpi=use_mpi)
                test_result, model_results = benchmark.runTest(
                    job_runner, createReports=True)

                self.assertEqual(isinstance(benchmark.testStatus, CREDO_PASS),
                                 should_pass)
Example no. 9
0
# Load digitised reference source-history data for each test field.
source_data = {}
source_index = 0
for field in test_source_fields:
    data_filename = '_'.join((model_name, "source", field, ref_sim))
    data_filename = data_filename.lower().replace(' ', '_')
    data_filename = os.path.join(data_dir, data_filename + '.dat')
    source_data[field, source_index] = np.loadtxt(data_filename)
ref_source_result = HistoryDataResult(ref_sim, source_data)

# History comparisons at each observation depth: against AUTOUGH2, and
# against the digitised reference simulator result.
for depth in depths:
    obspt = str(depth)
    problem4_test.addTestComp(
        run_index, "AUTOUGH2 z = " + obspt,
        HistoryWithinTolTC(fieldsToTest=test_fields,
                           defFieldTol=2.e-3,
                           expected=AUTOUGH2_result,
                           testCellIndex=obs_cell_index[depth]))

    problem4_test.addTestComp(
        run_index, ref_sim + " z = " + obspt,
        HistoryWithinTolTC(fieldsToTest=digitised_test_fields[depth],
                           defFieldTol=2.e-2,
                           expected=ref_result,
                           testCellIndex=obs_cell_index[depth],
                           orthogonalError=True))

# Source history comparison against AUTOUGH2 (this call continues beyond
# the excerpt).
problem4_test.addTestComp(
    run_index, "AUTOUGH2 source",
    HistoryWithinTolTC(fieldsToTest=test_source_fields,
                       defFieldTol=5.e-3,
                       expected=AUTOUGH2_result,