    def test_IntVector(self):
        v1 = api.IntVector()  # empty
        v2 = api.IntVector([i for i in range(10)])  # from a list
        v3 = api.IntVector([1, 2, 3])  # simple list
        self.assertEqual(v2.size(), 10)
        self.assertEqual(v1.size(), 0)
        self.assertEqual(len(v3), 3)
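
A minimal sketch of the sequence behaviour these tests rely on (the same api module as above is assumed): size() and len() agree, and indexing and append() work like on a plain Python list.

v = api.IntVector([1, 2, 3])
assert v.size() == len(v) == 3
v.append(4)  # grows like a regular Python list
assert v[3] == 4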
    def test_state_with_id_handler(self):
        num_cells = 20
        model_type = pt_gs_k.PTGSKModel
        model = self.build_model(model_type, pt_gs_k.PTGSKParameter, num_cells, 2)
        cids_unspecified = api.IntVector()
        cids_1 = api.IntVector([1])
        cids_2 = api.IntVector([2])

        model_state_12 = model.state.extract_state(cids_unspecified)  # this is how to get all states from model
        model_state_1 = model.state.extract_state(cids_1)  # this is how to get only specified states from model
        model_state_2 = model.state.extract_state(cids_2)
        self.assertEqual(len(model_state_1) + len(model_state_2), len(model_state_12))
        self.assertGreater(len(model_state_1), 0)
        self.assertGreater(len(model_state_2), 0)
        for i in range(len(model_state_1)):  # verify selective extract catchment 1
            self.assertEqual(model_state_1[i].id.cid, 1)
        for i in range(len(model_state_2)):  # verify selective extract catchment 2
            self.assertEqual(model_state_2[i].id.cid, 2)
        for i in range(len(model_state_12)):
            model_state_12[i].state.kirchner.q = 100 + i
        model.state.apply_state(model_state_12, cids_unspecified)  # this is how to put all states into the model
        ms_12 = model.state.extract_state(cids_unspecified)
        for i in range(len(ms_12)):
            self.assertAlmostEqual(ms_12[i].state.kirchner.q, 100 + i)
        for i in range(len(model_state_2)):
            model_state_2[i].state.kirchner.q = 200 + i
        unapplied = model.state.apply_state(model_state_2, cids_2)  # this is how to put a limited set of states into the model
        self.assertEqual(len(unapplied), 0)
        ms_12 = model.state.extract_state(cids_unspecified)
        for i in range(len(ms_12)):
            if ms_12[i].id.cid == 1:
                self.assertAlmostEqual(ms_12[i].state.kirchner.q, 100 + i)

        ms_2 = model.state.extract_state(cids_2)
        for i in range(len(ms_2)):
            self.assertAlmostEqual(ms_2[i].state.kirchner.q, 200 + i)

        # serialization support, to and from bytes

        bytes = ms_2.serialize_to_bytes()  # first make some bytes out of the state
        with tempfile.TemporaryDirectory() as tmpdirname:
            file_path = str(path.join(tmpdirname, "pt_gs_k_state_test.bin"))
            api.byte_vector_to_file(file_path, bytes)  # stash it into a file
            bytes = api.byte_vector_from_file(file_path)  # get it back from the file and into a ByteVector
        ms_2x = pt_gs_k.deserialize_from_bytes(bytes)  # then restore it from bytes to a StateWithIdVector

        self.assertIsNotNone(ms_2x)
        for i in range(len(ms_2x)):
            self.assertAlmostEqual(ms_2x[i].state.kirchner.q, 200 + i)
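
Besides the byte-oriented round trip above, these state vectors also round-trip through a plain string (the same pair that verify_state_handler uses further down); a minimal sketch, assuming ms_2 from the test above:

state_str = ms_2.serialize_to_str()  # state as a text blob, e.g. for yaml files
ms_2y = ms_2.__class__.deserialize_from_str(state_str)  # restore into a new StateWithIdVector
assert len(ms_2y) == len(ms_2)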
Example #3
 def _create_target_specvect(self):
     self.tv = api.TargetSpecificationVector()
     tst = api.TsTransform()
     cid_map = self.region_model.catchment_id_map
     for ts_info in self._config.target_ts:
         cid = ts_info['catch_id']
         # mapped_indx = [cid_map.index(ID) for ID in cid if ID in cid_map] # since ID to Index conversion not necessary
         found_indx = np.in1d(cid_map, cid)
         if np.count_nonzero(found_indx) != len(cid):
             raise ConfigSimulatorError(
                 "Catchment index {} for target series {} not found.".
                 format(
                     ','.join([
                         str(val)
                         for val in [i for i in cid if i not in cid_map]
                     ]), ts_info['uid']))
         catch_indx = api.IntVector(cid)
         tsp = ts_info['ts']
         t = api.TargetSpecificationPts()
         t.catchment_indexes = catch_indx
         t.scale_factor = ts_info['weight']
         t.calc_mode = self.obj_funcs[ts_info['obj_func']['name']]
         t.s_r = ts_info['obj_func']['scaling_factors']['s_corr']
         t.s_a = ts_info['obj_func']['scaling_factors']['s_var']
         t.s_b = ts_info['obj_func']['scaling_factors']['s_bias']
         tsa = tst.to_average(ts_info['start_datetime'],
                              ts_info['run_time_step'],
                              ts_info['number_of_steps'], tsp)
         t.ts = tsa
         self.tv.append(t)
    def test_model_area_functions(self):
        num_cells = 20
        model_type = pt_gs_k.PTGSKModel
        model = self.build_model(model_type, pt_gs_k.PTGSKParameter, num_cells)
        # demo how to get area statistics.
        cids = api.IntVector()
        total_area = model.statistics.total_area(cids)
        forest_area = model.statistics.forest_area(cids)
        glacier_area = model.statistics.glacier_area(cids)
        lake_area = model.statistics.lake_area(cids)
        reservoir_area = model.statistics.reservoir_area(cids)
        unspecified_area = model.statistics.unspecified_area(cids)
        snow_area = model.statistics.snow_storage_area(cids)
        self.assertAlmostEqual(snow_area,
                               total_area - lake_area - reservoir_area)
        self.assertAlmostEqual(
            total_area, forest_area + glacier_area + lake_area +
            reservoir_area + unspecified_area)
        elevation = model.statistics.elevation(cids)
        assert abs(elevation - 475 / 2.0) < 1e-3, 'average height'
        cids.append(3)
        # now cids contains 3, which matches no cells, so this must raise
        with self.assertRaises(RuntimeError):
            model.statistics.total_area(cids)
Example #5
 def test_calibration_ts_case(self):
     times=[0, 3600, 3600 + 2*3600]
     ta=api.TimeAxis(api.UtcTimeVector(times[0:-1]), times[-1])
     values=api.DoubleVector([0.0]*(len(times) - 1))
     ts=api.TimeSeries(ta, values, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
     target=api.TargetSpecificationPts(ts, api.IntVector([0]), 1.0, api.ABS_DIFF, 1.0, 1.0, 1.0, api.CELL_CHARGE, 'water_balance')
     self.assertIsNotNone(target)
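
The positional arguments of the TargetSpecificationPts constructor are not obvious from the one-liner above; the annotated restatement below is a sketch, with each argument's meaning inferred from the property assignments elsewhere in these examples (s_r/s_a/s_b, calc_mode, catchment_property, uid).

target = api.TargetSpecificationPts(
    ts,                   # the observed/target time-series
    api.IntVector([0]),   # catchment indexes the target applies to
    1.0,                  # scale_factor (weight of this target)
    api.ABS_DIFF,         # calc_mode, the objective function
    1.0, 1.0, 1.0,        # s_r, s_a, s_b (KGE scaling factors)
    api.CELL_CHARGE,      # catchment_property to compare against
    'water_balance')      # uid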
Example #6
    def test_percentiles(self):
        c=api.Calendar()
        t0=c.time(2016, 1, 1)
        dt=api.deltahours(1)
        n=240
        ta=api.TimeAxisFixedDeltaT(t0, dt, n)
        timeseries=api.TsVector()

        for i in range(10):
            timeseries.append(
                api.TimeSeries(ta=ta, fill_value=i, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE))

        wanted_percentiles=api.IntVector([api.statistics_property.MIN_EXTREME,
                                          0, 10, 50,
                                          api.statistics_property.AVERAGE,
                                          70, 100,
                                          api.statistics_property.MAX_EXTREME])
        ta_day=api.TimeAxisFixedDeltaT(t0, dt*24, n//24)
        ta_day2=api.TimeAxis(t0, dt*24, n//24)
        percentiles=api.percentiles(timeseries, ta_day, wanted_percentiles)
        percentiles2=timeseries.percentiles(ta_day2, wanted_percentiles)  # just to verify it works with alt. syntax

        self.assertEqual(len(percentiles2), len(percentiles))

        for i in range(len(ta_day)):
            self.assertAlmostEqual(0.0, percentiles[0].value(i), 3, "min-extreme ")
            self.assertAlmostEqual(0.0, percentiles[1].value(i), 3, "  0-percentile")
            self.assertAlmostEqual(0.9, percentiles[2].value(i), 3, " 10-percentile")
            self.assertAlmostEqual(4.5, percentiles[3].value(i), 3, " 50-percentile")
            self.assertAlmostEqual(4.5, percentiles[4].value(i), 3, "   -average")
            self.assertAlmostEqual(6.3, percentiles[5].value(i), 3, " 70-percentile")
            self.assertAlmostEqual(9.0, percentiles[6].value(i), 3, "100-percentile")
            self.assertAlmostEqual(9.0, percentiles[7].value(i), 3, "max-extreme")
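
The expected interval values above (0.9, 4.5, 6.3) follow the linear-interpolation percentile convention over the ten constant series 0..9; a minimal numpy cross-check, independent of the Shyft api:

import numpy as np

sample = np.arange(10.0)  # one value per constant series
assert abs(np.percentile(sample, 10) - 0.9) < 1e-9
assert abs(np.percentile(sample, 50) - 4.5) < 1e-9
assert abs(np.percentile(sample, 70) - 6.3) < 1e-9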
Example #7
 def _create_target_specvect(self):
     print("Creating TargetSpecificationVector...")
     tv = api.TargetSpecificationVector()
     tst = api.TsTransform()
     cid_map = self.region_model.catchment_id_map
     for repo in self.target_repo:
         tsp = repo['repository'].read([ts_info['uid'] for ts_info in repo['1D_timeseries']], self.time_axis.total_period())
         for ts_info in repo['1D_timeseries']:
             if np.count_nonzero(np.in1d(cid_map, ts_info['catch_id'])) != len(ts_info['catch_id']):
                 raise ConfigSimulatorError("Catchment ID {} for target series {} not found.".format(
                         ','.join([str(val) for val in [i for i in ts_info['catch_id'] if i not in cid_map]]), ts_info['uid']))
             period = api.UtcPeriod(ts_info['start_datetime'],
                                    ts_info['start_datetime'] + ts_info['number_of_steps'] * ts_info['run_time_step'])
             if not self.time_axis.total_period().contains(period):
                 raise ConfigSimulatorError(
                     "Period {} for target series {} is not within the calibration period {}.".format(
                         period.to_string(), ts_info['uid'], self.time_axis.total_period().to_string()))
             #tsp = repo['repository'].read([ts_info['uid']], period)[ts_info['uid']]
             t = api.TargetSpecificationPts()
             t.uid = ts_info['uid']
             t.catchment_indexes = api.IntVector(ts_info['catch_id'])
             t.scale_factor = ts_info['weight']
             t.calc_mode = self.obj_funcs[ts_info['obj_func']['name']]
             for nm, k in zip(['s_r', 's_a', 's_b'], ['s_corr', 's_var', 's_bias']):
                 setattr(t, nm, ts_info['obj_func']['scaling_factors'][k])
             t.ts = api.TimeSeries(tst.to_average(ts_info['start_datetime'], ts_info['run_time_step'], ts_info['number_of_steps'], tsp[ts_info['uid']]))
             tv.append(t)
     return tv
    def test_create_target_spec_from_std_time_series(self):
        """
        Verify we can create target-spec giving ordinary ts,
        and that passing a non-fixed time-axis raises exception

        """
        cal = api.Calendar()
        ta = api.TimeAxis(cal.time(2017, 1, 1), api.deltahours(1), 24)
        ts = api.TimeSeries(
            ta,
            fill_value=3.0,
            point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        cids = api.IntVector([0, 2, 3])
        t0 = api.TargetSpecificationPts(ts, cids, 0.7, api.KLING_GUPTA, 1.0,
                                        1.0, 1.0, api.SNOW_COVERED_AREA,
                                        'test_uid')
        self.assertAlmostEqual(t0.ts.value(0), ts.value(0))
        rid = 0
        t1 = api.TargetSpecificationPts(ts, rid, 0.7, api.KLING_GUPTA, 1.0,
                                        1.0, 1.0, 'test_uid')
        self.assertAlmostEqual(t1.ts.value(0), ts.value(0))
        tax = api.TimeAxis(api.UtcTimeVector.from_numpy(ta.time_points[:-1]),
                           ta.total_period().end)
        tsx = api.TimeSeries(
            tax,
            fill_value=2.0,
            point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        tx = api.TargetSpecificationPts(tsx, rid, 0.7, api.KLING_GUPTA, 1.0,
                                        1.0, 1.0, 'test_uid')
        self.assertIsNotNone(tx)
Example #9
    def run_calibration(self, model_t):
        # set up configuration
        config_dir = path.join(path.dirname(__file__), "netcdf")
        cfg = orchestration.YAMLConfig(
            "atnsjoen_calibration.yaml", "atnsjoen",
            config_dir=config_dir, data_dir=shyftdata_dir,
            model_t=model_t)
        time_axis = cfg.time_axis

        # get a simulator
        simulator = cfg.get_simulator()

        n_cells = simulator.region_model.size()
        state_repos = DefaultStateRepository(cfg.model_t, n_cells)
        simulator.run(time_axis, state_repos.get_state(0))
        cid = 1

        target_discharge_ts = simulator.region_model.statistics.discharge([cid])
        target_discharge = api.TsTransform().to_average(time_axis.time(0), time_axis.time(1)-time_axis.time(0), time_axis.size(), target_discharge_ts)
        # Perturb parameters
        param = simulator.region_model.get_region_parameter()
        p_vec_orig = [param.get(i) for i in range(param.size())]
        p_vec_min = p_vec_orig[:]
        p_vec_max = p_vec_orig[:]
        p_vec_guess = p_vec_orig[:]
        random.seed(0)
        p_names = []
        for i in range(4):
            p_names.append(param.get_name(i))
            p_vec_min[i] *= 0.5
            p_vec_max[i] *= 1.5
            p_vec_guess[i] = random.uniform(p_vec_min[i], p_vec_max[i])
            if p_vec_min[i] > p_vec_max[i]:
                p_vec_min[i], p_vec_max[i] = p_vec_max[i], p_vec_min[i]
        p_min = simulator.region_model.parameter_t()
        p_max = simulator.region_model.parameter_t()
        p_guess = simulator.region_model.parameter_t()
        p_min.set(p_vec_min)
        p_max.set(p_vec_max)
        p_guess.set(p_vec_guess)

        # Find parameters
        target_spec = api.TargetSpecificationPts(target_discharge, api.IntVector([cid]),
                                                 1.0, api.KLING_GUPTA)
        target_spec_vec = api.TargetSpecificationVector()  # ([target_spec]) does not yet work
        target_spec_vec.append(target_spec)
        p_opt = simulator.optimize(time_axis, state_repos.get_state(0),
                                   target_spec_vec, p_guess, p_min, p_max)

        simulator.region_model.set_catchment_parameter(cid, p_opt)
        simulator.run(time_axis, state_repos.get_state(0))
        found_discharge = simulator.region_model.statistics.discharge([cid])

        t_vs = np.array([target_discharge.value(i) for i in range(target_discharge.size())])
        t_ts = np.array([target_discharge.time(i) for i in range(target_discharge.size())])
        f_vs = np.array([found_discharge.value(i) for i in range(found_discharge.size())])
        f_ts = np.array([found_discharge.time(i) for i in range(found_discharge.size())])
        self.assertTrue(np.linalg.norm(t_ts - f_ts) < 1.0e-10)
        self.assertTrue(np.linalg.norm(t_vs - f_vs) < 1.0e-3)
 def verify_state_handler(self, model):
     cids_unspecified = api.IntVector()
     states = model.state.extract_state(cids_unspecified)
     self.assertEqual(len(states), model.size())
     state_str = str(states.serialize_to_str())
     states2 = states.__class__.deserialize_from_str(state_str)
     unapplied_list = model.state.apply_state(states, cids_unspecified)
     self.assertEqual(len(unapplied_list), 0)
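
A note on the return value: judging by the assertions here and in the tests above, apply_state returns the collection of states that could not be matched to a model cell, so an empty result means everything was applied. A condensed restatement (names as above):

unapplied = model.state.apply_state(states, cids_unspecified)
assert len(unapplied) == 0  # every supplied state found its cell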
Example #11
    def test_run_geo_ts_data_simulator(self):
        # set up configuration
        cfg = YAMLSimConfig(self.sim_config_file, "neanidelva")

        # create a simulator
        simulator = DefaultSimulator(cfg.region_model_id,
                                     cfg.interpolation_id,
                                     cfg.get_region_model_repo(),
                                     cfg.get_geots_repo(),
                                     cfg.get_interp_repo(),
                                     initial_state_repository=None,
                                     catchments=None)
        state_repos = DefaultStateRepository(simulator.region_model)
        simulator.region_model.set_calculation_filter(api.IntVector([1228]),
                                                      api.IntVector())
        simulator.run(time_axis=cfg.time_axis, state=state_repos.get_state(0))
        sim_copy = simulator.copy()
        sim_copy.region_model.set_calculation_filter(api.IntVector([1228]),
                                                     api.IntVector())
        sim_copy.run(cfg.time_axis, state_repos.get_state(0))
    def test_create_TargetSpecificationPts(self):
        t = api.TargetSpecificationPts()
        t.scale_factor = 1.0
        t.calc_mode = api.NASH_SUTCLIFFE
        t.calc_mode = api.KLING_GUPTA
        t.s_r = 1.0  # KGEs scale-factors
        t.s_a = 2.0
        t.s_b = 3.0
        self.assertAlmostEqual(t.scale_factor, 1.0)
        # create a ts with some points
        cal = api.Calendar()
        start = cal.time(api.YMDhms(2015, 1, 1, 0, 0, 0))
        dt = api.deltahours(1)
        tsf = api.TsFactory()
        times = api.UtcTimeVector()
        times.push_back(start + 1 * dt)
        times.push_back(start + 3 * dt)
        times.push_back(start + 4 * dt)

        values = api.DoubleVector()
        values.push_back(1.0)
        values.push_back(3.0)
        values.push_back(np.nan)
        tsp = tsf.create_time_point_ts(api.UtcPeriod(start, start + 24 * dt), times, values)
        # convert it from a time-point ts (as returned from the current smg repository) to a fixed-interval ts with a time-axis, as needed by calibration
        tst = api.TsTransform()
        tsa = tst.to_average(start, dt, 24, tsp)
        # tsa2 = tst.to_average(start,dt,24,tsp,False)
        # tsa_staircase = tst.to_average_staircase(start,dt,24,tsp,False) # nans infects the complete interval to nan
        # tsa_staircase2 = tst.to_average_staircase(start,dt,24,tsp,True) # skip nans, nans are 0
        # stuff it into the target spec.
        # also show how to specify snow-calibration
        cids = api.IntVector([0, 2, 3])
        t2 = api.TargetSpecificationPts(tsa,cids, 0.7, api.KLING_GUPTA, 1.0, 1.0, 1.0, api.SNOW_COVERED_AREA)
        t2.catchment_property = api.SNOW_WATER_EQUIVALENT
        self.assertEqual(t2.catchment_property, api.SNOW_WATER_EQUIVALENT)
        self.assertIsNotNone(t2.catchment_indexes)
        for i in range(len(cids)):
            self.assertEqual(cids[i], t2.catchment_indexes[i])
        t.ts = tsa
        #TODO: Does not work, list of objects are not yet convertible tv = api.TargetSpecificationVector([t, t2])
        tv = api.TargetSpecificationVector()
        tv.append(t)
        tv.append(t2)
        # now verify we got something ok
        self.assertEqual(2, tv.size())
        self.assertAlmostEqual(tv[0].ts.value(1), 1.5)  # average over interval 1, linear 1.0..2.0 -> 1.5
        self.assertAlmostEqual(tv[0].ts.value(2), 2.5)  # average over interval 2, linear 2.0..3.0 -> 2.5
        self.assertAlmostEqual(tv[0].ts.value(3), 3.0)  # interval 3 keeps the last point value -> 3.0
        # and that the target vector now have its own copy of ts
        tsa.set(1, 3.0)
        self.assertAlmostEqual(tv[0].ts.value(1), 1.5)  # make sure the ts passed onto target spec, is a copy
        self.assertAlmostEqual(tsa.value(1), 3.0)  # and that we really did change the source
Example #13
    def test_percentiles_with_min_max_extremes(self):
        """ the percentiles function now also supports picking out the min-max peak value
            within each interval.
            Setup test-data so that we have a well known percentile result,
            but also have peak-values within the interval that we can
            verify.
            We let hour ts 0..9 have values 0..9 constant 24*10 days
               then modify ts[1], every day first  value to a peak min value equal to - day_no*1
                                  every day second value to a peak max value equal to + day_no*1
                                  every day 3rd    value to a nan value
            ts[1] should then have same average value for each day (so same percentile)
                                            but min-max extreme should be equal to +- day_no*1
        """
        c=api.Calendar()
        t0=c.time(2016, 1, 1)
        dt=api.deltahours(1)
        n=240
        ta=api.TimeAxis(t0, dt, n)
        timeseries=api.TsVector()
        p_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE
        for i in range(10):
            timeseries.append(api.TimeSeries(ta=ta, fill_value=i, point_fx=p_fx))

        ts=timeseries[1]  # pick this one to insert min/max extremes
        for i in range(0, 240, 24):
            ts.set(i + 0, 1.0 - 100*i/24.0)
            ts.set(i + 1, 1.0 + 100*i/24.0)  # notice that when i==0, this gives 1.0
            ts.set(i + 2, float('nan'))  # also put in a nan, just to verify it is ignored during average processing

        wanted_percentiles=api.IntVector([api.statistics_property.MIN_EXTREME,
                                          0, 10, 50,
                                          api.statistics_property.AVERAGE,
                                          70, 100,
                                          api.statistics_property.MAX_EXTREME])
        ta_day=api.TimeAxis(t0, dt*24, n//24)
        percentiles=api.percentiles(timeseries, ta_day, wanted_percentiles)
        for i in range(len(ta_day)):
            if i == 0:  # first timestep, the min/max extremes are picked from 0'th and 9'th ts.
                self.assertAlmostEqual(0.0, percentiles[0].value(i), 3, "min-extreme ")
                self.assertAlmostEqual(9.0, percentiles[7].value(i), 3, "max-extreme")
            else:
                self.assertAlmostEqual(1.0 - 100.0*i*24.0/24.0, percentiles[0].value(i), 3, "min-extreme ")
                self.assertAlmostEqual(1.0 + 100.0*i*24.0/24.0, percentiles[7].value(i), 3, "max-extreme")
            self.assertAlmostEqual(0.0, percentiles[1].value(i), 3, "  0-percentile")
            self.assertAlmostEqual(0.9, percentiles[2].value(i), 3, " 10-percentile")
            self.assertAlmostEqual(4.5, percentiles[3].value(i), 3, " 50-percentile")
            self.assertAlmostEqual(4.5, percentiles[4].value(i), 3, "   -average")
            self.assertAlmostEqual(6.3, percentiles[5].value(i), 3, " 70-percentile")
            self.assertAlmostEqual(9.0, percentiles[6].value(i), 3, "100-percentile")
Example #14
 def test_int_vector(self):
     dv_from_list = api.IntVector([x for x in range(10)])
     dv_np = np.arange(10, dtype=np.int32)  # notice, default is int64, which does not convert automatically to int32
     dv_from_np = api.IntVector.from_numpy(dv_np)
     self.assertEqual(len(dv_from_list), 10)
     assert_array_almost_equal(dv_from_list.to_numpy(), dv_np)
     assert_array_almost_equal(dv_from_np.to_numpy(), dv_np)
     dv_from_np[5] = 8
     dv_from_np.append(11)
     dv_from_np.push_back(12)
     dv_np[5] = 8
     dv_np.resize(12)
     dv_np[10] = 11
     dv_np[11] = 12
     assert_array_almost_equal(dv_from_np.to_numpy(), dv_np)
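
As the comment in the test notes, numpy defaults to int64 while IntVector.from_numpy expects int32; a minimal sketch of the explicit conversion (same api module assumed):

import numpy as np

a64 = np.arange(10)                 # dtype int64 by default on most platforms
a32 = a64.astype(np.int32)          # convert explicitly before crossing into the api
iv = api.IntVector.from_numpy(a32)  # element type now matches what IntVector stores
assert len(iv) == 10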
 def test_model_area_functions(self):
     num_cells = 20
     model_type = pt_gs_k.PTGSKModel
     model = self.build_model(model_type, pt_gs_k.PTGSKParameter, num_cells)
     # demo how to get area statistics.
     cids = api.IntVector()
     total_area = model.statistics.total_area(cids)
     forest_area = model.statistics.forest_area(cids)
     glacier_area = model.statistics.glacier_area(cids)
     lake_area = model.statistics.lake_area(cids)
     reservoir_area = model.statistics.reservoir_area(cids)
     unspecified_area = model.statistics.unspecified_area(cids)
     self.assertAlmostEqual(total_area, forest_area + glacier_area + lake_area + reservoir_area + unspecified_area)
     cids.append(3)
     total_area_no_match = model.statistics.total_area(cids)  # now, cids contains 3, that matches no cells
     self.assertAlmostEqual(total_area_no_match, 0.0)
Example #16
 def _create_target_specvect(self):
     self.tv = api.TargetSpecificationVector()
     tst = api.TsTransform()
     for ts_info in self._config.target_ts:
         mapped_indx = [
             i for i, j in enumerate(self.region_model.catchment_id_map)
             if j in ts_info['catch_id']
         ]
         catch_indx = api.IntVector(mapped_indx)
         tsp = ts_info['ts']
         t = api.TargetSpecificationPts()
         t.catchment_indexes = catch_indx
         t.scale_factor = ts_info['weight']
         t.calc_mode = self.obj_funcs[ts_info['obj_func']['name']]
         t.s_r = ts_info['obj_func']['scaling_factors']['s_corr']
         t.s_a = ts_info['obj_func']['scaling_factors']['s_var']
         t.s_b = ts_info['obj_func']['scaling_factors']['s_bias']
         tsa = tst.to_average(ts_info['start_datetime'],
                              ts_info['run_time_step'],
                              ts_info['number_of_steps'], tsp)
         t.ts = tsa
         self.tv.append(t)
    def test_state_with_id_handler(self):
        num_cells = 20
        model_type = pt_gs_k.PTGSKModel
        model = self.build_model(model_type, pt_gs_k.PTGSKParameter, num_cells,
                                 2)
        cids_unspecified = api.IntVector()
        cids_1 = api.IntVector([1])
        cids_2 = api.IntVector([2])

        model_state_12 = model.state.extract_state(cids_unspecified)  # this is how to get all states from model
        model_state_1 = model.state.extract_state(cids_1)  # this is how to get only specified states from model
        model_state_2 = model.state.extract_state(cids_2)
        self.assertEqual(len(model_state_1) + len(model_state_2), len(model_state_12))
        # We need to store state into yaml text files; for this it's nice to have it as a string:
        ms2 = pt_gs_k.PTGSKStateWithIdVector.deserialize_from_str(
            model_state_2.serialize_to_str())  # verify state serialization round-trip
        self.assertEqual(len(ms2), len(model_state_2))
        for a, b in zip(ms2, model_state_2):
            self.assertEqual(a.id, b.id)
            self.assertAlmostEqual(a.state.kirchner.q, b.state.kirchner.q)

        self.assertGreater(len(model_state_1), 0)
        self.assertGreater(len(model_state_2), 0)
        for i in range(len(model_state_1)):  # verify selective extract catchment 1
            self.assertEqual(model_state_1[i].id.cid, 1)
        for i in range(len(model_state_2)):  # verify selective extract catchment 2
            self.assertEqual(model_state_2[i].id.cid, 2)
        for i in range(len(model_state_12)):
            model_state_12[i].state.kirchner.q = 100 + i
        model.state.apply_state(model_state_12, cids_unspecified)  # this is how to put all states into the model
        ms_12 = model.state.extract_state(cids_unspecified)
        for i in range(len(ms_12)):
            self.assertAlmostEqual(ms_12[i].state.kirchner.q, 100 + i)
        for i in range(len(model_state_2)):
            model_state_2[i].state.kirchner.q = 200 + i
        unapplied = model.state.apply_state(model_state_2, cids_2)  # this is how to put a limited set of states into the model
        self.assertEqual(len(unapplied), 0)
        ms_12 = model.state.extract_state(cids_unspecified)
        for i in range(len(ms_12)):
            if ms_12[i].id.cid == 1:
                self.assertAlmostEqual(ms_12[i].state.kirchner.q, 100 + i)

        ms_2 = model.state.extract_state(cids_2)
        for i in range(len(ms_2)):
            self.assertAlmostEqual(ms_2[i].state.kirchner.q, 200 + i)

        # feature test: serialization support, to and from bytes
        bytes = ms_2.serialize_to_bytes()  # first make some bytes out of the state
        with tempfile.TemporaryDirectory() as tmpdirname:
            file_path = str(path.join(tmpdirname, "pt_gs_k_state_test.bin"))
            api.byte_vector_to_file(file_path, bytes)  # stash it into a file
            bytes = api.byte_vector_from_file(file_path)  # get it back from the file and into a ByteVector
        ms_2x = pt_gs_k.deserialize_from_bytes(bytes)  # then restore it from bytes to a StateWithIdVector

        self.assertIsNotNone(ms_2x)
        for i in range(len(ms_2x)):
            self.assertAlmostEqual(ms_2x[i].state.kirchner.q, 200 + i)

        # feature test: given a state-with-id-vector, get the pure state-vector
        # suitable for rm.initial_state= <state_vector>
        # note however that this is 'unsafe', you need to ensure that size/ordering is ok
        # - that is the purpose of the cell-state-with-id
        #   better solution could be to use
        #     rm.state.apply( state_with_id) .. and maybe check the result, number of states== expected applied
        #     rm.initial_state=rm.current_state  .. a new property to ease typical tasks
        sv_2 = ms_2.state_vector
        self.assertEqual(len(sv_2), len(ms_2))
        for s, sid in zip(sv_2, ms_2):
            self.assertAlmostEqual(s.kirchner.q, sid.state.kirchner.q)
        # example apply, then initial state:
        model.state.apply_state(ms_2, cids_unspecified)
        model.initial_state = model.current_state
Example #18
        def test_run_observed_then_arome_and_store(self):
            """
              Start Tistel 2015.09.01, dummy state with some kirchner water
               use observations around Tistel (geo_ts_repository)
               and simulate forwared to 2015.10.01 (store discharge and catchment level precip/temp)
               then use arome forecast for 65 hours (needs arome for this period in arome-directory)
               finally store the arome results.

            """
            utc = Calendar()  # No offset gives Utc
            time_axis = TimeAxisFixedDeltaT(utc.time(YMDhms(2015, 9, 1, 0)), deltahours(1), 30 * 24)
            fc_time_axis = TimeAxisFixedDeltaT(utc.time(YMDhms(2015, 10, 1, 0)), deltahours(1), 65)

            interpolation_id = 0
            ptgsk = DefaultSimulator("Tistel-ptgsk",
                                     interpolation_id,
                                     self.region_model_repository,
                                     self.geo_ts_repository,
                                     self.interpolation_repository, None)
            n_cells = ptgsk.region_model.size()
            ptgsk_state = DefaultStateRepository(ptgsk.region_model.__class__, n_cells)

            ptgsk.region_model.set_state_collection(-1, True)  # collect state so we can inspect it
            s0 = ptgsk_state.get_state(0)
            for i in range(s0.size()):  # add some juice to get started
                s0[i].kirchner.q = 0.5

            ptgsk.run(time_axis, s0)

            print("Done simulation, testing that we can extract data from model")

            cids = api.IntVector()  # an empty vector means: pull out data for all catchments
            model = ptgsk.region_model  # fetch out the model
            sum_discharge = model.statistics.discharge(cids)
            self.assertIsNotNone(sum_discharge)
            avg_temperature = model.statistics.temperature(cids)
            avg_precipitation = model.statistics.precipitation(cids)
            self.assertIsNotNone(avg_precipitation)
            self.assertIsNotNone(avg_temperature)
            for time_step in range(time_axis.size()):
                precip_raster = model.statistics.precipitation(cids, time_step)  # example raster output
                self.assertEqual(precip_raster.size(), n_cells)
            avg_gs_lwc = model.gamma_snow_state.lwc(cids)  # sca skaugen|gamma
            self.assertIsNotNone(avg_gs_lwc)
            # lwc surface_heat alpha melt_mean melt iso_pot_energy temp_sw
            avg_gs_output = model.gamma_snow_response.outflow(cids)
            self.assertIsNotNone(avg_gs_output)
            print("done. now save to db")
            # SmGTsRepository(PROD,FC_PROD)
            save_list = [
                TsStoreItem(u'/test/x/shyft/tistel/discharge_m3s', lambda m: m.statistics.discharge(cids)),
                TsStoreItem(u'/test/x/shyft/tistel/temperature', lambda m: m.statistics.temperature(cids)),
                TsStoreItem(u'/test/x/shyft/tistel/precipitation', lambda m: m.statistics.precipitation(cids)),
            ]

            tss = TimeseriesStore(SmGTsRepository(PREPROD, FC_PREPROD), save_list)

            self.assertTrue(tss.store_ts(ptgsk.region_model))

            print("Run forecast arome")
            endstate = ptgsk.region_model.state_t.vector_t()
            ptgsk.region_model.get_states(endstate)  # get the state at end of obs
            ptgsk.geo_ts_repository = self.arome_repository  # switch to arome here
            ptgsk.run_forecast(fc_time_axis, fc_time_axis.start, endstate)  # now forecast
            print("Done forecast")
            fc_save_list = [
                TsStoreItem(u'/test/x/shyft/tistel/fc_discharge_m3s', lambda m: m.statistics.discharge(cids)),
                TsStoreItem(u'/test/x/shyft/tistel/fc_temperature', lambda m: m.statistics.temperature(cids)),
                TsStoreItem(u'/test/x/shyft/tistel/fc_precipitation', lambda m: m.statistics.precipitation(cids)),
                TsStoreItem(u'/test/x/shyft/tistel/fc_radiation', lambda m: m.statistics.radiation(cids)),
                TsStoreItem(u'/test/x/shyft/tistel/fc_rel_hum', lambda m: m.statistics.rel_hum(cids)),
                TsStoreItem(u'/test/x/shyft/tistel/fc_wind_speed', lambda m: m.statistics.wind_speed(cids)),

            ]
            TimeseriesStore(SmGTsRepository(PREPROD, FC_PREPROD), fc_save_list).store_ts(ptgsk.region_model)
            print("Done save to db")
    def test_create_TargetSpecificationPts(self):
        t = api.TargetSpecificationPts()
        t.scale_factor = 1.0
        t.calc_mode = api.NASH_SUTCLIFFE
        t.calc_mode = api.KLING_GUPTA
        t.calc_mode = api.ABS_DIFF
        t.calc_mode = api.RMSE
        t.s_r = 1.0  # KGEs scale-factors
        t.s_a = 2.0
        t.s_b = 3.0
        self.assertIsNotNone(t.uid)
        t.uid = 'test'
        self.assertEqual(t.uid, 'test')
        self.assertAlmostEqual(t.scale_factor, 1.0)
        # create a ts with some points
        cal = api.Calendar()
        start = cal.time(2015, 1, 1, 0, 0, 0)
        dt = api.deltahours(1)
        tsf = api.TsFactory()
        times = api.UtcTimeVector()
        times.push_back(start + 1 * dt)
        times.push_back(start + 3 * dt)
        times.push_back(start + 4 * dt)

        values = api.DoubleVector()
        values.push_back(1.0)
        values.push_back(3.0)
        values.push_back(np.nan)
        tsp = tsf.create_time_point_ts(api.UtcPeriod(start, start + 24 * dt),
                                       times, values)
        # convert it from a time-point ts (as returned from the current smg repository) to a fixed-interval ts with a time-axis, as needed by calibration
        tst = api.TsTransform()
        tsa = tst.to_average(start, dt, 24, tsp)
        # tsa2 = tst.to_average(start,dt,24,tsp,False)
        # tsa_staircase = tst.to_average_staircase(start,dt,24,tsp,False) # nans infects the complete interval to nan
        # tsa_staircase2 = tst.to_average_staircase(start,dt,24,tsp,True) # skip nans, nans are 0
        # stuff it into the target spec.
        # also show how to specify snow-calibration
        cids = api.IntVector([0, 2, 3])
        t2 = api.TargetSpecificationPts(tsa, cids, 0.7, api.KLING_GUPTA, 1.0,
                                        1.0, 1.0, api.SNOW_COVERED_AREA,
                                        'test_uid')
        self.assertEqual(t2.uid, 'test_uid')
        t2.catchment_property = api.SNOW_WATER_EQUIVALENT
        self.assertEqual(t2.catchment_property, api.SNOW_WATER_EQUIVALENT)
        t2.catchment_property = api.CELL_CHARGE
        self.assertEqual(t2.catchment_property, api.CELL_CHARGE)
        self.assertIsNotNone(t2.catchment_indexes)
        for i in range(len(cids)):
            self.assertEqual(cids[i], t2.catchment_indexes[i])
        t.ts = api.TimeSeries(tsa)  # target spec is now a regular TimeSeries
        tv = api.TargetSpecificationVector()
        tv[:] = [t, t2]
        # now verify we got something ok
        self.assertEqual(2, tv.size())
        self.assertAlmostEqual(tv[0].ts.value(1), 1.5)  # average over interval 1 -> 1.5
        self.assertAlmostEqual(tv[0].ts.value(2), 2.5)  # average over interval 2 -> 2.5
        # self.assertAlmostEqual(tv[0].ts.value(3), 3.0)  # originally flat at the end, but now:
        self.assertTrue(math.isnan(tv[0].ts.value(3)))  # nan, strictly linear between points
        # and that the target vector now have its own copy of ts
        tsa.set(1, 3.0)
        self.assertAlmostEqual(tv[0].ts.value(1), 1.5)  # make sure the ts passed to the target spec is a copy
        self.assertAlmostEqual(tsa.value(1), 3.0)  # and that we really did change the source
        # Create a clone of target specification vector
        tv2 = api.TargetSpecificationVector(tv)
        self.assertEqual(2, tv2.size())
        self.assertAlmostEqual(tv2[0].ts.value(1), 1.5)  # average over interval 1 -> 1.5
        self.assertAlmostEqual(tv2[0].ts.value(2), 2.5)  # average over interval 2 -> 2.5
        self.assertTrue(math.isnan(tv2[0].ts.value(3)))  # nan, strictly linear between points
        tv2[0].scale_factor = 10.0
        self.assertAlmostEqual(tv[0].scale_factor, 1.0)
        self.assertAlmostEqual(tv2[0].scale_factor, 10.0)
        # test we can create from breakpoint time-series
        ts_bp = api.TimeSeries(api.TimeAxis(api.UtcTimeVector([0, 25, 20]),
                                            30),
                               fill_value=2.0,
                               point_fx=api.POINT_AVERAGE_VALUE)

        tspec_bp = api.TargetSpecificationPts(ts_bp, cids, 0.7,
                                              api.KLING_GUPTA, 1.0, 1.0, 1.0,
                                              api.CELL_CHARGE, 'test_uid')
        self.assertIsNotNone(tspec_bp)
 def verify_state_handler(self, model):
     cids_unspecified = api.IntVector()
     states = model.state.extract_state(cids_unspecified)
     self.assertEqual(len(states), model.size())
     unapplied_list = model.state.apply_state(states, cids_unspecified)
     self.assertEqual(len(unapplied_list), 0)
Example #21
    def test_a_time_series_vector(self):
        t0=api.utctime_now()
        dt=api.deltahours(1)
        n=240
        ta=api.TimeAxisFixedDeltaT(t0, dt, n)

        a=api.TimeSeries(ta=ta, fill_value=3.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        b=api.TimeSeries(ta=ta, fill_value=2.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        c=api.TimeSeries(ta=ta, fill_value=10.0, point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        v=api.TsVector()
        v.append(a)
        v.append(b)

        self.assertEqual(len(v), 2)
        self.assertAlmostEqual(v[0].value(0), 3.0, "expect first ts to be 3.0")
        aa=api.TimeSeries(ta=a.time_axis, values=a.values,
                          point_fx=api.point_interpretation_policy.POINT_AVERAGE_VALUE)  # copy construct (really copy the values!)
        a.fill(1.0)
        self.assertAlmostEqual(v[0].value(0), 1.0, "expect first ts to be 1.0, because the vector keeps a reference ")
        self.assertAlmostEqual(aa.value(0), 3.0)

        vt=v.values_at(t0).to_numpy()
        self.assertEqual(len(vt), len(v))
        v1=v[0:1]
        self.assertEqual(len(v1), 1)
        self.assertAlmostEqual(v1[0].value(0), 1.0)
        v_clone=api.TsVector(v)
        self.assertEqual(len(v_clone), len(v))
        del v_clone[-1]
        self.assertEqual(len(v_clone), 1)
        self.assertEqual(len(v), 2)
        v_slice_all=v.slice(api.IntVector())
        v_slice_1=v.slice(api.IntVector([1]))
        v_slice_12=v.slice(api.IntVector([0, 1]))
        self.assertEqual(len(v_slice_all), 2)
        self.assertEqual(len(v_slice_1), 1)
        self.assertAlmostEqual(v_slice_1[0].value(0), 2.0)
        self.assertEqual(len(v_slice_12), 2)
        self.assertAlmostEqual(v_slice_12[0].value(0), 1.0)

        # multiplication by scalar
        v_x_2a=v*2.0
        v_x_2b=2.0*v
        for i in range(len(v)):
            self.assertAlmostEqual(v_x_2a[i].value(0), 2*v[i].value(0))
            self.assertAlmostEqual(v_x_2b[i].value(0), 2*v[i].value(0))

        # division by scalar
        v_d_a=v/3.0
        v_d_b=3.0/v
        for i in range(len(v)):
            self.assertAlmostEqual(v_d_a[i].value(0), v[i].value(0)/3.0)
            self.assertAlmostEqual(v_d_b[i].value(0), 3.0/v[i].value(0))

        # addition by scalar
        v_a_a=v + 3.0
        v_a_b=3.0 + v
        for i in range(len(v)):
            self.assertAlmostEqual(v_a_a[i].value(0), v[i].value(0) + 3.0)
            self.assertAlmostEqual(v_a_b[i].value(0), 3.0 + v[i].value(0))

        # sub by scalar
        v_s_a=v - 3.0
        v_s_b=3.0 - v
        for i in range(len(v)):
            self.assertAlmostEqual(v_s_a[i].value(0), v[i].value(0) - 3.0)
            self.assertAlmostEqual(v_s_b[i].value(0), 3.0 - v[i].value(0))

        # multiplication vector by ts
        v_x_ts=v*c
        ts_x_v=c*v
        for i in range(len(v)):
            self.assertAlmostEqual(v_x_ts[i].value(0), v[i].value(0)*c.value(0))
            self.assertAlmostEqual(ts_x_v[i].value(0), c.value(0)*v[i].value(0))

        # division vector by ts
        v_d_ts=v/c
        ts_d_v=c/v
        for i in range(len(v)):
            self.assertAlmostEqual(v_d_ts[i].value(0), v[i].value(0)/c.value(0))
            self.assertAlmostEqual(ts_d_v[i].value(0), c.value(0)/v[i].value(0))

        # add vector by ts
        v_a_ts=v + c
        ts_a_v=c + v
        for i in range(len(v)):
            self.assertAlmostEqual(v_a_ts[i].value(0), v[i].value(0) + c.value(0))
            self.assertAlmostEqual(ts_a_v[i].value(0), c.value(0) + v[i].value(0))

        # sub vector by ts
        v_s_ts=v - c
        ts_s_v=c - v
        for i in range(len(v)):
            self.assertAlmostEqual(v_s_ts[i].value(0), v[i].value(0) - c.value(0))
            self.assertAlmostEqual(ts_s_v[i].value(0), c.value(0) - v[i].value(0))

        # vector mult vector
        va=v
        vb=2.0*v

        v_m_v=va*vb
        self.assertEqual(len(v_m_v), len(va))
        for i in range(len(va)):
            self.assertAlmostEqual(v_m_v[i].value(0), va[i].value(0)*vb[i].value(0))

        # vector div vector
        v_d_v=va/vb
        self.assertEqual(len(v_d_v), len(va))
        for i in range(len(va)):
            self.assertAlmostEqual(v_d_v[i].value(0), va[i].value(0)/vb[i].value(0))

        # vector add vector
        v_a_v=va + vb
        self.assertEqual(len(v_a_v), len(va))
        for i in range(len(va)):
            self.assertAlmostEqual(v_a_v[i].value(0), va[i].value(0) + vb[i].value(0))

        # vector sub vector
        v_s_v=va - vb
        self.assertEqual(len(v_s_v), len(va))
        for i in range(len(va)):
            self.assertAlmostEqual(v_s_v[i].value(0), va[i].value(0) - vb[i].value(0))

        # vector unary minus
        v_u=- va
        self.assertEqual(len(v_u), len(va))
        for i in range(len(va)):
            self.assertAlmostEqual(v_u[i].value(0), -va[i].value(0))

        # integral functions, just to verify exposure works, and one value is according to spec.
        ta2=api.TimeAxis(t0, dt*24, n//24)
        v_avg=v.average(ta2)
        v_int=v.integral(ta2)
        v_acc=v.accumulate(ta2)
        v_sft=v.time_shift(dt*24)
        self.assertIsNotNone(v_avg)
        self.assertIsNotNone(v_int)
        self.assertIsNotNone(v_acc)
        self.assertIsNotNone(v_sft)
        self.assertAlmostEqual(v_avg[0].value(0), 1.0)
        self.assertAlmostEqual(v_int[0].value(0), 86400.0)
        self.assertAlmostEqual(v_acc[0].value(0), 0.0)
        self.assertAlmostEqual(v_sft[0].time(0), t0 + dt*24)

        # min/max functions
        min_v_double=va.min(-1000.0)
        max_v_double=va.max(1000.0)
        self.assertAlmostEqual(min_v_double[0].value(0), -1000.0)
        self.assertAlmostEqual(max_v_double[0].value(0), +1000.0)
        min_v_double=api.min(va, -1000.0)
        max_v_double=api.max(va, +1000.0)
        self.assertAlmostEqual(min_v_double[0].value(0), -1000.0)
        self.assertAlmostEqual(max_v_double[0].value(0), +1000.0)
        # recall: c is the constant 10.0 time-series from above
        c1000=100.0*c
        min_v_double=va.min(-c1000)
        max_v_double=va.max(c1000)
        self.assertAlmostEqual(min_v_double[0].value(0), -c1000.value(0))
        self.assertAlmostEqual(max_v_double[0].value(0), c1000.value(0))
        min_v_double=api.min(va, -c1000)
        max_v_double=api.max(va, c1000)
        self.assertAlmostEqual(min_v_double[0].value(0), -c1000.value(0))
        self.assertAlmostEqual(max_v_double[0].value(0), c1000.value(0))

        v1000=va*1000.0
        min_v_double=va.min(-v1000)
        max_v_double=va.max(v1000)
        self.assertAlmostEqual(min_v_double[0].value(0), -v1000[0].value(0))
        self.assertAlmostEqual(max_v_double[0].value(0), v1000[0].value(0))
        min_v_double=api.min(va, -v1000)
        max_v_double=api.max(va, v1000)
        self.assertAlmostEqual(min_v_double[0].value(0), -v1000[0].value(0))
        self.assertAlmostEqual(max_v_double[0].value(0), v1000[0].value(0))

        # finally, test that an exception is raised if we try to multiply two unequal-sized vectors
        with self.assertRaises(RuntimeError):
            v_clone*va  # unequal-sized ts-vector op must raise

        # also test that empty vector + vector -> vector etc.
        va_2=va + api.TsVector()
        va_3=api.TsVector() + va
        va_4=va - api.TsVector()
        va_5=api.TsVector() - va
        va_x=api.TsVector() + api.TsVector()
        self.assertEqual(len(va_2), len(va))
        self.assertEqual(len(va_3), len(va))
        self.assertEqual(len(va_4), len(va))
        self.assertEqual(len(va_5), len(va))
        self.assertEqual(not va_x, True)
        self.assertEqual(not va_2, False)
        va_2_ok=False
        va_x_ok=True
        if va_2:
            va_2_ok=True
        if va_x:
            va_x_ok=False
        self.assertTrue(va_2_ok)
        self.assertTrue(va_x_ok)
    def run_calibration(self, model_t):
        # set up configuration
        config_dir = path.join(path.dirname(__file__), "netcdf")
        cfg = orchestration.YAMLConfig("atnsjoen_calibration.yaml",
                                       "atnsjoen",
                                       config_dir=config_dir,
                                       data_dir=shyftdata_dir,
                                       model_t=model_t)
        time_axis = cfg.time_axis

        # get a simulator
        simulator = cfg.get_simulator()

        n_cells = simulator.region_model.size()
        state_repos = DefaultStateRepository(cfg.model_t, n_cells)
        s0 = state_repos.get_state(0)
        param = simulator.region_model.get_region_parameter()
        # not needed, we auto-initialize to defaults if not done explicitly
        #if model_t in [pt_hs_k.PTHSKOptModel]:
        #    for i in range(len(s0)):
        #        s0[i].snow.distribute(param.hs)
        simulator.run(time_axis, s0)
        cid = 1

        target_discharge_ts = simulator.region_model.statistics.discharge(
            [cid])
        target_discharge = api.TsTransform().to_average(
            time_axis.time(0),
            time_axis.time(1) - time_axis.time(0), time_axis.size(),
            target_discharge_ts)
        # Perturb parameters
        p_vec_orig = [param.get(i) for i in range(param.size())]
        p_vec_min = p_vec_orig[:]
        p_vec_max = p_vec_orig[:]
        p_vec_guess = p_vec_orig[:]
        random.seed(0)
        p_names = []
        for i in range(4):
            p_names.append(param.get_name(i))
            p_vec_min[i] *= 0.5
            p_vec_max[i] *= 1.5
            p_vec_guess[i] = random.uniform(p_vec_min[i], p_vec_max[i])
            if p_vec_min[i] > p_vec_max[i]:
                p_vec_min[i], p_vec_max[i] = p_vec_max[i], p_vec_min[i]
        p_min = simulator.region_model.parameter_t()
        p_max = simulator.region_model.parameter_t()
        p_guess = simulator.region_model.parameter_t()
        p_min.set(p_vec_min)
        p_max.set(p_vec_max)
        p_guess.set(p_vec_guess)

        # Find parameters
        target_spec = api.TargetSpecificationPts(target_discharge,
                                                 api.IntVector([cid]), 1.0,
                                                 api.KLING_GUPTA)
        target_spec_vec = api.TargetSpecificationVector()  # ([target_spec]) does not yet work
        target_spec_vec.append(target_spec)
        self.assertEqual(simulator.optimizer.trace_size, 0)  # before optimize, trace_size should be 0
        p_opt = simulator.optimize(time_axis, s0, target_spec_vec, p_guess,
                                   p_min, p_max)
        self.assertGreater(simulator.optimizer.trace_size, 0)  # after optimize, some trace values should be there
        # the trace values are in the order of appearance 0...trace_size-1
        #
        goal_fn_values = simulator.optimizer.trace_goal_function_values.to_numpy()  # all of them, as an np array
        self.assertEqual(len(goal_fn_values), simulator.optimizer.trace_size)
        p_last = simulator.optimizer.trace_parameter(simulator.optimizer.trace_size - 1)  # get the last (not necessarily the best)
        self.assertIsNotNone(p_last)
        simulator.region_model.set_catchment_parameter(cid, p_opt)
        simulator.run(time_axis, s0)
        found_discharge = simulator.region_model.statistics.discharge([cid])

        t_vs = np.array([
            target_discharge.value(i) for i in range(target_discharge.size())
        ])
        t_ts = np.array(
            [target_discharge.time(i) for i in range(target_discharge.size())])
        f_vs = np.array(
            [found_discharge.value(i) for i in range(found_discharge.size())])
        f_ts = np.array(
            [found_discharge.time(i) for i in range(found_discharge.size())])
        self.assertTrue(np.linalg.norm(t_ts - f_ts) < 1.0e-10)
        self.assertTrue(np.linalg.norm(t_vs - f_vs) < 1.0e-3)
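
For reference, the optimizer-trace accessors exercised above, condensed into one place (simulator assumed as in the test): trace_size counts the recorded evaluations, trace_goal_function_values holds the goal-function value per evaluation, and trace_parameter(i) returns the i'th evaluated parameter set.

opt = simulator.optimizer
n = opt.trace_size  # number of recorded evaluations
goal_values = opt.trace_goal_function_values.to_numpy()  # goal-function value per evaluation
p_last = opt.trace_parameter(n - 1)  # the last evaluated parameter set (not necessarily the best)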
Example #23
    def test_model_initialize_and_run(self):
        num_cells = 20
        model_type = pt_gs_k.PTGSKModel
        model = self.build_model(model_type, pt_gs_k.PTGSKParameter, num_cells)
        self.assertEqual(model.size(), num_cells)
        # now modify snow_cv forest_factor to 0.1
        region_parameter = model.get_region_parameter()
        region_parameter.gs.snow_cv_forest_factor = 0.1
        region_parameter.gs.snow_cv_altitude_factor = 0.0001
        self.assertEqual(region_parameter.gs.snow_cv_forest_factor, 0.1)
        self.assertEqual(region_parameter.gs.snow_cv_altitude_factor, 0.0001)

        self.assertAlmostEqual(region_parameter.gs.effective_snow_cv(1.0, 0.0),
                               region_parameter.gs.snow_cv + 0.1)
        self.assertAlmostEqual(
            region_parameter.gs.effective_snow_cv(1.0, 1000.0),
            region_parameter.gs.snow_cv + 0.1 + 0.1)
        cal = api.Calendar()
        time_axis = api.Timeaxis(cal.time(api.YMDhms(2015, 1, 1, 0, 0, 0)),
                                 api.deltahours(1), 240)
        model_interpolation_parameter = api.InterpolationParameter()
        # degC/m, so -0.5 degC/100m
        model_interpolation_parameter.temperature_idw.default_temp_gradient = -0.005
        # if possible use closest neighbor points and solve gradient using equation,(otherwise default min/max height)
        model_interpolation_parameter.temperature_idw.gradient_by_equation = True
        # Max number of temperature sources used for one interpolation
        model_interpolation_parameter.temperature_idw.max_members = 6
        # 20 km is max distance
        model_interpolation_parameter.temperature_idw.max_distance = 20000
        # Pure linear interpolation
        model_interpolation_parameter.temperature_idw.distance_measure_factor = 1.0
        # This enables IDW with default temperature gradient.
        model_interpolation_parameter.use_idw_for_temperature = True
        self.assertAlmostEqual(
            model_interpolation_parameter.precipitation.scale_factor,
            1.02)  # just verify this one is as it was before the change to scale_factor
        model.run_interpolation(
            model_interpolation_parameter, time_axis,
            self.create_dummy_region_environment(
                time_axis,
                model.get_cells()[int(num_cells / 2)].geo.mid_point()))
        s0 = pt_gs_k.PTGSKStateVector()
        for i in range(num_cells):
            si = pt_gs_k.PTGSKState()
            si.kirchner.q = 40.0
            s0.append(si)
        model.set_states(s0)
        model.set_state_collection(-1, True)  # enable state collection for all cells
        model.run_cells()
        cids = api.IntVector()  # optional, we can add selective catchment_ids here
        sum_discharge = model.statistics.discharge(cids)

        self.assertIsNotNone(sum_discharge)
        avg_temperature = model.statistics.temperature(cids)
        avg_precipitation = model.statistics.precipitation(cids)
        self.assertIsNotNone(avg_precipitation)
        for time_step in range(time_axis.size()):
            precip_raster = model.statistics.precipitation(
                cids, time_step)  # example raster output
            self.assertEqual(precip_raster.size(), num_cells)
        avg_gs_sca = model.gamma_snow_response.sca(cids)  # sca output
        self.assertIsNotNone(avg_gs_sca)
        # lwc surface_heat alpha melt_mean melt iso_pot_energy temp_sw
        avg_gs_albedo = model.gamma_snow_state.albedo(cids)
        self.assertIsNotNone(avg_gs_albedo)
        self.assertEqual(avg_temperature.size(), time_axis.size(),
                         "expect results equal to time-axis size")
        copy_region_model = model.__class__(model)
        self.assertIsNotNone(copy_region_model)
        copy_region_model.run_cells()  # just to verify we can copy and run the new model
Example #24
    def test_snow_and_ground_water_response_calibration(self):
        """
        Test dual calibration strategy:
            * First fit the three Kirchner parameters for
              ground water response during July, August, and
              September.
            * Then fit two snow routine parameters (tx and max_water)
              from November to April.
        """
        # Simulation time axis
        dt = api.deltahours(24)
        n_steps = 364
        utc = api.Calendar()  # No offset gives Utc
        t0 = utc.time(2013, 9, 1, 0)
        time_axis = api.TimeAxisFixedDeltaT(t0, dt, n_steps)

        # Some fake ids
        region_id = 0
        interpolation_id = 0
        opt_model_t = self.model_config.model_type().opt_model_t
        model_config = ModelConfig(self.model_config_file,
                                   overrides={'model_t': opt_model_t})
        region_model_repository = CFRegionModelRepository(
            self.region_config, model_config)
        interp_repos = InterpolationParameterRepository(
            self.interpolation_config)
        netcdf_geo_ts_repos = [
            CFDataRepository(32633,
                             source["params"]["filename"],
                             padding=source["params"]['padding'])
            for source in self.dataset_config.sources
        ]
        geo_ts_repository = GeoTsRepositoryCollection(netcdf_geo_ts_repos)

        # Construct target discharge series
        simulator = DefaultSimulator(region_id, interpolation_id,
                                     region_model_repository,
                                     geo_ts_repository, interp_repos, None)
        state_repos = DefaultStateRepository(simulator.region_model)
        simulator.run(time_axis, state_repos.get_state(0))
        cid = 1228
        target_discharge = api.TsTransform().to_average(
            t0, dt, n_steps,
            simulator.region_model.statistics.discharge([cid]))

        # Construct kirchner parameters
        param = simulator.region_model.parameter_t(
            simulator.region_model.get_region_parameter())
        print_param("True solution", param)

        kirchner_param_min = simulator.region_model.parameter_t(param)
        kirchner_param_max = simulator.region_model.parameter_t(param)
        # Kirchner parameters are quite abstract (no physical meaning), so simply scale them
        kirchner_param_min.kirchner.c1 *= 0.8
        kirchner_param_min.kirchner.c2 *= 0.8
        kirchner_param_min.kirchner.c3 *= 0.8
        kirchner_param_max.kirchner.c1 *= 1.2
        kirchner_param_max.kirchner.c2 *= 1.2
        kirchner_param_max.kirchner.c3 *= 1.2
        # kirchner_t_start = utc.time(2011, 4, 1, 0)
        # kirchner_time_axis = api.TimeAxisFixedDeltaT(kirchner_t_start, dt, 150)
        kirchner_time_axis = time_axis

        # Construct gamma snow parameters (realistic tx and max_water)
        gamma_snow_param_min = simulator.region_model.parameter_t(param)
        gamma_snow_param_max = simulator.region_model.parameter_t(param)
        gamma_snow_param_min.gs.tx = -1.0  # Min snow/rain temperature threshold
        gamma_snow_param_min.gs.max_water = 0.05  # Min 5% max water in snow in coastal regions
        gamma_snow_param_max.gs.tx = 1.0
        gamma_snow_param_max.gs.max_water = 0.25  # Max 25% max water content, or we get too little melt
        gs_t_start = utc.time(2013, 11, 1, 0)
        gs_time_axis = api.TimeAxisFixedDeltaT(gs_t_start, dt, 250)
        # gs_time_axis = time_axis

        # Find parameters
        target_spec = api.TargetSpecificationPts(target_discharge,
                                                 api.IntVector([cid]), 1.0,
                                                 api.KLING_GUPTA)
        target_spec_vec = api.TargetSpecificationVector(
        )  # TODO: we don't currently support a list initializer for vectors
        target_spec_vec.append(target_spec)
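        # hedged sketch: once a list initializer is supported, the two lines above
        # could collapse into a single (hypothetical) constructor call:
        # target_spec_vec = api.TargetSpecificationVector([target_spec])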
        # Construct a fake, perturbed starting point for calibration
        p_vec = [param.get(i) for i in range(param.size())]
        for i, name in enumerate(
            [param.get_name(i) for i in range(len(p_vec))]):
            if name not in ("c1" "c2", "c3", "TX", "max_water"):
                next
            if name in ("c1", "c2", "c3"):
                p_vec[i] = random.uniform(0.8 * p_vec[i], 1.2 * p_vec[i])
            elif name == "TX":
                p_vec[i] = random.uniform(gamma_snow_param_min.gs.tx,
                                          gamma_snow_param_max.gs.tx)
            elif name == "max_water":
                p_vec[i] = random.uniform(gamma_snow_param_min.gs.max_water,
                                          gamma_snow_param_max.gs.max_water)
        param.set(p_vec)
        print_param("Initial guess", param)
        # Two-pass optimization: first for the ground water response, then for the snow routine
        kirchner_p_opt = simulator.optimize(kirchner_time_axis,
                                            state_repos.get_state(0),
                                            target_spec_vec, param,
                                            kirchner_param_min,
                                            kirchner_param_max)
        gamma_snow_p_opt = simulator.optimize(gs_time_axis,
                                              state_repos.get_state(0),
                                              target_spec_vec, kirchner_p_opt,
                                              gamma_snow_param_min,
                                              gamma_snow_param_max)
        print_param("Half way result", kirchner_p_opt)
        print_param("Result", gamma_snow_p_opt)

        simulator.region_model.set_catchment_parameter(cid, gamma_snow_p_opt)
        simulator.run(time_axis, state_repos.get_state(0))
        found_discharge = simulator.region_model.statistics.discharge([cid])
    def test_model_initialize_and_run(self):
        num_cells = 20
        model_type = pt_gs_k.PTGSKModel
        model = self.build_model(model_type, pt_gs_k.PTGSKParameter, num_cells)
        self.assertEqual(model.size(), num_cells)
        self.verify_state_handler(model)
        # demo of feature for threads
        self.assertGreaterEqual(model.ncore,
                                1)  # defaults to hardware concurrency
        model.ncore = 4  # set it to 4, and
        self.assertEqual(model.ncore, 4)  # verify it works

        # now modify snow_cv forest_factor to 0.1
        region_parameter = model.get_region_parameter()
        region_parameter.gs.snow_cv_forest_factor = 0.1
        region_parameter.gs.snow_cv_altitude_factor = 0.0001
        self.assertEqual(region_parameter.gs.snow_cv_forest_factor, 0.1)
        self.assertEqual(region_parameter.gs.snow_cv_altitude_factor, 0.0001)

        self.assertAlmostEqual(region_parameter.gs.effective_snow_cv(1.0, 0.0),
                               region_parameter.gs.snow_cv + 0.1)
        self.assertAlmostEqual(
            region_parameter.gs.effective_snow_cv(1.0, 1000.0),
            region_parameter.gs.snow_cv + 0.1 + 0.1)
        cal = api.Calendar()
        time_axis = api.TimeAxisFixedDeltaT(cal.time(2015, 1, 1, 0, 0, 0),
                                            api.deltahours(1), 240)
        model_interpolation_parameter = api.InterpolationParameter()
        # degC/m, so -0.5 degC/100m
        model_interpolation_parameter.temperature_idw.default_temp_gradient = -0.005
        # if possible, use the closest neighbor points and solve the gradient by equation (otherwise use default min/max height)
        model_interpolation_parameter.temperature_idw.gradient_by_equation = True
        # Max number of temperature sources used for one interpolation
        model_interpolation_parameter.temperature_idw.max_members = 6
        # 20 km is max distance
        model_interpolation_parameter.temperature_idw.max_distance = 20000
        # zscale is used to discriminate neighbors at different elevation than target point
        self.assertAlmostEqual(
            model_interpolation_parameter.temperature_idw.zscale, 1.0)
        model_interpolation_parameter.temperature_idw.zscale = 0.5
        self.assertAlmostEqual(
            model_interpolation_parameter.temperature_idw.zscale, 0.5)
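        # our reading (an assumption, not confirmed by the API docs): zscale weights the
        # vertical component of the IDW distance metric, so 0.5 halves the penalty for
        # neighbors at other elevations than the target point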
        # Pure linear interpolation
        model_interpolation_parameter.temperature_idw.distance_measure_factor = 1.0
        # This enables IDW with default temperature gradient.
        model_interpolation_parameter.use_idw_for_temperature = True
        self.assertAlmostEqual(
            model_interpolation_parameter.precipitation.scale_factor,
            1.02)  # just verify this one is as it was before the change to scale_factor
        model.initialize_cell_environment(
            time_axis
        )  # just show how we can split run_interpolation into two calls (the second one optional)
        model.interpolate(
            model_interpolation_parameter,
            self.create_dummy_region_environment(
                time_axis,
                model.get_cells()[int(num_cells / 2)].geo.mid_point()))
        m_ip_parameter = model.interpolation_parameter  # illustrate that we can get back the passed interpolation parameter as a property of the model
        self.assertEqual(
            m_ip_parameter.use_idw_for_temperature,
            True)  # just to ensure we really did get back what we passed in
        self.assertAlmostEqual(m_ip_parameter.temperature_idw.zscale, 0.5)
        s0 = pt_gs_k.PTGSKStateVector()
        for i in range(num_cells):
            si = pt_gs_k.PTGSKState()
            si.kirchner.q = 40.0
            s0.append(si)
        model.set_states(s0)
        model.set_state_collection(
            -1, True)  # enable state collection for all cells
        model2 = model_type(
            model
        )  # make a copy so that, in the stepwise run below, we get a clean copy with all values zero
        opt_model = pt_gs_k.create_opt_model_clone(
            model)  # this is how to make a model suitable for optimizer
        model.run_cells(
        )  # the default arguments apply: thread_cell_count=0, start_step=0, n_steps=0
        cids = api.IntVector(
        )  # optional, we can add selective catchment_ids here
        sum_discharge = model.statistics.discharge(cids)
        sum_discharge_value = model.statistics.discharge_value(
            cids, 0)  # at the first timestep
        sum_charge = model.statistics.charge(cids)
        sum_charge_value = model.statistics.charge_value(cids, 0)
        ae_output = model.actual_evaptranspiration_response.output(cids)
        ae_pot_ratio = model.actual_evaptranspiration_response.pot_ratio(cids)
        self.assertIsNotNone(ae_output)
        self.assertAlmostEqual(ae_output.values.to_numpy().max(),
                               0.189214067680088)
        self.assertIsNotNone(ae_pot_ratio)
        self.assertAlmostEqual(ae_pot_ratio.values.to_numpy().min(),
                               0.9999330003895371)
        self.assertAlmostEqual(ae_pot_ratio.values.to_numpy().max(), 1.0)
        opt_model.run_cells(
        )  # starting out with the same state, same interpolated values, and region-parameters, we should get same results
        sum_discharge_opt_value = opt_model.statistics.discharge_value(cids, 0)
        self.assertAlmostEqual(
            sum_discharge_opt_value, sum_discharge_value,
            3)  # verify the opt_model clone gives same value
        self.assertGreaterEqual(sum_discharge_value, 130.0)
        opt_model.region_env.temperature[0].ts.set(
            0, 23.2
        )  # verify that region-env is different (no aliasing, a true copy is required)
        self.assertFalse(
            abs(model.region_env.temperature[0].ts.value(0) -
                opt_model.region_env.temperature[0].ts.value(0)) > 0.5)

        #
        # check values
        #
        self.assertIsNotNone(sum_discharge)
        # now, re-run the process in 24-hours steps x 10
        model.set_states(s0)  # restore state s0
        self.assertEqual(s0.size(), model.initial_state.size())
        for do_collect_state in [False, True]:
            model2.set_state_collection(
                -1, do_collect_state
            )  # issue reported by Yisak, prior to 21.3, this would crash
            model2.set_states(s0)
            # now, after the fix, it works OK
            for section in range(10):
                model2.run_cells(use_ncore=0,
                                 start_step=section * 24,
                                 n_steps=24)
                section_discharge = model2.statistics.discharge(cids)
                self.assertEqual(section_discharge.size(), sum_discharge.size(
                ))  # notice here that the values after current step are 0.0
        stepwise_sum_discharge = model2.statistics.discharge(cids)
        # assert stepwise_sum_discharge == sum_discharge
        diff_ts = sum_discharge.values.to_numpy(
        ) - stepwise_sum_discharge.values.to_numpy()
        self.assertAlmostEqual((diff_ts * diff_ts).max(), 0.0, 4)
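        # (max of squared differences ~0 is used instead of exact equality, to
        # tolerate floating-point round-off between the one-shot and stepwise runs)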
        # Verify that passing in illegal cids raises an exception (with the first failing id)
        try:
            illegal_cids = api.IntVector([0, 4, 5])
            model.statistics.discharge(illegal_cids)
            self.fail("using illegal cids should raise an exception")
        except RuntimeError:
            pass  # expected

        avg_temperature = model.statistics.temperature(cids)
        avg_precipitation = model.statistics.precipitation(cids)
        self.assertIsNotNone(avg_precipitation)
        for time_step in range(time_axis.size()):
            precip_raster = model.statistics.precipitation(
                cids, time_step)  # example raster output
            self.assertEqual(precip_raster.size(), num_cells)
        # example single value spatial aggregation (area-weighted) over cids for a specific timestep
        avg_gs_sc_value = model.gamma_snow_response.sca_value(cids, 1)
        self.assertGreaterEqual(avg_gs_sc_value, 0.0)
        avg_gs_sca = model.gamma_snow_response.sca(cids)  # sca output (snow-covered area)
        self.assertIsNotNone(avg_gs_sca)
        # other gamma-snow series available: lwc surface_heat alpha melt_mean melt iso_pot_energy temp_sw
        avg_gs_albedo = model.gamma_snow_state.albedo(cids)
        self.assertIsNotNone(avg_gs_albedo)
        self.assertEqual(avg_temperature.size(), time_axis.size(),
                         "expect results equal to time-axis size")
        copy_region_model = model.__class__(model)
        self.assertIsNotNone(copy_region_model)
        copy_region_model.run_cells(
        )  # just to verify we can copy and run the new model
        #
        # Play with routing and river-network
        #
        # 1st: add a river with 3000 m hydro length and a UHGParameter (velocity 1/3.6, alpha 7.0, beta 0.0)
        model.river_network.add(
            api.River(1, api.RoutingInfo(0, 3000.0),
                      api.UHGParameter(1 / 3.60, 7.0, 0.0)))  # river id =1
        # 2nd: let cells route to the river
        model.connect_catchment_to_river(
            0, 1)  # now all cells in catchment 0 route to the river with id 1
        self.assertTrue(model.has_routing())
        # 3rd: now we can have a look at water coming in and out
        river_out_m3s = model.river_output_flow_m3s(
            1)  # should be delayed and reshaped
        river_local_m3s = model.river_local_inflow_m3s(
            1
        )  # should be equal to cell outputs (no routing stuff from cell to river)
        river_upstream_inflow_m3s = model.river_upstream_inflow_m3s(
            1
        )  # should be 0.0 in this case, since we do not have a routing network
        self.assertIsNotNone(river_out_m3s)
        self.assertAlmostEqual(river_out_m3s.value(8), 31.57297, 0)
        self.assertIsNotNone(river_local_m3s)
        self.assertIsNotNone(river_upstream_inflow_m3s)
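        # hedged sanity relation: with no upstream routing network, the river output
        # is just the local cell inflow, delayed and reshaped by the river's UHG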
        model.connect_catchment_to_river(0, 0)
        self.assertFalse(model.has_routing())
        #
        # Test the state-adjustments interfaces
        #

        q_0 = model.cells[0].state.kirchner.q
        model.adjust_q(2.0, cids)
        q_1 = model.cells[0].state.kirchner.q
        self.assertAlmostEqual(q_0 * 2.0, q_1)
        model.revert_to_initial_state()  # ensure we have a known state
        model.run_cells(0, 10, 1)  # just run step 10
        q_avg = model.statistics.discharge_value(
            cids, 10)  # get out the discharge for step 10
        x = 0.7  # we want x*q_avg as target
        model.revert_to_initial_state(
        )  # important, need start state for the test here
        adjust_result = model.adjust_state_to_target_flow(
            x * q_avg,
            cids,
            start_step=10,
            scale_range=3.0,
            scale_eps=1e-3,
            max_iter=350
        )  # This is how to adjust state to observed average flow for cids for tstep 10
        self.assertEqual(len(adjust_result.diagnostics),
                         0)  # diagnostics should be empty if the adjustment succeeded
        self.assertAlmostEqual(adjust_result.q_r, q_avg * x,
                               3)  # verify we reached the target
        self.assertAlmostEqual(adjust_result.q_0, q_avg, 3)  # q_0 is the flow before adjustment

    def test_optimization_model(self):
        num_cells = 20
        model_type = pt_gs_k.PTGSKOptModel
        model = self.build_model(model_type, pt_gs_k.PTGSKParameter,
                                 num_cells)
        cal = api.Calendar()
        t0 = cal.time(2015, 1, 1, 0, 0, 0)
        dt = api.deltahours(1)
        n = 240
        time_axis = api.TimeAxisFixedDeltaT(t0, dt, n)
        model_interpolation_parameter = api.InterpolationParameter()
        model.initialize_cell_environment(
            time_axis
        )  # just show how we can split run_interpolation into two calls (the second one optional)
        model.interpolate(
            model_interpolation_parameter,
            self.create_dummy_region_environment(
                time_axis,
                model.get_cells()[int(num_cells / 2)].geo.mid_point()))
        s0 = pt_gs_k.PTGSKStateVector()
        for i in range(num_cells):
            si = pt_gs_k.PTGSKState()
            si.kirchner.q = 40.0
            s0.append(si)
        model.set_states(
            s0
        )  # at this point the initial state of the model is established as well
        model.run_cells()
        cids = api.IntVector.from_numpy(
            [0])  # optional, we can add selective catchment_ids here
        sum_discharge = model.statistics.discharge(cids)
        sum_discharge_value = model.statistics.discharge_value(
            cids, 0)  # at the first timestep
        self.assertGreaterEqual(sum_discharge_value, 130.0)
        # verify we can construct an optimizer
        optimizer = model_type.optimizer_t(
            model
        )  # notice that a model type knows its optimizer type, e.g. PTGSKOptimizer
        self.assertIsNotNone(optimizer)
        #
        # create target specification
        #
        model.revert_to_initial_state(
        )  # set_states(s0)  # remember to set s0 again, so we have the same initial condition for the comparison
        tsa = api.TsTransform().to_average(t0, dt, n, sum_discharge)
        t_spec_1 = api.TargetSpecificationPts(tsa, cids, 1.0,
                                              api.KLING_GUPTA, 1.0, 0.0,
                                              0.0, api.DISCHARGE,
                                              'test_uid')

        target_spec = api.TargetSpecificationVector()
        target_spec.append(t_spec_1)
        upper_bound = model_type.parameter_t(model.get_region_parameter(
        ))  # the model_type knows its parameter_t
        lower_bound = model_type.parameter_t(model.get_region_parameter())
        upper_bound.kirchner.c1 = -1.9
        lower_bound.kirchner.c1 = -3.0
        upper_bound.kirchner.c2 = 0.99
        lower_bound.kirchner.c2 = 0.80

        optimizer.set_target_specification(target_spec, lower_bound,
                                           upper_bound)
        # Not needed, it will automatically get one.
        # optimizer.establish_initial_state_from_model()
        # s0_0 = optimizer.get_initial_state(0)
        # optimizer.set_verbose_level(1000)
        p0 = model_type.parameter_t(model.get_region_parameter())
        orig_c1 = p0.kirchner.c1
        orig_c2 = p0.kirchner.c2
        # model.get_cells()[0].env_ts.precipitation.set(0, 5.1)
        # model.get_cells()[0].env_ts.precipitation.set(1, 4.9)
        p0.kirchner.c1 = -2.4
        p0.kirchner.c2 = 0.91
        opt_param = optimizer.optimize(p0, 1500, 0.1, 1e-5)
        goal_fx = optimizer.calculate_goal_function(opt_param)
        p0.kirchner.c1 = -2.4
        p0.kirchner.c2 = 0.91
        # goal_fx1 = optimizer.calculate_goal_function(p0)

        self.assertLessEqual(goal_fx, 10.0)
        self.assertAlmostEqual(orig_c1, opt_param.kirchner.c1, 4)
        self.assertAlmostEqual(orig_c2, opt_param.kirchner.c2, 4)
    def test_hbv_model_initialize_and_run(self):
        num_cells = 20
        model_type = hbv_stack.HbvModel
        model = self.build_model(model_type, hbv_stack.HbvParameter, num_cells)
        self.assertEqual(model.size(), num_cells)
        opt_model = model.create_opt_model_clone()
        self.assertIsNotNone(opt_model)
        # now modify snow_cv forest_factor to 0.1
        region_parameter = model.get_region_parameter()
        # region_parameter.gs.snow_cv_forest_factor = 0.1
        # region_parameter.gs.snow_cv_altitude_factor = 0.0001
        # self.assertEqual(region_parameter.gs.snow_cv_forest_factor, 0.1)
        # self.assertEqual(region_parameter.gs.snow_cv_altitude_factor, 0.0001)

        # self.assertAlmostEqual(region_parameter.gs.effective_snow_cv(1.0, 0.0), region_parameter.gs.snow_cv + 0.1)
        # self.assertAlmostEqual(region_parameter.gs.effective_snow_cv(1.0, 1000.0), region_parameter.gs.snow_cv + 0.1 + 0.1)
        cal = api.Calendar()
        time_axis = api.TimeAxisFixedDeltaT(cal.time(2015, 1, 1, 0, 0, 0),
                                            api.deltahours(1), 240)
        model_interpolation_parameter = api.InterpolationParameter()
        # degC/m, so -0.5 degC/100m
        model_interpolation_parameter.temperature_idw.default_temp_gradient = -0.005
        # if possible, use the closest neighbor points and solve the gradient by equation (otherwise use default min/max height)
        model_interpolation_parameter.temperature_idw.gradient_by_equation = True
        # Max number of temperature sources used for one interpolation
        model_interpolation_parameter.temperature_idw.max_members = 6
        # 20 km is max distance
        model_interpolation_parameter.temperature_idw.max_distance = 20000
        # zscale is used to discriminate neighbors at different elevation than target point
        self.assertAlmostEqual(
            model_interpolation_parameter.temperature_idw.zscale, 1.0)
        model_interpolation_parameter.temperature_idw.zscale = 0.5
        self.assertAlmostEqual(
            model_interpolation_parameter.temperature_idw.zscale, 0.5)
        # Pure linear interpolation
        model_interpolation_parameter.temperature_idw.distance_measure_factor = 1.0
        # This enables IDW with default temperature gradient.
        model_interpolation_parameter.use_idw_for_temperature = True
        self.assertAlmostEqual(
            model_interpolation_parameter.precipitation.scale_factor,
            1.02)  # just verify this one is as it was before the change to scale_factor
        model.run_interpolation(
            model_interpolation_parameter, time_axis,
            self.create_dummy_region_environment(
                time_axis,
                model.get_cells()[int(num_cells / 2)].geo.mid_point()))
        s0 = hbv_stack.HbvStateVector()
        for i in range(num_cells):
            si = hbv_stack.HbvState()
            si.tank.uz = 40.0
            si.tank.lz = 40.0
            s0.append(si)
        model.set_states(s0)
        model.set_state_collection(-1, False)  # without state collection
        model.run_cells()
        model.set_states(s0)
        model.set_state_collection(-1, True)  # with collection
        model.run_cells()
        cids = api.IntVector(
        )  # optional, we can add selective catchment_ids here
        sum_discharge = model.statistics.discharge(cids)
        sum_discharge_value = model.statistics.discharge_value(
            cids, 0)  # at the first timestep
        self.assertGreaterEqual(sum_discharge_value, 32.0)
        self.assertIsNotNone(sum_discharge)
        # Verify that passing in illegal cids raises an exception (with the first failing id)
        try:
            illegal_cids = api.IntVector([0, 4, 5])
            model.statistics.discharge(illegal_cids)
            self.fail("using illegal cids should raise an exception")
        except RuntimeError:
            pass  # expected

        avg_temperature = model.statistics.temperature(cids)
        avg_precipitation = model.statistics.precipitation(cids)
        self.assertIsNotNone(avg_precipitation)
        for time_step in range(time_axis.size()):
            precip_raster = model.statistics.precipitation(
                cids, time_step)  # example raster output
            self.assertEqual(precip_raster.size(), num_cells)
        # example single value spatial aggregation (area-weighted) over cids for a specific timestep
        # avg_gs_sc_value = model.gamma_snow_response.sca_value(cids, 1)
        # self.assertGreaterEqual(avg_gs_sc_value,0.0)
        # avg_gs_sca = model.gamma_snow_response.sca(cids)  # swe output
        # self.assertIsNotNone(avg_gs_sca)
        # lwc surface_heat alpha melt_mean melt iso_pot_energy temp_sw
        # avg_gs_albedo = model.gamma_snow_state.albedo(cids)
        # self.assertIsNotNone(avg_gs_albedo)
        self.assertEqual(avg_temperature.size(), time_axis.size(),
                         "expect results equal to time-axis size")
        copy_region_model = model.__class__(model)
        self.assertIsNotNone(copy_region_model)
        copy_region_model.run_cells(
        )  # just to verify we can copy and run the new model
    def test_model_initialize_and_run(self):
        num_cells = 20
        model_type = pt_gs_k.PTGSKModel
        model = self.build_model(model_type,
                                 pt_gs_k.PTGSKParameter,
                                 num_cells,
                                 num_catchments=1)
        self.assertEqual(model.size(), num_cells)
        self.verify_state_handler(model)
        # demo of feature for threads
        self.assertGreaterEqual(model.ncore,
                                1)  # defaults to hardware concurrency
        model.ncore = 4  # set it to 4, and
        self.assertEqual(model.ncore, 4)  # verify it works

        # now modify snow_cv forest_factor to 0.1
        region_parameter = model.get_region_parameter()
        region_parameter.gs.snow_cv_forest_factor = 0.1
        region_parameter.gs.snow_cv_altitude_factor = 0.0001
        self.assertEqual(region_parameter.gs.snow_cv_forest_factor, 0.1)
        self.assertEqual(region_parameter.gs.snow_cv_altitude_factor, 0.0001)

        self.assertAlmostEqual(region_parameter.gs.effective_snow_cv(1.0, 0.0),
                               region_parameter.gs.snow_cv + 0.1)
        self.assertAlmostEqual(
            region_parameter.gs.effective_snow_cv(1.0, 1000.0),
            region_parameter.gs.snow_cv + 0.1 + 0.1)
        cal = api.Calendar()
        time_axis = api.TimeAxisFixedDeltaT(cal.time(2015, 1, 1, 0, 0, 0),
                                            api.deltahours(1), 240)
        model_interpolation_parameter = api.InterpolationParameter()
        # degC/m, so -0.5 degC/100m
        model_interpolation_parameter.temperature_idw.default_temp_gradient = -0.005
        # if possible, use the closest neighbor points and solve the gradient by equation (otherwise use default min/max height)
        model_interpolation_parameter.temperature_idw.gradient_by_equation = True
        # Max number of temperature sources used for one interpolation
        model_interpolation_parameter.temperature_idw.max_members = 6
        # 20 km is max distance
        model_interpolation_parameter.temperature_idw.max_distance = 20000
        # zscale is used to discriminate neighbors at different elevation than target point
        self.assertAlmostEqual(
            model_interpolation_parameter.temperature_idw.zscale, 1.0)
        model_interpolation_parameter.temperature_idw.zscale = 0.5
        self.assertAlmostEqual(
            model_interpolation_parameter.temperature_idw.zscale, 0.5)
        # Pure linear interpolation
        model_interpolation_parameter.temperature_idw.distance_measure_factor = 1.0
        # This enables IDW with default temperature gradient.
        model_interpolation_parameter.use_idw_for_temperature = True
        self.assertAlmostEqual(
            model_interpolation_parameter.precipitation.scale_factor,
            1.02)  # just verify this one is as it was before the change to scale_factor
        model.initialize_cell_environment(
            time_axis
        )  # just show how we can split run_interpolation into two calls (the second one optional)
        model.interpolate(
            model_interpolation_parameter,
            self.create_dummy_region_environment(
                time_axis,
                model.get_cells()[int(num_cells / 2)].geo.mid_point()))
        m_ip_parameter = model.interpolation_parameter  # illustrate that we can get back the passed interpolation parameter as a property of the model
        self.assertEqual(
            m_ip_parameter.use_idw_for_temperature,
            True)  # just to ensure we really did get back what we passed in
        self.assertAlmostEqual(m_ip_parameter.temperature_idw.zscale, 0.5)
        #
        # Section to demo that we can ensure that model.cells[].env_ts.xxx
        # have finite-values only
        #
        for env_ts_x in [
                model.cells[0].env_ts.temperature,
                model.cells[0].env_ts.precipitation,
                model.cells[0].env_ts.rel_hum, model.cells[0].env_ts.radiation,
                model.cells[0].env_ts.wind_speed
        ]:
            self.assertTrue(
                model.is_cell_env_ts_ok()
            )  # demo how to verify that cell.env_ts is valid prior to a run
            vx = env_ts_x.value(0)  # save value
            env_ts_x.set(0, float('nan'))  # insert a nan
            self.assertFalse(model.is_cell_env_ts_ok()
                             )  # demo that it returns False if a NaN is present in cell.env_ts
            env_ts_x.set(0, vx)  # insert back original value
            self.assertTrue(model.is_cell_env_ts_ok())  # ready to run again
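        # checking is_cell_env_ts_ok() up front is cheap insurance: a single NaN in any
        # forcing series could otherwise propagate through the whole cell run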

        s0 = pt_gs_k.PTGSKStateVector()
        for i in range(num_cells):
            si = pt_gs_k.PTGSKState()
            si.kirchner.q = 40.0
            s0.append(si)
        model.set_states(s0)
        model.set_state_collection(
            -1, True)  # enable state collection for all cells
        model2 = model_type(
            model
        )  # make a copy so that, in the stepwise run below, we get a clean copy with all values zero
        opt_model = pt_gs_k.create_opt_model_clone(
            model)  # this is how to make a model suitable for optimizer
        model.run_cells(
        )  # the default arguments apply: thread_cell_count=0, start_step=0, n_steps=0
        cids = api.IntVector(
        )  # optional, we can add selective catchment_ids here
        sum_discharge = model.statistics.discharge(cids)
        sum_discharge_value = model.statistics.discharge_value(
            cids, 0)  # at the first timestep
        sum_charge = model.statistics.charge(cids)
        sum_charge_value = model.statistics.charge_value(cids, 0)
        self.assertAlmostEqual(sum_charge_value, -110.6998, places=2)
        self.assertAlmostEqual(sum_charge.values[0],
                               sum_charge_value,
                               places=5)
        cell_charge = model.statistics.charge_value(
            api.IntVector([0, 1, 3]), 0, ix_type=api.stat_scope.cell)
        self.assertAlmostEqual(cell_charge, -16.7138, places=2)
        charge_sum_1_2_6 = model.statistics.charge(
            api.IntVector([1, 2, 6]),
            ix_type=api.stat_scope.cell).values.to_numpy().sum()
        self.assertAlmostEqual(charge_sum_1_2_6, 107.3981, places=2)
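        # our reading (an assumption): ix_type=api.stat_scope.cell makes the IntVector
        # select individual cells by index, rather than catchments by id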
        ae_output = model.actual_evaptranspiration_response.output(cids)
        ae_pot_ratio = model.actual_evaptranspiration_response.pot_ratio(cids)
        self.assertIsNotNone(ae_output)
        self.assertAlmostEqual(ae_output.values.to_numpy().max(),
                               0.189214067680088)
        self.assertIsNotNone(ae_pot_ratio)
        self.assertAlmostEqual(ae_pot_ratio.values.to_numpy().min(),
                               0.9995599424191931)
        self.assertAlmostEqual(ae_pot_ratio.values.to_numpy().max(), 1.0)
        opt_model.run_cells(
        )  # starting out with the same state, same interpolated values, and region-parameters, we should get same results
        sum_discharge_opt_value = opt_model.statistics.discharge_value(cids, 0)
        self.assertAlmostEqual(
            sum_discharge_opt_value, sum_discharge_value,
            3)  # verify the opt_model clone gives same value
        self.assertGreaterEqual(sum_discharge_value, 130.0)
        opt_model.region_env.temperature[0].ts.set(
            0, 23.2
        )  # verify that region-env is different (no aliasing, a true copy is required)
        self.assertFalse(
            abs(model.region_env.temperature[0].ts.value(0) -
                opt_model.region_env.temperature[0].ts.value(0)) > 0.5)

        #
        # check values
        #
        self.assertIsNotNone(sum_discharge)
        # now, re-run the process in 24-hours steps x 10
        model.set_states(s0)  # restore state s0
        self.assertEqual(s0.size(), model.initial_state.size())
        for do_collect_state in [False, True]:
            model2.set_state_collection(
                -1, do_collect_state
            )  # issue reported by Yisak, prior to 21.3, this would crash
            model2.set_states(s0)
            # now, after the fix, it works OK
            for section in range(10):
                model2.run_cells(use_ncore=0,
                                 start_step=section * 24,
                                 n_steps=24)
                section_discharge = model2.statistics.discharge(cids)
                self.assertEqual(section_discharge.size(), sum_discharge.size(
                ))  # notice here that the values after current step are 0.0
        stepwise_sum_discharge = model2.statistics.discharge(cids)
        # assert stepwise_sum_discharge == sum_discharge
        diff_ts = sum_discharge.values.to_numpy(
        ) - stepwise_sum_discharge.values.to_numpy()
        self.assertAlmostEqual((diff_ts * diff_ts).max(), 0.0, 4)
        # Verify that passing in illegal cids raises an exception (with the first failing id)
        try:
            illegal_cids = api.IntVector([0, 4, 5])
            model.statistics.discharge(illegal_cids)
            self.fail("using illegal cids should raise an exception")
        except RuntimeError:
            pass  # expected

        avg_temperature = model.statistics.temperature(cids)
        avg_precipitation = model.statistics.precipitation(cids)
        self.assertIsNotNone(avg_precipitation)
        for time_step in range(time_axis.size()):
            precip_raster = model.statistics.precipitation(
                cids, time_step)  # example raster output
            self.assertEqual(precip_raster.size(), num_cells)
        # example single value spatial aggregation (area-weighted) over cids for a specific timestep
        avg_gs_sc_value = model.gamma_snow_response.sca_value(cids, 1)
        self.assertGreaterEqual(avg_gs_sc_value, 0.0)
        avg_gs_sca = model.gamma_snow_response.sca(cids)  # sca output (snow-covered area)
        self.assertIsNotNone(avg_gs_sca)
        # other gamma-snow series available: lwc surface_heat alpha melt_mean melt iso_pot_energy temp_sw
        avg_gs_albedo = model.gamma_snow_state.albedo(cids)
        self.assertIsNotNone(avg_gs_albedo)
        self.assertEqual(avg_temperature.size(), time_axis.size(),
                         "expect results equal to time-axis size")
        copy_region_model = model.__class__(model)
        self.assertIsNotNone(copy_region_model)
        copy_region_model.run_cells(
        )  # just to verify we can copy and run the new model
        #
        # Play with routing and river-network
        #
        # 1st: add a river with 3000 m hydro length and a UHGParameter (velocity 1/3.6, alpha 7.0, beta 0.0)
        model.river_network.add(
            api.River(1, api.RoutingInfo(0, 3000.0),
                      api.UHGParameter(1 / 3.60, 7.0, 0.0)))  # river id =1
        # 2nd: let cells route to the river
        model.connect_catchment_to_river(
            1, 1)  # now all cells in catchment 1 route to the river with id 1
        self.assertTrue(model.has_routing())
        # 3rd: now we can have a look at water coming in and out
        river_out_m3s = model.river_output_flow_m3s(
            1)  # should be delayed and reshaped
        river_local_m3s = model.river_local_inflow_m3s(
            1
        )  # should be equal to cell outputs (no routing stuff from cell to river)
        river_upstream_inflow_m3s = model.river_upstream_inflow_m3s(
            1
        )  # should be 0.0 in this case, since we do not have a routing network
        self.assertIsNotNone(river_out_m3s)
        self.assertAlmostEqual(river_out_m3s.value(8), 28.061248025828114, 0)
        self.assertIsNotNone(river_local_m3s)
        self.assertIsNotNone(river_upstream_inflow_m3s)
        model.connect_catchment_to_river(1, 0)
        self.assertFalse(model.has_routing())
        #
        # Test the state-adjustments interfaces
        #

        q_0 = model.cells[0].state.kirchner.q
        model.adjust_q(2.0, cids)
        q_1 = model.cells[0].state.kirchner.q
        self.assertAlmostEqual(q_0 * 2.0, q_1)
        model.revert_to_initial_state()  # ensure we have a known state
        model.run_cells(0, 10, 2)  # just run steps 10 and 11
        q_avg = (model.statistics.discharge_value(cids, 10) +
                 model.statistics.discharge_value(cids, 11)
                 ) / 2.0  # average discharge over steps 10 and 11
        x = 0.7  # we want x*q_avg as target
        model.revert_to_initial_state(
        )  # important, need start state for the test here
        adjust_result = model.adjust_state_to_target_flow(
            x * q_avg,
            cids,
            start_step=10,
            scale_range=3.0,
            scale_eps=1e-3,
            max_iter=350,
            n_steps=2
        )  # This is how to adjust state to observed average flow for cids for tstep 10
        self.assertEqual(len(adjust_result.diagnostics),
                         0)  # diagnostics should be empty if the adjustment succeeded
        self.assertAlmostEqual(adjust_result.q_r, q_avg * x,
                               2)  # verify we reached the target
        self.assertAlmostEqual(adjust_result.q_0, q_avg, 2)  # q_0 is the flow before adjustment
        # now verify what happens if we put in a bad (NaN) observed value
        adjust_result = model.adjust_state_to_target_flow(float('nan'),
                                                          cids,
                                                          start_step=10,
                                                          scale_range=3.0,
                                                          scale_eps=1e-3,
                                                          max_iter=300,
                                                          n_steps=2)
        assert len(adjust_result.diagnostics
                   ) > 0, 'expect diagnostics length to be larger than 0'
        # then verify what happens if we put bad values into the simulated result
        model.cells[0].env_ts.temperature.set(10, float('nan'))
        adjust_result = model.adjust_state_to_target_flow(30.0,
                                                          cids,
                                                          start_step=10,
                                                          scale_range=3.0,
                                                          scale_eps=1e-3,
                                                          max_iter=300,
                                                          n_steps=2)
        assert len(adjust_result.diagnostics
                   ) > 0, 'expect diagnostics length to be larger than 0'
Example #28
    def run_calibration(self, model_t):
        def param_obj_2_dict(p_obj):
            """Flatten a parameter object into {routine: {param: value}} using its 'routine.param' names."""
            p_dict = {}
            for r, p in (nm.split('.')
                         for nm in (p_obj.get_name(i)
                                    for i in range(p_obj.size()))):
                p_dict.setdefault(r, {})[p] = getattr(getattr(p_obj, r), p)
            return p_dict
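        # hedged usage sketch (illustrative values only): param_obj_2_dict(model_t.parameter_t())
        # yields a nested dict per routine, e.g. {'kirchner': {'c1': -2.8, ...}, 'gs': {'tx': 0.0, ...}}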

        # set up configuration
        cfg = YAMLSimConfig(self.sim_config_file,
                            "neanidelva",
                            overrides={
                                'model': {
                                    'model_t':
                                    model_t,
                                    'model_parameters':
                                    param_obj_2_dict(model_t.parameter_t())
                                }
                            })

        # create a simulator
        simulator = DefaultSimulator(cfg.region_model_id,
                                     cfg.interpolation_id,
                                     cfg.get_region_model_repo(),
                                     cfg.get_geots_repo(),
                                     cfg.get_interp_repo(),
                                     initial_state_repository=None,
                                     catchments=None)
        time_axis = cfg.time_axis.__class__(cfg.time_axis.start,
                                            cfg.time_axis.delta_t, 2000)
        state_repos = DefaultStateRepository(simulator.region_model)
        s0 = state_repos.get_state(0)
        param = simulator.region_model.get_region_parameter()
        cid = 1228
        simulator.region_model.set_calculation_filter(api.IntVector(
            [cid]), api.IntVector())  # only this sub-catchment
        # not needed, we auto-initialize to default if not done explicitly
        # if model_t in [pt_hs_k.PTHSKOptModel]:
        #    for i in range(len(s0)):
        #        s0[i].snow.distribute(param.hs)
        simulator.run(time_axis=time_axis, state=s0)

        target_discharge_ts = simulator.region_model.statistics.discharge(
            [cid])
        cell_charge = simulator.region_model.get_cells(
        )[603].rc.avg_charge  # in m3/s for this cell
        assert cell_charge.values.to_numpy().max(
        ) > 0.001, 'some charge expected here'
        target_discharge_ts.set_point_interpretation(
            api.point_interpretation_policy.POINT_AVERAGE_VALUE)
        target_discharge = target_discharge_ts.average(
            target_discharge_ts.time_axis)
        # Perturb parameters
        p_vec_orig = [param.get(i) for i in range(param.size())]
        p_vec_min = p_vec_orig[:]
        p_vec_max = p_vec_orig[:]
        p_vec_guess = p_vec_orig[:]
        random.seed(0)
        p_names = []
        for i in range(2):
            p_names.append(param.get_name(i))
            p_vec_min[i] *= 0.9
            p_vec_max[i] *= 1.1
            p_vec_guess[i] = random.uniform(p_vec_min[i], p_vec_max[i])
            if p_vec_min[i] > p_vec_max[i]:
                p_vec_min[i], p_vec_max[i] = p_vec_max[i], p_vec_min[i]
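            # the swap matters for negative-valued parameters (e.g. a negative
            # kirchner c1), where *0.9 / *1.1 would otherwise leave min > max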
        p_min = simulator.region_model.parameter_t()
        p_max = simulator.region_model.parameter_t()
        p_guess = simulator.region_model.parameter_t()
        p_min.set(p_vec_min)
        p_max.set(p_vec_max)
        p_guess.set(p_vec_guess)

        # Find parameters
        target_spec = api.TargetSpecificationPts(target_discharge,
                                                 api.IntVector([cid]), 1.0,
                                                 api.NASH_SUTCLIFFE)
        target_spec_vec = api.TargetSpecificationVector(
        )  # ([target_spec]) does not yet work
        target_spec_vec.append(target_spec)
        self.assertEqual(simulator.optimizer.trace_size,
                         0)  # before optimize, trace_size should be 0
        p_opt = simulator.optimize(time_axis, s0, target_spec_vec, p_guess,
                                   p_min, p_max)
        self.assertGreater(simulator.optimizer.trace_size,
                           0)  # after opt, some trace values should be there
        # the trace values are in the order of appearance 0...trace_size-1
        #
        goal_fn_values = simulator.optimizer.trace_goal_function_values.to_numpy(
        )  # all of them, as np array
        self.assertEqual(len(goal_fn_values), simulator.optimizer.trace_size)
        p_last = simulator.optimizer.trace_parameter(
            simulator.optimizer.trace_size -
            1)  # get out the last (not necessarily the best)
        self.assertIsNotNone(p_last)
        simulator.region_model.set_catchment_parameter(cid, p_opt)
        simulator.run(time_axis, s0)
        found_discharge = simulator.region_model.statistics.discharge([cid])

        t_vs = np.array([
            target_discharge.value(i) for i in range(target_discharge.size())
        ])
        t_ts = np.array([
            int(target_discharge.time(i))
            for i in range(target_discharge.size())
        ])
        f_vs = np.array(
            [found_discharge.value(i) for i in range(found_discharge.size())])
        f_ts = np.array([
            int(found_discharge.time(i)) for i in range(found_discharge.size())
        ])
        self.assertTrue(np.linalg.norm(t_ts - f_ts) < 1.0e-10)
        print(np.linalg.norm(t_vs - f_vs), np.abs(t_vs - f_vs).max())
        self.assertTrue(np.linalg.norm(t_vs - f_vs) < 1.0e-3)
Example #29
    def test_snow_and_ground_water_response_calibration(self):
        """
        Test dual calibration strategy:
            * First fit the three Kirchner parameters for
              ground water response during July, August, and
              September.
            * Then fit two snow routine parameters (tx and max_water)
              from November to April.
        """
        # Simulation time axis
        year, month, day, hour = 2010, 9, 1, 0
        dt = api.deltahours(24)
        n_steps = 400
        utc = api.Calendar()  # No offset gives Utc
        t0 = utc.time(api.YMDhms(year, month, day, hour))
        time_axis = api.Timeaxis(t0, dt, n_steps)

        # Some fake ids
        region_id = 0
        interpolation_id = 0

        # Simulation coordinate system
        epsg = "32633"

        # Model
        model_t = pt_gs_k.PTGSKOptModel

        # Configs and repositories
        dataset_config_file = path.join(path.dirname(__file__), "netcdf",
                                        "atnsjoen_datasets.yaml")
        region_config_file = path.join(path.dirname(__file__), "netcdf",
                                       "atnsjoen_calibration_region.yaml")
        region_config = RegionConfig(region_config_file)
        model_config = ModelConfig(self.model_config_file)
        dataset_config = YamlContent(dataset_config_file)
        region_model_repository = RegionModelRepository(
            region_config, model_config, model_t, epsg)
        interp_repos = InterpolationParameterRepository(model_config)
        netcdf_geo_ts_repos = []
        for source in dataset_config.sources:
            station_file = source["params"]["stations_met"]
            netcdf_geo_ts_repos.append(
                GeoTsRepository(source["params"], station_file, ""))
        geo_ts_repository = GeoTsRepositoryCollection(netcdf_geo_ts_repos)

        # Construct target discharge series
        simulator = DefaultSimulator(region_id, interpolation_id,
                                     region_model_repository,
                                     geo_ts_repository, interp_repos, None)
        n_cells = simulator.region_model.size()
        state_repos = DefaultStateRepository(model_t, n_cells)
        simulator.run(time_axis, state_repos.get_state(0))
        cid = 1
        target_discharge = simulator.region_model.statistics.discharge([cid])

        # Construct kirchner parameters
        param = simulator.region_model.parameter_t(
            simulator.region_model.get_region_parameter())
        print_param("True solution", param)

        kirchner_param_min = simulator.region_model.parameter_t(param)
        kirchner_param_max = simulator.region_model.parameter_t(param)
        # Kirchner parameters are quite abstract (no direct physical meaning), so simply scale them
        kirchner_param_min.kirchner.c1 *= 0.8
        kirchner_param_min.kirchner.c2 *= 0.8
        kirchner_param_min.kirchner.c3 *= 0.8
        kirchner_param_max.kirchner.c1 *= 1.2
        kirchner_param_max.kirchner.c2 *= 1.2
        kirchner_param_max.kirchner.c3 *= 1.2
        # kirchner_t_start = utc.time(api.YMDhms(2011, 4, 1, 0))
        # kirchner_time_axis = api.Timeaxis(kirchner_t_start, dt, 150)
        kirchner_time_axis = time_axis

        # Construct gamma snow parameters (realistic tx and max_lwc)
        gamma_snow_param_min = simulator.region_model.parameter_t(param)
        gamma_snow_param_max = simulator.region_model.parameter_t(param)
        gamma_snow_param_min.gs.tx = -1.0  # Min snow/rain temperature threshold
        gamma_snow_param_min.gs.max_water = 0.05  # Min 5% max water in snow in coastal regions
        gamma_snow_param_max.gs.tx = 1.0
        gamma_snow_param_max.gs.max_water = 0.25  # Max 25% max water content, or we get too little melt
        gs_t_start = utc.time(api.YMDhms(2010, 11, 1, 0))
        gs_time_axis = api.Timeaxis(gs_t_start, dt, 250)
        # gs_time_axis = time_axis

        # Find parameters
        target_spec = api.TargetSpecificationPts(target_discharge,
                                                 api.IntVector([cid]), 1.0,
                                                 api.KLING_GUPTA)
        target_spec_vec = api.TargetSpecificationVector(
        )  # TODO: we don't currently support a list initializer for vectors
        target_spec_vec.append(target_spec)
        # Construct a fake, perturbed starting point for calibration
        p_vec = [param.get(i) for i in range(param.size())]
        for i, name in enumerate(
            [param.get_name(i) for i in range(len(p_vec))]):
            if name not in ("c1" "c2", "c3", "TX", "max_water"):
                next
            if name in ("c1", "c2", "c3"):
                p_vec[i] = random.uniform(0.8 * p_vec[i], 1.2 * p_vec[i])
            elif name == "TX":
                p_vec[i] = random.uniform(gamma_snow_param_min.gs.tx,
                                          gamma_snow_param_max.gs.tx)
            elif name == "max_water":
                p_vec[i] = random.uniform(gamma_snow_param_min.gs.max_water,
                                          gamma_snow_param_max.gs.max_water)
        param.set(p_vec)
        print_param("Initial guess", param)
        # Two-pass optimization: first for the ground water response, then for the snow routine
        kirchner_p_opt = simulator.optimize(kirchner_time_axis,
                                            state_repos.get_state(0),
                                            target_spec_vec, param,
                                            kirchner_param_min,
                                            kirchner_param_max)
        gamma_snow_p_opt = simulator.optimize(gs_time_axis,
                                              state_repos.get_state(0),
                                              target_spec_vec, kirchner_p_opt,
                                              gamma_snow_param_min,
                                              gamma_snow_param_max)
        print_param("Half way result", kirchner_p_opt)
        print_param("Result", gamma_snow_p_opt)

        simulator.region_model.set_catchment_parameter(cid, gamma_snow_p_opt)
        simulator.run(time_axis, state_repos.get_state(0))
        found_discharge = simulator.region_model.statistics.discharge([cid])

        t_vs = np.array(target_discharge.v)
        t_ts = np.array(
            [target_discharge.time(i) for i in range(target_discharge.size())])
        f_vs = np.array(found_discharge.v)
        f_ts = np.array(
            [found_discharge.time(i) for i in range(found_discharge.size())])
Example #30
    def test_model_initialize_and_run(self):
        num_cells = 20
        model_type = pt_gs_k.PTGSKModel
        model = self.build_model(model_type, pt_gs_k.PTGSKParameter, num_cells)
        self.assertEqual(model.size(), num_cells)
        self.verify_state_handler(model)
        # demo of feature for threads
        self.assertGreaterEqual(model.ncore,
                                1)  # defaults to hardware concurrency
        model.ncore = 4  # set it to 4, and
        self.assertEqual(model.ncore, 4)  # verify it works

        # now modify snow_cv forest_factor to 0.1
        region_parameter = model.get_region_parameter()
        region_parameter.gs.snow_cv_forest_factor = 0.1
        region_parameter.gs.snow_cv_altitude_factor = 0.0001
        self.assertEqual(region_parameter.gs.snow_cv_forest_factor, 0.1)
        self.assertEqual(region_parameter.gs.snow_cv_altitude_factor, 0.0001)

        self.assertAlmostEqual(region_parameter.gs.effective_snow_cv(1.0, 0.0),
                               region_parameter.gs.snow_cv + 0.1)
        self.assertAlmostEqual(
            region_parameter.gs.effective_snow_cv(1.0, 1000.0),
            region_parameter.gs.snow_cv + 0.1 + 0.1)
        cal = api.Calendar()
        time_axis = api.TimeAxisFixedDeltaT(cal.time(2015, 1, 1, 0, 0, 0),
                                            api.deltahours(1), 240)
        model_interpolation_parameter = api.InterpolationParameter()
        # degC/m, so -0.5 degC/100m
        model_interpolation_parameter.temperature_idw.default_temp_gradient = -0.005
        # if possible, use the closest neighbor points and solve the gradient by equation (otherwise use default min/max height)
        model_interpolation_parameter.temperature_idw.gradient_by_equation = True
        # Max number of temperature sources used for one interpolation
        model_interpolation_parameter.temperature_idw.max_members = 6
        # 20 km is max distance
        model_interpolation_parameter.temperature_idw.max_distance = 20000
        # zscale is used to discriminate neighbors at different elevation than target point
        self.assertAlmostEqual(
            model_interpolation_parameter.temperature_idw.zscale, 1.0)
        model_interpolation_parameter.temperature_idw.zscale = 0.5
        self.assertAlmostEqual(
            model_interpolation_parameter.temperature_idw.zscale, 0.5)
        # Pure linear interpolation
        model_interpolation_parameter.temperature_idw.distance_measure_factor = 1.0
        # This enables IDW with default temperature gradient.
        model_interpolation_parameter.use_idw_for_temperature = True
        self.assertAlmostEqual(
            model_interpolation_parameter.precipitation.scale_factor,
            1.02)  # just verify this one is as it was before the change to scale_factor
        model.initialize_cell_environment(
            time_axis
        )  # just show how we can split run_interpolation into two calls (the second one optional)
        model.interpolate(
            model_interpolation_parameter,
            self.create_dummy_region_environment(
                time_axis,
                model.get_cells()[int(num_cells / 2)].geo.mid_point()))
        m_ip_parameter = model.interpolation_parameter  # illustrate that we can get back the passed interpolation parameter as a property of the model
        self.assertEqual(
            m_ip_parameter.use_idw_for_temperature,
            True)  # just to ensure we really did get back what we passed in
        self.assertAlmostEqual(m_ip_parameter.temperature_idw.zscale, 0.5)
        s0 = pt_gs_k.PTGSKStateVector()
        for i in range(num_cells):
            si = pt_gs_k.PTGSKState()
            si.kirchner.q = 40.0
            s0.append(si)
        model.set_states(s0)
        model.set_state_collection(
            -1, True)  # enable state collection for all cells
        model2 = model_type(
            model
        )  # make a copy so that, in the stepwise run below, we get a clean copy with all values zero
        opt_model = pt_gs_k.create_opt_model_clone(
            model)  # this is how to make a model suitable for optimizer
        model.run_cells(
        )  # the default arguments apply: thread_cell_count=0, start_step=0, n_steps=0
        cids = api.IntVector(
        )  # optional, we can add selective catchment_ids here
        sum_discharge = model.statistics.discharge(cids)
        sum_discharge_value = model.statistics.discharge_value(
            cids, 0)  # at the first timestep
        sum_charge = model.statistics.charge(cids)
        sum_charge_value = model.statistics.charge_value(cids, 0)
        opt_model.run_cells(
        )  # starting out with the same state, same interpolated values, and region-parameters, we should get same results
        sum_discharge_opt_value = opt_model.statistics.discharge_value(cids, 0)
        self.assertAlmostEqual(
            sum_discharge_opt_value, sum_discharge_value,
            3)  # verify the opt_model clone gives same value
        self.assertGreaterEqual(sum_discharge_value, 130.0)
        opt_model.region_env.temperature[0].ts.set(
            0, 23.2
        )  # verify that region-env is different (no aliasing, a true copy is required)
        self.assertFalse(
            abs(model.region_env.temperature[0].ts.value(0) -
                opt_model.region_env.temperature[0].ts.value(0)) > 0.5)

        #
        # check values
        #
        self.assertIsNotNone(sum_discharge)
        # now, re-run the process in 24-hours steps x 10
        model.set_states(s0)  # restore state s0
        self.assertEqual(s0.size(), model.initial_state.size())
        for do_collect_state in [False, True]:
            model2.set_state_collection(
                -1, do_collect_state
            )  # issue reported by Yisak, prior to 21.3, this would crash
            model2.set_states(s0)
            # now, after the fix, it works OK
            for section in range(10):
                model2.run_cells(use_ncore=0,
                                 start_step=section * 24,
                                 n_steps=24)
                section_discharge = model2.statistics.discharge(cids)
                self.assertEqual(section_discharge.size(), sum_discharge.size(
                ))  # notice here that the values after current step are 0.0
        stepwise_sum_discharge = model2.statistics.discharge(cids)
        # assert stepwise_sum_discharge == sum_discharge
        diff_ts = sum_discharge.values.to_numpy(
        ) - stepwise_sum_discharge.values.to_numpy()
        self.assertAlmostEqual((diff_ts * diff_ts).max(), 0.0, 4)
        # Verify that passing in illegal cids raises an exception (with the first failing id)
        try:
            illegal_cids = api.IntVector([0, 4, 5])
            model.statistics.discharge(illegal_cids)
            self.fail("using illegal cids should raise an exception")
        except RuntimeError:
            pass  # expected

        avg_temperature = model.statistics.temperature(cids)
        avg_precipitation = model.statistics.precipitation(cids)
        self.assertIsNotNone(avg_precipitation)
        for time_step in range(time_axis.size()):
            precip_raster = model.statistics.precipitation(
                cids, time_step)  # example raster output
            self.assertEqual(precip_raster.size(), num_cells)
        # example single value spatial aggregation (area-weighted) over cids for a specific timestep
        avg_gs_sc_value = model.gamma_snow_response.sca_value(cids, 1)
        self.assertGreaterEqual(avg_gs_sc_value, 0.0)
        avg_gs_sca = model.gamma_snow_response.sca(cids)  # sca output (snow-covered area)
        self.assertIsNotNone(avg_gs_sca)
        # other gamma-snow series available: lwc surface_heat alpha melt_mean melt iso_pot_energy temp_sw
        avg_gs_albedo = model.gamma_snow_state.albedo(cids)
        self.assertIsNotNone(avg_gs_albedo)
        self.assertEqual(avg_temperature.size(), time_axis.size(),
                         "expect results equal to time-axis size")
        copy_region_model = model.__class__(model)
        self.assertIsNotNone(copy_region_model)
        copy_region_model.run_cells(
        )  # just to verify we can copy and run the new model
        #
        # Play with routing and river-network
        #
        # 1st: add a river with 3000 m hydro length and a UHGParameter (velocity 1/3.6, alpha 7.0, beta 0.0)
        model.river_network.add(
            api.River(1, api.RoutingInfo(0, 3000.0),
                      api.UHGParameter(1 / 3.60, 7.0, 0.0)))  # river id =1
        # 2nd: let cells route to the river
        model.connect_catchment_to_river(
            0, 1)  # now all cells in catchment 0 route to the river with id 1
        self.assertTrue(model.has_routing())
        # 3rd: now we can have a look at water coming in and out
        river_out_m3s = model.river_output_flow_m3s(
            1)  # should be delayed and reshaped
        river_local_m3s = model.river_local_inflow_m3s(
            1
        )  # should be equal to cell outputs (no routing stuff from cell to river)
        river_upstream_inflow_m3s = model.river_upstream_inflow_m3s(
            1
        )  # should be 0.0 in this case, since we do not have a routing network
        self.assertIsNotNone(river_out_m3s)
        self.assertAlmostEqual(river_out_m3s.value(8), 31.57297, 0)
        self.assertIsNotNone(river_local_m3s)
        self.assertIsNotNone(river_upstream_inflow_m3s)
        model.connect_catchment_to_river(0, 0)
        self.assertFalse(model.has_routing())