def _create_target_specvect(self):
    """Populate self.tv with one TargetSpecificationPts per configured
    target series, averaging each source ts onto the configured axis.

    Raises:
        ConfigSimulatorError: if any configured catchment id is not
            present in the region model's catchment_id_map.
    """
    self.tv = api.TargetSpecificationVector()
    transform = api.TsTransform()
    known_ids = self.region_model.catchment_id_map
    for ts_info in self._config.target_ts:
        wanted = ts_info['catch_id']
        # Count how many known ids the request hits; a mismatch means at
        # least one requested id is unknown (no id->index mapping needed,
        # the spec takes catchment ids directly).
        hits = np.in1d(known_ids, wanted)
        if np.count_nonzero(hits) != len(wanted):
            unknown = ','.join(str(val) for val in (i for i in wanted if i not in known_ids))
            raise ConfigSimulatorError(
                "Catchment index {} for target series {} not found.".format(unknown, ts_info['uid']))
        spec = api.TargetSpecificationPts()
        spec.catchment_indexes = api.IntVector(wanted)
        spec.scale_factor = ts_info['weight']
        obj_func = ts_info['obj_func']
        spec.calc_mode = self.obj_funcs[obj_func['name']]
        scaling = obj_func['scaling_factors']
        spec.s_r = scaling['s_corr']
        spec.s_a = scaling['s_var']
        spec.s_b = scaling['s_bias']
        # Re-sample the raw target ts onto the configured fixed-interval axis.
        spec.ts = transform.to_average(ts_info['start_datetime'],
                                       ts_info['run_time_step'],
                                       ts_info['number_of_steps'],
                                       ts_info['ts'])
        self.tv.append(spec)
def _create_target_specvect(self):
    """Create and return an api.TargetSpecificationVector from the 1D
    target time-series of every configured target repository.

    Returns:
        api.TargetSpecificationVector: one entry per target series.

    Raises:
        ConfigSimulatorError: if a target refers to catchment ids not in
            the region model, or its period lies outside the calibration
            period covered by self.time_axis.
    """
    print("Creating TargetSpecificationVector...")
    tv = api.TargetSpecificationVector()
    tst = api.TsTransform()
    cid_map = self.region_model.catchment_id_map
    for repo in self.target_repo:
        # Read all series of this repository in one batch over the whole
        # calibration period (cheaper than one read() per series).
        tsp = repo['repository'].read([ts_info['uid'] for ts_info in repo['1D_timeseries']],
                                      self.time_axis.total_period())
        for ts_info in repo['1D_timeseries']:
            if np.count_nonzero(np.in1d(cid_map, ts_info['catch_id'])) != len(ts_info['catch_id']):
                raise ConfigSimulatorError("Catchment ID {} for target series {} not found.".format(
                    ','.join([str(val) for val in [i for i in ts_info['catch_id'] if i not in cid_map]]),
                    ts_info['uid']))
            period = api.UtcPeriod(ts_info['start_datetime'],
                                   ts_info['start_datetime'] + ts_info['number_of_steps'] * ts_info['run_time_step'])
            if not self.time_axis.total_period().contains(period):
                raise ConfigSimulatorError(
                    "Period {} for target series {} is not within the calibration period {}.".format(
                        period.to_string(), ts_info['uid'], self.time_axis.total_period().to_string()))
            t = api.TargetSpecificationPts()
            t.uid = ts_info['uid']
            t.catchment_indexes = api.IntVector(ts_info['catch_id'])
            t.scale_factor = ts_info['weight']
            t.calc_mode = self.obj_funcs[ts_info['obj_func']['name']]
            # Plain assignments instead of the original side-effecting
            # list comprehension over setattr (comprehensions are for
            # building values, not for side effects).
            scaling = ts_info['obj_func']['scaling_factors']
            t.s_r = scaling['s_corr']
            t.s_a = scaling['s_var']
            t.s_b = scaling['s_bias']
            t.ts = api.TimeSeries(tst.to_average(ts_info['start_datetime'], ts_info['run_time_step'],
                                                 ts_info['number_of_steps'], tsp[ts_info['uid']]))
            tv.append(t)
    return tv
def run_calibration(self, model_t):
    """Round-trip calibration check for *model_t*: simulate a target
    discharge, perturb the first four region parameters, then verify the
    optimizer recovers a discharge matching the target."""
    # set up configuration
    config_dir = path.join(path.dirname(__file__), "netcdf")
    cfg = orchestration.YAMLConfig(
        "atnsjoen_calibration.yaml", "atnsjoen", config_dir=config_dir,
        data_dir=shyftdata_dir, model_t=model_t)
    time_axis = cfg.time_axis
    # get a simulator
    simulator = cfg.get_simulator()
    n_cells = simulator.region_model.size()
    state_repos = DefaultStateRepository(cfg.model_t, n_cells)
    simulator.run(time_axis, state_repos.get_state(0))
    cid = 1
    # Use the simulated discharge of catchment `cid` as the calibration target,
    # averaged onto the same fixed-interval axis as the run.
    target_discharge_ts = simulator.region_model.statistics.discharge([cid])
    target_discharge = api.TsTransform().to_average(time_axis.time(0),
                                                    time_axis.time(1) - time_axis.time(0),
                                                    time_axis.size(),
                                                    target_discharge_ts)
    # Perturb parameters
    param = simulator.region_model.get_region_parameter()
    p_vec_orig = [param.get(i) for i in range(param.size())]
    p_vec_min = p_vec_orig[:]
    p_vec_max = p_vec_orig[:]
    p_vec_guess = p_vec_orig[:]
    random.seed(0)  # deterministic perturbation so the test is reproducible
    p_names = []
    for i in range(4):  # only the first four parameters are perturbed/calibrated
        p_names.append(param.get_name(i))
        p_vec_min[i] *= 0.5
        p_vec_max[i] *= 1.5
        p_vec_guess[i] = random.uniform(p_vec_min[i], p_vec_max[i])
        # A negative original value flips the ordering of min/max; swap back.
        if p_vec_min[i] > p_vec_max[i]:
            p_vec_min[i], p_vec_max[i] = p_vec_max[i], p_vec_min[i]
    p_min = simulator.region_model.parameter_t()
    p_max = simulator.region_model.parameter_t()
    p_guess = simulator.region_model.parameter_t()
    p_min.set(p_vec_min)
    p_max.set(p_vec_max)
    p_guess.set(p_vec_guess)
    # Find parameters
    target_spec = api.TargetSpecificationPts(target_discharge, api.IntVector([cid]),
                                             1.0, api.KLING_GUPTA)
    target_spec_vec = api.TargetSpecificationVector()  # ([target_spec]) does not yet work
    target_spec_vec.append(target_spec)
    p_opt = simulator.optimize(time_axis, state_repos.get_state(0), target_spec_vec,
                               p_guess, p_min, p_max)
    # Re-run with the optimized parameters and compare against the target.
    simulator.region_model.set_catchment_parameter(cid, p_opt)
    simulator.run(time_axis, state_repos.get_state(0))
    found_discharge = simulator.region_model.statistics.discharge([cid])
    t_vs = np.array([target_discharge.value(i) for i in range(target_discharge.size())])
    t_ts = np.array([target_discharge.time(i) for i in range(target_discharge.size())])
    f_vs = np.array([found_discharge.value(i) for i in range(found_discharge.size())])
    f_ts = np.array([found_discharge.time(i) for i in range(found_discharge.size())])
    # Time points must match exactly; values to within the optimizer tolerance.
    self.assertTrue(np.linalg.norm(t_ts - f_ts) < 1.0e-10)
    self.assertTrue(np.linalg.norm(t_vs - f_vs) < 1.0e-3)
def _extraction_method_1d(self, ts_info):
    """Return a callable that extracts the requested 1D series from a
    region model, averaged onto the time-axis given in *ts_info*.

    Raises:
        ConfigSimulatorError: if any requested catchment id is not in
            the region model's catchment_id_map.
    """
    catch_ids = ts_info['catchment_id']
    axis = ts_info['time_axis']
    start, step, n_steps = axis.start, axis.delta_t, axis.size()
    transform = api.TsTransform()
    id_map = self.region_model.catchment_id_map
    # Validate the requested ids before handing out an extractor.
    if np.count_nonzero(np.in1d(id_map, catch_ids)) != len(catch_ids):
        unknown = ','.join(str(val) for val in (i for i in catch_ids if i not in id_map))
        raise ConfigSimulatorError("Global catchment index {} not found.".format(unknown))
    extractors = {
        'discharge': lambda m: transform.to_average(start, step, n_steps,
                                                    m.statistics.discharge(catch_ids)),
    }
    return extractors[ts_info['type']]
def test_create_TargetSpecificationPts(self):
    """Exercise TargetSpecificationPts construction, its KGE scale
    factors, and that TargetSpecificationVector copies the supplied ts."""
    t = api.TargetSpecificationPts()
    t.scale_factor = 1.0
    t.calc_mode = api.NASH_SUTCLIFFE
    t.calc_mode = api.KLING_GUPTA
    t.s_r = 1.0  # KGEs scale-factors
    t.s_a = 2.0
    t.s_b = 3.0
    self.assertAlmostEqual(t.scale_factor, 1.0)
    # create a ts with some points (deliberately sparse, with a trailing NaN)
    cal = api.Calendar()
    start = cal.time(api.YMDhms(2015, 1, 1, 0, 0, 0))
    dt = api.deltahours(1)
    tsf = api.TsFactory()
    times = api.UtcTimeVector()
    times.push_back(start + 1 * dt)
    times.push_back(start + 3 * dt)
    times.push_back(start + 4 * dt)
    values = api.DoubleVector()
    values.push_back(1.0)
    values.push_back(3.0)
    values.push_back(np.nan)
    tsp = tsf.create_time_point_ts(api.UtcPeriod(start, start + 24 * dt), times, values)
    # convert it from a time-point ts (as returned from the current smg repository)
    # to a fixed-interval ts with a time-axis, as needed by calibration
    tst = api.TsTransform()
    tsa = tst.to_average(start, dt, 24, tsp)
    # tsa2 = tst.to_average(start,dt,24,tsp,False)
    # tsa_staircase = tst.to_average_staircase(start,dt,24,tsp,False)  # nans infects the complete interval to nan
    # tsa_staircase2 = tst.to_average_staircase(start,dt,24,tsp,True)  # skip nans, nans are 0
    # stuff it into the target spec.
    # also show how to specify snow-calibration
    cids = api.IntVector([0, 2, 3])
    t2 = api.TargetSpecificationPts(tsa, cids, 0.7, api.KLING_GUPTA, 1.0, 1.0, 1.0, api.SNOW_COVERED_AREA)
    t2.catchment_property = api.SNOW_WATER_EQUIVALENT
    self.assertEqual(t2.catchment_property, api.SNOW_WATER_EQUIVALENT)
    self.assertIsNotNone(t2.catchment_indexes)
    for i in range(len(cids)):
        self.assertEqual(cids[i], t2.catchment_indexes[i])
    t.ts = tsa
    # TODO: tv = api.TargetSpecificationVector([t, t2]) does not work;
    # list of objects are not yet convertible — append one by one instead.
    tv = api.TargetSpecificationVector()
    tv.append(t)
    tv.append(t2)
    # now verify we got something ok
    self.assertEqual(2, tv.size())
    self.assertAlmostEqual(tv[0].ts.value(1), 1.5)  # interval average over the sparse points
    self.assertAlmostEqual(tv[0].ts.value(2), 2.5)
    self.assertAlmostEqual(tv[0].ts.value(3), 3.0)
    # and that the target vector now have its own copy of ts
    tsa.set(1, 3.0)
    self.assertAlmostEqual(tv[0].ts.value(1), 1.5)  # make sure the ts passed onto target spec, is a copy
    self.assertAlmostEqual(tsa.value(1), 3.0)  # and that we really did change the source
def test_ts_transform(self):
    """TsTransform.to_average should produce a series of the requested
    axis size for point-, time-point- and fixed-interval source series."""
    n = self.ta.size()
    values = api.DoubleVector.from_numpy(np.arange(n))
    time_points = api.UtcTimeVector()
    for i in range(n):
        time_points.push_back(self.ta(i).start)
    # time_points.push_back(self.ta(n - 1).end)  # important! needs n+1 points to determine n periods in the timeaxis
    t_start = self.ta.total_period().start
    dt = api.deltahours(1)
    # Target axis deliberately offset half an hour from the source axis.
    tax = api.TimeAxisFixedDeltaT(t_start + api.deltaminutes(30), dt, n)
    factory = api.TsFactory()
    sources = [
        factory.create_point_ts(n, self.t, self.d, values),
        factory.create_time_point_ts(self.ta.total_period(), time_points, values),
        api.TsFixed(tax, values, api.POINT_INSTANT_VALUE),
    ]
    transform = api.TsTransform()
    for src in sources:
        averaged = transform.to_average(t_start, dt, tax.size(), src)
        self.assertEqual(averaged.size(), tax.size())
def _create_target_specvect(self):
    """Populate self.tv with one TargetSpecificationPts per configured
    target series, mapping configured catchment ids to region-model
    indexes.

    Raises:
        ConfigSimulatorError: if any configured catchment id is unknown
            to the region model (previously such ids were silently
            dropped, yielding an empty/partial index vector).
    """
    self.tv = api.TargetSpecificationVector()
    tst = api.TsTransform()
    cid_map = self.region_model.catchment_id_map
    for ts_info in self._config.target_ts:
        cids = ts_info['catch_id']
        # Fail fast on unknown ids, consistent with the other target-spec
        # builders in this project.
        missing = [i for i in cids if i not in cid_map]
        if missing:
            raise ConfigSimulatorError(
                "Catchment index {} for target series {} not found.".format(
                    ','.join(str(i) for i in missing), ts_info.get('uid', '')))
        # Translate catchment ids to their positions in the id map.
        mapped_indx = [i for i, j in enumerate(cid_map) if j in cids]
        t = api.TargetSpecificationPts()
        t.catchment_indexes = api.IntVector(mapped_indx)
        t.scale_factor = ts_info['weight']
        t.calc_mode = self.obj_funcs[ts_info['obj_func']['name']]
        scaling = ts_info['obj_func']['scaling_factors']
        t.s_r = scaling['s_corr']
        t.s_a = scaling['s_var']
        t.s_b = scaling['s_bias']
        # Re-sample the raw target ts onto the configured fixed-interval axis.
        t.ts = tst.to_average(ts_info['start_datetime'], ts_info['run_time_step'],
                              ts_info['number_of_steps'], ts_info['ts'])
        self.tv.append(t)
def test_create_TargetSpecificationPts(self):
    """Exercise TargetSpecificationPts (uid, calc modes, KGE factors),
    vector slice-assignment/cloning semantics, and construction from a
    breakpoint time-series."""
    t = api.TargetSpecificationPts()
    t.scale_factor = 1.0
    t.calc_mode = api.NASH_SUTCLIFFE
    t.calc_mode = api.KLING_GUPTA
    t.calc_mode = api.ABS_DIFF
    t.calc_mode = api.RMSE
    t.s_r = 1.0  # KGEs scale-factors
    t.s_a = 2.0
    t.s_b = 3.0
    self.assertIsNotNone(t.uid)
    t.uid = 'test'
    self.assertEqual(t.uid, 'test')
    self.assertAlmostEqual(t.scale_factor, 1.0)
    # create a ts with some points (deliberately sparse, with a trailing NaN)
    cal = api.Calendar()
    start = cal.time(2015, 1, 1, 0, 0, 0)
    dt = api.deltahours(1)
    tsf = api.TsFactory()
    times = api.UtcTimeVector()
    times.push_back(start + 1 * dt)
    times.push_back(start + 3 * dt)
    times.push_back(start + 4 * dt)
    values = api.DoubleVector()
    values.push_back(1.0)
    values.push_back(3.0)
    values.push_back(np.nan)
    tsp = tsf.create_time_point_ts(api.UtcPeriod(start, start + 24 * dt), times, values)
    # convert it from a time-point ts (as returned from the current smg repository)
    # to a fixed-interval ts with a time-axis, as needed by calibration
    tst = api.TsTransform()
    tsa = tst.to_average(start, dt, 24, tsp)
    # tsa2 = tst.to_average(start,dt,24,tsp,False)
    # tsa_staircase = tst.to_average_staircase(start,dt,24,tsp,False)  # nans infects the complete interval to nan
    # tsa_staircase2 = tst.to_average_staircase(start,dt,24,tsp,True)  # skip nans, nans are 0
    # stuff it into the target spec.
    # also show how to specify snow-calibration
    cids = api.IntVector([0, 2, 3])
    t2 = api.TargetSpecificationPts(tsa, cids, 0.7, api.KLING_GUPTA, 1.0, 1.0, 1.0,
                                    api.SNOW_COVERED_AREA, 'test_uid')
    self.assertEqual(t2.uid, 'test_uid')
    t2.catchment_property = api.SNOW_WATER_EQUIVALENT
    self.assertEqual(t2.catchment_property, api.SNOW_WATER_EQUIVALENT)
    t2.catchment_property = api.CELL_CHARGE
    self.assertEqual(t2.catchment_property, api.CELL_CHARGE)
    self.assertIsNotNone(t2.catchment_indexes)
    for i in range(len(cids)):
        self.assertEqual(cids[i], t2.catchment_indexes[i])
    t.ts = api.TimeSeries(tsa)  # target spec is now a regular TimeSeries
    tv = api.TargetSpecificationVector()
    tv[:] = [t, t2]  # slice assignment now works for these vectors
    # now verify we got something ok
    self.assertEqual(2, tv.size())
    self.assertAlmostEqual(tv[0].ts.value(1), 1.5)  # interval average over the sparse points
    self.assertAlmostEqual(tv[0].ts.value(2), 2.5)
    # self.assertAlmostEqual(tv[0].ts.value(3), 3.0)  # original flat out at end, but now:
    self.assertTrue(math.isnan(tv[0].ts.value(3)))  # strictly linear between points.
    # and that the target vector now have its own copy of ts
    tsa.set(1, 3.0)
    self.assertAlmostEqual(tv[0].ts.value(1), 1.5)  # make sure the ts passed onto target spec, is a copy
    self.assertAlmostEqual(tsa.value(1), 3.0)  # and that we really did change the source
    # Create a clone of target specification vector
    tv2 = api.TargetSpecificationVector(tv)
    self.assertEqual(2, tv2.size())
    self.assertAlmostEqual(tv2[0].ts.value(1), 1.5)
    self.assertAlmostEqual(tv2[0].ts.value(2), 2.5)
    self.assertTrue(math.isnan(tv2[0].ts.value(3)))
    # The clone must be deep: mutating it must not affect the original.
    tv2[0].scale_factor = 10.0
    self.assertAlmostEqual(tv[0].scale_factor, 1.0)
    self.assertAlmostEqual(tv2[0].scale_factor, 10.0)
    # test we can create from breakpoint time-series
    # NOTE(review): the time points [0, 25, 20] are not monotonic — confirm intended.
    ts_bp = api.TimeSeries(api.TimeAxis(api.UtcTimeVector([0, 25, 20]), 30),
                           fill_value=2.0,
                           point_fx=api.POINT_AVERAGE_VALUE)
    tspec_bp = api.TargetSpecificationPts(ts_bp, cids, 0.7, api.KLING_GUPTA, 1.0, 1.0, 1.0,
                                          api.CELL_CHARGE, 'test_uid')
    self.assertIsNotNone(tspec_bp)
def test_snow_and_ground_water_response_calibration(self):
    """
    Test dual calibration strategy:
        * First fit the three Kirchner parameters for ground water
          response during July, August, and September.
        * Then fit two snow routine parameters (tx and max_water)
          from November to April.
    """
    # Simulation time axis
    dt = api.deltahours(24)
    n_steps = 400
    utc = api.Calendar()  # No offset gives Utc
    t0 = utc.time(2010, 9, 1, 0)
    time_axis = api.Timeaxis(t0, dt, n_steps)
    # Some fake ids
    region_id = 0
    interpolation_id = 0
    # Simulation coordinate system
    epsg = "32633"
    # Model
    model_t = pt_gs_k.PTGSKOptModel
    # Configs and repositories
    dataset_config_file = path.join(path.dirname(__file__), "netcdf", "atnsjoen_datasets.yaml")
    region_config_file = path.join(path.dirname(__file__), "netcdf", "atnsjoen_calibration_region.yaml")
    region_config = RegionConfig(region_config_file)
    model_config = ModelConfig(self.model_config_file)
    dataset_config = YamlContent(dataset_config_file)
    region_model_repository = RegionModelRepository(region_config, model_config, model_t, epsg)
    interp_repos = InterpolationParameterRepository(model_config)
    netcdf_geo_ts_repos = []
    for source in dataset_config.sources:
        station_file = source["params"]["stations_met"]
        netcdf_geo_ts_repos.append(GeoTsRepository(source["params"], station_file, ""))
    geo_ts_repository = GeoTsRepositoryCollection(netcdf_geo_ts_repos)
    # Construct target discharge series
    simulator = DefaultSimulator(region_id, interpolation_id, region_model_repository,
                                 geo_ts_repository, interp_repos, None)
    n_cells = simulator.region_model.size()
    state_repos = DefaultStateRepository(model_t, n_cells)
    simulator.run(time_axis, state_repos.get_state(0))
    cid = 1
    target_discharge = api.TsTransform().to_average(t0, dt, n_steps,
                                                    simulator.region_model.statistics.discharge([cid]))
    # Construct kirchner parameters
    param = simulator.region_model.parameter_t(simulator.region_model.get_region_parameter())
    print_param("True solution", param)
    kirchner_param_min = simulator.region_model.parameter_t(param)
    kirchner_param_max = simulator.region_model.parameter_t(param)
    # Kirchner parameters are quite abstract (no physical meaning), so simply scale them
    kirchner_param_min.kirchner.c1 *= 0.8
    kirchner_param_min.kirchner.c2 *= 0.8
    kirchner_param_min.kirchner.c3 *= 0.8
    kirchner_param_max.kirchner.c1 *= 1.2
    kirchner_param_max.kirchner.c2 *= 1.2
    kirchner_param_max.kirchner.c3 *= 1.2
    # kirchner_t_start = utc.time(api.YMDhms(2011, 4, 1, 0))
    # kirchner_time_axis = api.Timeaxis(kirchner_t_start, dt, 150)
    kirchner_time_axis = time_axis
    # Construct gamma snow parameters (realistic tx and max_lwc)
    gamma_snow_param_min = simulator.region_model.parameter_t(param)
    gamma_snow_param_max = simulator.region_model.parameter_t(param)
    gamma_snow_param_min.gs.tx = -1.0  # Min snow/rain temperature threshold
    gamma_snow_param_min.gs.max_water = 0.05  # Lower bound for max water content in snow (coastal regions)
    gamma_snow_param_max.gs.tx = 1.0
    gamma_snow_param_max.gs.max_water = 0.25  # Upper bound for max water content, or we get too little melt
    gs_t_start = utc.time(2010, 11, 1, 0)
    gs_time_axis = api.Timeaxis(gs_t_start, dt, 250)
    # gs_time_axis = time_axis
    # Find parameters
    target_spec = api.TargetSpecificationPts(target_discharge, api.IntVector([cid]),
                                             1.0, api.KLING_GUPTA)
    target_spec_vec = api.TargetSpecificationVector()  # TODO: We currently dont fix list initializer for vectors
    target_spec_vec.append(target_spec)
    # Construct a fake, perturbed starting point for calibration
    p_vec = [param.get(i) for i in range(param.size())]
    for i, name in enumerate([param.get_name(i) for i in range(len(p_vec))]):
        # FIX: the tuple previously read ("c1" "c2", ...) — implicit string
        # concatenation made it ("c1c2", ...) — and the bare `next` was a
        # no-op expression; `continue` is what was intended.
        if name not in ("c1", "c2", "c3", "TX", "max_water"):
            continue
        if name in ("c1", "c2", "c3"):
            p_vec[i] = random.uniform(0.8 * p_vec[i], 1.2 * p_vec[i])
        elif name == "TX":
            p_vec[i] = random.uniform(gamma_snow_param_min.gs.tx, gamma_snow_param_max.gs.tx)
        elif name == "max_water":
            p_vec[i] = random.uniform(gamma_snow_param_min.gs.max_water,
                                      gamma_snow_param_max.gs.max_water)
    param.set(p_vec)
    print_param("Initial guess", param)
    # Two pass optimization: once for the ground water response, and a
    # second time for the snow routine.
    kirchner_p_opt = simulator.optimize(kirchner_time_axis, state_repos.get_state(0),
                                        target_spec_vec, param,
                                        kirchner_param_min, kirchner_param_max)
    gamma_snow_p_opt = simulator.optimize(gs_time_axis, state_repos.get_state(0),
                                          target_spec_vec, kirchner_p_opt,
                                          gamma_snow_param_min, gamma_snow_param_max)
    print_param("Half way result", kirchner_p_opt)
    print_param("Result", gamma_snow_p_opt)
    simulator.region_model.set_catchment_parameter(cid, gamma_snow_p_opt)
    simulator.run(time_axis, state_repos.get_state(0))
    found_discharge = simulator.region_model.statistics.discharge([cid])
def _resample_1h(ts):
    """Resample *ts* to hourly true-average values spanning its own
    total period; the number of intervals is the whole hours that fit."""
    hour = api.deltahours(1)
    span = ts.time_axis.total_period()
    n_hours = span.timespan() // hour
    transform = api.TsTransform()
    return transform.to_average(span.start, hour, n_hours, ts)
def test_optimization_model(self):
    """Build a PTGSK model, clone it into an opt-model, and verify the
    optimizer recovers the known kirchner c1/c2 from a perturbed start,
    plus that optimize_global has a callable interface."""
    num_cells = 20
    model_type = pt_gs_k.PTGSKModel
    opt_model_type = pt_gs_k.PTGSKOptModel
    model = self.build_model(model_type, pt_gs_k.PTGSKParameter, num_cells)
    cal = api.Calendar()
    t0 = cal.time(2015, 1, 1, 0, 0, 0)
    dt = api.deltahours(1)
    n = 240
    time_axis = api.TimeAxisFixedDeltaT(t0, dt, n)
    model_interpolation_parameter = api.InterpolationParameter()
    model.initialize_cell_environment(
        time_axis
    )  # just show how we can split the run_interpolation into two calls(second one optional)
    model.interpolate(
        model_interpolation_parameter,
        self.create_dummy_region_environment(
            time_axis,
            model.get_cells()[int(num_cells / 2)].geo.mid_point()))
    # Give every cell the same initial kirchner state.
    s0 = pt_gs_k.PTGSKStateVector()
    for i in range(num_cells):
        si = pt_gs_k.PTGSKState()
        si.kirchner.q = 40.0
        s0.append(si)
    model.set_snow_sca_swe_collection(-1, True)
    model.set_states(s0)  # at this point the intial state of model is established as well
    model.run_cells()
    cids = api.IntVector.from_numpy([1])  # optional, we can add selective catchment_ids here
    sum_discharge = model.statistics.discharge(cids)
    sum_discharge_value = model.statistics.discharge_value(cids, 0)  # at the first timestep
    self.assertGreaterEqual(sum_discharge_value, 130.0)
    # verify we can construct an optimizer
    opt_model = model.create_opt_model_clone()
    opt_model.set_snow_sca_swe_collection(-1, True)  # ensure to fill in swe/sca
    opt_model.run_cells()
    opt_sum_discharge = opt_model.statistics.discharge(cids)
    opt_swe = opt_model.statistics.snow_swe(cids)  # how to get out swe/sca
    opt_swe_v = opt_model.statistics.snow_swe_value(cids, 0)
    opt_sca = opt_model.statistics.snow_sca(cids)
    # sca is a fraction, so every timestep value must stay within [0, 1]
    for i in range(len(opt_model.time_axis)):
        opt_sca_v = opt_model.statistics.snow_sca_value(cids, i)
        self.assertTrue(0.0 <= opt_sca_v <= 1.0)
    self.assertIsNotNone(opt_sum_discharge)
    self.assertIsNotNone(opt_swe)
    self.assertIsNotNone(opt_sca)
    optimizer = opt_model_type.optimizer_t(
        opt_model
    )  # notice that a model type know it's optimizer type, e.g. PTGSKOptimizer
    self.assertIsNotNone(optimizer)
    #
    # create target specification
    #
    opt_model.revert_to_initial_state(
    )  # set_states(s0)  # remember to set the s0 again, so we have the same initial condition for our game
    tsa = api.TsTransform().to_average(t0, dt, n, sum_discharge)
    t_spec_1 = api.TargetSpecificationPts(tsa, cids, 1.0, api.KLING_GUPTA, 1.0, 0.0, 0.0,
                                          api.DISCHARGE, 'test_uid')
    target_spec = api.TargetSpecificationVector()
    target_spec.append(t_spec_1)
    upper_bound = model_type.parameter_t(model.get_region_parameter())  # the model_type know it's parameter_t
    lower_bound = model_type.parameter_t(model.get_region_parameter())
    upper_bound.kirchner.c1 = -1.9
    lower_bound.kirchner.c1 = -3.0
    upper_bound.kirchner.c2 = 0.99
    lower_bound.kirchner.c2 = 0.80
    optimizer.set_target_specification(target_spec, lower_bound, upper_bound)
    # Not needed, it will automatically get one.
    # optimizer.establish_initial_state_from_model()
    # s0_0 = optimizer.get_initial_state(0)
    # optimizer.set_verbose_level(1000)
    p0 = model_type.parameter_t(model.get_region_parameter())
    orig_c1 = p0.kirchner.c1
    orig_c2 = p0.kirchner.c2
    # model.get_cells()[0].env_ts.precipitation.set(0, 5.1)
    # model.get_cells()[0].env_ts.precipitation.set(1, 4.9)
    goal_f0 = optimizer.calculate_goal_function(p0)
    # Start the search from a deliberately perturbed c1/c2.
    p0.kirchner.c1 = -2.4
    p0.kirchner.c2 = 0.91
    opt_param = optimizer.optimize(p0, 1500, 0.1, 1e-5)
    goal_fx = optimizer.calculate_goal_function(opt_param)
    p0.kirchner.c1 = -2.4
    p0.kirchner.c2 = 0.91
    # goal_fx1 = optimizer.calculate_goal_function(p0)
    self.assertLessEqual(goal_fx, 10.0)
    # The optimizer should recover the original (true) parameter values.
    self.assertAlmostEqual(orig_c1, opt_param.kirchner.c1, 4)
    self.assertAlmostEqual(orig_c2, opt_param.kirchner.c2, 4)
    # verify the interface to the new optimize_global function
    global_opt_param = optimizer.optimize_global(p0, max_n_evaluations=1500,
                                                 max_seconds=3.0, solver_eps=1e-5)
    self.assertIsNotNone(
        global_opt_param
    )  # just to ensure signature and results are covered
def run_calibration(self, model_t):
    """Round-trip calibration check for *model_t*: simulate a target
    discharge, perturb the first four region parameters, then verify the
    optimizer trace is populated and the recovered discharge matches."""
    # set up configuration
    config_dir = path.join(path.dirname(__file__), "netcdf")
    cfg = orchestration.YAMLConfig("atnsjoen_calibration.yaml", "atnsjoen",
                                   config_dir=config_dir, data_dir=shyftdata_dir,
                                   model_t=model_t)
    time_axis = cfg.time_axis
    # get a simulator
    simulator = cfg.get_simulator()
    n_cells = simulator.region_model.size()
    state_repos = DefaultStateRepository(cfg.model_t, n_cells)
    s0 = state_repos.get_state(0)
    param = simulator.region_model.get_region_parameter()
    # not needed, we auto initialize to default if not done explicitly
    # if model_t in [pt_hs_k.PTHSKOptModel]:
    #     for i in range(len(s0)):
    #         s0[i].snow.distribute(param.hs)
    simulator.run(time_axis, s0)
    cid = 1
    # Use the simulated discharge of catchment `cid` as the calibration target,
    # averaged onto the same fixed-interval axis as the run.
    target_discharge_ts = simulator.region_model.statistics.discharge([cid])
    target_discharge = api.TsTransform().to_average(
        time_axis.time(0),
        time_axis.time(1) - time_axis.time(0),
        time_axis.size(), target_discharge_ts)
    # Perturb parameters
    p_vec_orig = [param.get(i) for i in range(param.size())]
    p_vec_min = p_vec_orig[:]
    p_vec_max = p_vec_orig[:]
    p_vec_guess = p_vec_orig[:]
    random.seed(0)  # deterministic perturbation so the test is reproducible
    p_names = []
    for i in range(4):  # only the first four parameters are perturbed/calibrated
        p_names.append(param.get_name(i))
        p_vec_min[i] *= 0.5
        p_vec_max[i] *= 1.5
        p_vec_guess[i] = random.uniform(p_vec_min[i], p_vec_max[i])
        # A negative original value flips the ordering of min/max; swap back.
        if p_vec_min[i] > p_vec_max[i]:
            p_vec_min[i], p_vec_max[i] = p_vec_max[i], p_vec_min[i]
    p_min = simulator.region_model.parameter_t()
    p_max = simulator.region_model.parameter_t()
    p_guess = simulator.region_model.parameter_t()
    p_min.set(p_vec_min)
    p_max.set(p_vec_max)
    p_guess.set(p_vec_guess)
    # Find parameters
    target_spec = api.TargetSpecificationPts(target_discharge, api.IntVector([cid]),
                                             1.0, api.KLING_GUPTA)
    target_spec_vec = api.TargetSpecificationVector()  # ([target_spec]) does not yet work
    target_spec_vec.append(target_spec)
    self.assertEqual(simulator.optimizer.trace_size, 0)  # before optimize, trace_size should be 0
    p_opt = simulator.optimize(time_axis, s0, target_spec_vec, p_guess, p_min, p_max)
    self.assertGreater(simulator.optimizer.trace_size, 0)  # after opt, some trace values should be there
    # the trace values are in the order of appearance 0...trace_size-1
    #
    goal_fn_values = simulator.optimizer.trace_goal_function_values.to_numpy()  # all of them, as np array
    self.assertEqual(len(goal_fn_values), simulator.optimizer.trace_size)
    p_last = simulator.optimizer.trace_parameter(
        simulator.optimizer.trace_size - 1)  # get out the last (not necessarily the best)
    self.assertIsNotNone(p_last)
    # Re-run with the optimized parameters and compare against the target.
    simulator.region_model.set_catchment_parameter(cid, p_opt)
    simulator.run(time_axis, s0)
    found_discharge = simulator.region_model.statistics.discharge([cid])
    t_vs = np.array([
        target_discharge.value(i) for i in range(target_discharge.size())
    ])
    t_ts = np.array(
        [target_discharge.time(i) for i in range(target_discharge.size())])
    f_vs = np.array(
        [found_discharge.value(i) for i in range(found_discharge.size())])
    f_ts = np.array(
        [found_discharge.time(i) for i in range(found_discharge.size())])
    # Time points must match exactly; values to within the optimizer tolerance.
    self.assertTrue(np.linalg.norm(t_ts - f_ts) < 1.0e-10)
    self.assertTrue(np.linalg.norm(t_vs - f_vs) < 1.0e-3)