def test_test_gages(self):
    data_model = GagesModel.load_datamodel(
        self.config_data.data_path["Temp"],
        data_source_file_name='test_data_source.txt',
        stat_file_name='test_Statistics.json',
        flow_file_name='test_flow.npy',
        forcing_file_name='test_forcing.npy',
        attr_file_name='test_attr.npy',
        f_dict_file_name='test_dictFactorize.json',
        var_dict_file_name='test_dictAttribute.json',
        t_s_dict_file_name='test_dictTimeSpace.json')
    with torch.cuda.device(1):
        pred, obs = master_test(data_model, epoch=self.test_epoch)
        basin_area = data_model.data_source.read_attr(
            data_model.t_s_dict["sites_id"], ['DRAIN_SQKM'],
            is_return_dict=False)
        mean_prep = data_model.data_source.read_attr(
            data_model.t_s_dict["sites_id"], ['PPTAVG_BASIN'],
            is_return_dict=False)
        mean_prep = mean_prep / 365 * 10
        pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
        obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
        save_result(data_model.data_source.data_config.data_path['Temp'],
                    self.test_epoch, pred, obs)
        plot_we_need(data_model, obs, pred,
                     id_col="STAID", lon_col="LNG_GAGE", lat_col="LAT_GAGE")
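
# A minimal sketch of the unit conversion _basin_norm appears to perform (not the
# project's verbatim code): flow is scaled by drainage area (DRAIN_SQKM, km^2) and
# mean daily precipitation; the "/ 365 * 10" step above converts GAGES-II's
# PPTAVG_BASIN (cm/year) to mm/day. The cfs constant below is an assumption.
import numpy as np


def basin_norm_sketch(x, basin_area_km2, mean_prep_mm_day, to_norm):
    """Normalize a (n_sites, n_days) flow array in ft^3/s by basin area and mean
    precipitation, or invert the transform when to_norm is False."""
    cfs_to_m3_per_day = 0.0283168 * 3600 * 24  # ft^3/s -> m^3/day
    area_m2 = np.reshape(basin_area_km2, (-1, 1)) * 1e6          # km^2 -> m^2
    prep_m_per_day = np.reshape(mean_prep_mm_day, (-1, 1)) * 1e-3  # mm/day -> m/day
    if to_norm:
        # daily flow volume over mean daily precipitation volume (dimensionless)
        return x * cfs_to_m3_per_day / (area_m2 * prep_m_per_day)
    # inverse: recover flow in ft^3/s from the dimensionless ratio
    return x * (area_m2 * prep_m_per_day) / cfs_to_m3_per_day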
def camels_lstm(args):
    update_cfg(cfg, args)
    random_seed = cfg.RANDOM_SEED
    test_epoch = cfg.TEST_EPOCH
    gpu_num = cfg.CTX
    train_mode = cfg.TRAIN_MODE
    cache = cfg.CACHE.STATE
    print("train and test in CAMELS: \n")
    config_data = GagesConfig(cfg)

    camels531_gageid_file = os.path.join(config_data.data_path["DB"],
                                         "camels531", "camels531.txt")
    gauge_df = pd.read_csv(camels531_gageid_file, dtype={"GaugeID": str})
    gauge_list = gauge_df["GaugeID"].values
    all_sites_camels_531 = np.sort([str(gauge).zfill(8) for gauge in gauge_list])
    gages_model = GagesModels(config_data, screen_basin_area_huc4=False,
                              sites_id=all_sites_camels_531.tolist())
    gages_model_train = gages_model.data_model_train
    gages_model_test = gages_model.data_model_test
    if cache:
        save_datamodel(gages_model_train, data_source_file_name='data_source.txt',
                       stat_file_name='Statistics.json', flow_file_name='flow',
                       forcing_file_name='forcing', attr_file_name='attr',
                       f_dict_file_name='dictFactorize.json',
                       var_dict_file_name='dictAttribute.json',
                       t_s_dict_file_name='dictTimeSpace.json')
        save_datamodel(gages_model_test, data_source_file_name='test_data_source.txt',
                       stat_file_name='test_Statistics.json', flow_file_name='test_flow',
                       forcing_file_name='test_forcing', attr_file_name='test_attr',
                       f_dict_file_name='test_dictFactorize.json',
                       var_dict_file_name='test_dictAttribute.json',
                       t_s_dict_file_name='test_dictTimeSpace.json')
    with torch.cuda.device(gpu_num):
        if train_mode:
            master_train(gages_model_train, random_seed=random_seed)
        pred, obs = master_test(gages_model_test, epoch=test_epoch)
        basin_area = gages_model_test.data_source.read_attr(
            gages_model_test.t_s_dict["sites_id"], ['DRAIN_SQKM'],
            is_return_dict=False)
        mean_prep = gages_model_test.data_source.read_attr(
            gages_model_test.t_s_dict["sites_id"], ['PPTAVG_BASIN'],
            is_return_dict=False)
        mean_prep = mean_prep / 365 * 10
        pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
        obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
        save_result(gages_model_test.data_source.data_config.data_path['Temp'],
                    test_epoch, pred, obs)
def test_forecast(self):
    source_data = unserialize_pickle(self.data_source_test_file)
    # Store the data_model in parts: serializing the whole data_model at once is
    # slow, so each piece is serialized separately -- dicts go directly to JSON
    # files, the arrays to HDF5.
    stat_dict = unserialize_json(self.stat_file)
    data_flow = unserialize_numpy(self.flow_npy_file)
    data_forcing = unserialize_numpy(self.forcing_npy_file)
    data_attr = unserialize_numpy(self.attr_npy_file)
    # dictFactorize.json explains the values of the categorical variables
    var_dict = unserialize_json(self.var_dict_file)
    f_dict = unserialize_json(self.f_dict_file)
    t_s_dict = unserialize_json(self.t_s_dict_file)
    data_model_test = DataModel(source_data, data_flow, data_forcing, data_attr,
                                var_dict, f_dict, stat_dict, t_s_dict)
    pred, obs = hydroDL.master_test(data_model_test)
    print(pred)
    print(obs)
    serialize_numpy(pred, self.flow_pred_file)
    serialize_numpy(obs, self.flow_obs_file)
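
# Plausible implementations of the (de)serialization helpers used above, assuming
# JSON for dicts, .npy for arrays, and pickle for the data-source object; these
# bodies are a sketch, not necessarily the project's exact utilities.
import json
import pickle

import numpy as np


def serialize_numpy(arr, npy_file):
    # arrays load much faster from .npy than from a pickled DataModel
    np.save(npy_file, arr)


def unserialize_numpy(npy_file):
    return np.load(npy_file)


def unserialize_json(json_file):
    with open(json_file, "r") as f:
        return json.load(f)


def unserialize_pickle(pkl_file):
    # the data-source object holds non-array state, hence pickle
    with open(pkl_file, "rb") as f:
        return pickle.load(f)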
def dor_lstm(args):
    update_cfg(cfg, args)
    random_seed = cfg.RANDOM_SEED
    test_epoch = cfg.TEST_EPOCH
    gpu_num = cfg.CTX
    train_mode = cfg.TRAIN_MODE
    dor = cfg.GAGES.attrScreenParams.DOR
    cache = cfg.CACHE.STATE
    print("train and test in some dor basins: \n")
    config_data = GagesConfig(cfg)

    gages_model = GagesModels(config_data, screen_basin_area_huc4=False, DOR=dor)
    gages_model_train = gages_model.data_model_train
    gages_model_test = gages_model.data_model_test
    if cache:
        save_datamodel(gages_model_train, data_source_file_name='data_source.txt',
                       stat_file_name='Statistics.json', flow_file_name='flow',
                       forcing_file_name='forcing', attr_file_name='attr',
                       f_dict_file_name='dictFactorize.json',
                       var_dict_file_name='dictAttribute.json',
                       t_s_dict_file_name='dictTimeSpace.json')
        save_datamodel(gages_model_test, data_source_file_name='test_data_source.txt',
                       stat_file_name='test_Statistics.json', flow_file_name='test_flow',
                       forcing_file_name='test_forcing', attr_file_name='test_attr',
                       f_dict_file_name='test_dictFactorize.json',
                       var_dict_file_name='test_dictAttribute.json',
                       t_s_dict_file_name='test_dictTimeSpace.json')
    with torch.cuda.device(gpu_num):
        if train_mode:
            master_train(gages_model_train, random_seed=random_seed)
        pred, obs = master_test(gages_model_test, epoch=test_epoch)
        basin_area = gages_model_test.data_source.read_attr(
            gages_model_test.t_s_dict["sites_id"], ['DRAIN_SQKM'],
            is_return_dict=False)
        mean_prep = gages_model_test.data_source.read_attr(
            gages_model_test.t_s_dict["sites_id"], ['PPTAVG_BASIN'],
            is_return_dict=False)
        mean_prep = mean_prep / 365 * 10
        pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
        obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
        save_result(gages_model_test.data_source.data_config.data_path['Temp'],
                    test_epoch, pred, obs)
def pub_lstm(args):
    update_cfg(cfg, args)
    random_seed = cfg.RANDOM_SEED
    test_epoch = cfg.TEST_EPOCH
    gpu_num = cfg.CTX
    train_mode = cfg.TRAIN_MODE
    cache = cfg.CACHE.STATE
    pub_plan = cfg.PUB_PLAN
    plus = cfg.PLUS
    dor = cfg.GAGES.attrScreenParams.DOR
    split_num = cfg.SPLIT_NUM
    print("train and test for PUB: \n")
    config_data = GagesConfig(cfg)

    if cache:
        eco_names = [("ECO2_CODE", 5.2), ("ECO2_CODE", 5.3), ("ECO2_CODE", 6.2),
                     ("ECO2_CODE", 7.1), ("ECO2_CODE", 8.1), ("ECO2_CODE", 8.2),
                     ("ECO2_CODE", 8.3), ("ECO2_CODE", 8.4), ("ECO2_CODE", 8.5),
                     ("ECO2_CODE", 9.2), ("ECO2_CODE", 9.3), ("ECO2_CODE", 9.4),
                     ("ECO2_CODE", 9.5), ("ECO2_CODE", 9.6), ("ECO2_CODE", 10.1),
                     ("ECO2_CODE", 10.2), ("ECO2_CODE", 10.4), ("ECO2_CODE", 11.1),
                     ("ECO2_CODE", 12.1), ("ECO2_CODE", 13.1)]
        quick_data_dir = os.path.join(config_data.data_path["DB"], "quickdata")
        data_dir = os.path.join(quick_data_dir, "conus-all_90-10_nan-0.0_00-1.0")
        data_model_train = GagesModel.load_datamodel(
            data_dir, data_source_file_name='data_source.txt',
            stat_file_name='Statistics.json', flow_file_name='flow.npy',
            forcing_file_name='forcing.npy', attr_file_name='attr.npy',
            f_dict_file_name='dictFactorize.json',
            var_dict_file_name='dictAttribute.json',
            t_s_dict_file_name='dictTimeSpace.json')
        data_model_test = GagesModel.load_datamodel(
            data_dir, data_source_file_name='test_data_source.txt',
            stat_file_name='test_Statistics.json', flow_file_name='test_flow.npy',
            forcing_file_name='test_forcing.npy', attr_file_name='test_attr.npy',
            f_dict_file_name='test_dictFactorize.json',
            var_dict_file_name='test_dictAttribute.json',
            t_s_dict_file_name='test_dictTimeSpace.json')
        conus_sites_id = data_model_train.t_s_dict["sites_id"]

        if pub_plan == 0:
            # do a pub test like freddy's
            camels531_gageid_file = os.path.join(config_data.data_path["DB"],
                                                 "camels531", "camels531.txt")
            gauge_df = pd.read_csv(camels531_gageid_file, dtype={"GaugeID": str})
            gauge_list = gauge_df["GaugeID"].values
            all_sites_camels_531 = np.sort(
                [str(gauge).zfill(8) for gauge in gauge_list])
            sites_id_train = np.intersect1d(conus_sites_id, all_sites_camels_531)
            # basins not in CAMELS
            sites_id_test = [a_temp_site for a_temp_site in conus_sites_id
                             if a_temp_site not in all_sites_camels_531]
            assert all(x < y for x, y in zip(sites_id_test, sites_id_test[1:]))
        elif pub_plan == 1 or pub_plan == 4:
            source_data_dor1 = GagesSource.choose_some_basins(
                config_data, config_data.model_dict["data"]["tRangeTrain"],
                screen_basin_area_huc4=False, DOR=-dor)
            # basins with dams
            source_data_withdams = GagesSource.choose_some_basins(
                config_data, config_data.model_dict["data"]["tRangeTrain"],
                screen_basin_area_huc4=False, dam_num=[1, 100000])
            # basins without dams
            source_data_withoutdams = GagesSource.choose_some_basins(
                config_data, config_data.model_dict["data"]["tRangeTrain"],
                screen_basin_area_huc4=False, dam_num=0)
            sites_id_dor1 = source_data_dor1.all_configs['flow_screen_gage_id']
            sites_id_withdams = source_data_withdams.all_configs['flow_screen_gage_id']
            if pub_plan == 1:
                sites_id_train = source_data_withoutdams.all_configs['flow_screen_gage_id']
                sites_id_test = np.intersect1d(np.array(sites_id_dor1),
                                               np.array(sites_id_withdams)).tolist()
            else:
                sites_id_train = np.intersect1d(np.array(sites_id_dor1),
                                                np.array(sites_id_withdams)).tolist()
                sites_id_test = source_data_withoutdams.all_configs['flow_screen_gage_id']
        elif pub_plan == 2 or pub_plan == 5:
            source_data_dor1 = GagesSource.choose_some_basins(
                config_data, config_data.model_dict["data"]["tRangeTrain"],
                screen_basin_area_huc4=False, DOR=dor)
            # basins without dams
            source_data_withoutdams = GagesSource.choose_some_basins(
                config_data, config_data.model_dict["data"]["tRangeTrain"],
                screen_basin_area_huc4=False, dam_num=0)
            if pub_plan == 2:
                sites_id_train = source_data_withoutdams.all_configs['flow_screen_gage_id']
                sites_id_test = source_data_dor1.all_configs['flow_screen_gage_id']
            else:
                sites_id_train = source_data_dor1.all_configs['flow_screen_gage_id']
                sites_id_test = source_data_withoutdams.all_configs['flow_screen_gage_id']
        elif pub_plan == 3 or pub_plan == 6:
            dor_1 = -dor
            dor_2 = dor
            source_data_dor1 = GagesSource.choose_some_basins(
                config_data, config_data.model_dict["data"]["tRangeTrain"],
                screen_basin_area_huc4=False, DOR=dor_1)
            # basins with dams
            source_data_withdams = GagesSource.choose_some_basins(
                config_data, config_data.model_dict["data"]["tRangeTrain"],
                screen_basin_area_huc4=False, dam_num=[1, 100000])
            sites_id_dor1 = source_data_dor1.all_configs['flow_screen_gage_id']
            sites_id_withdams = source_data_withdams.all_configs['flow_screen_gage_id']
            source_data_dor2 = GagesSource.choose_some_basins(
                config_data, config_data.model_dict["data"]["tRangeTrain"],
                screen_basin_area_huc4=False, DOR=dor_2)
            if pub_plan == 3:
                sites_id_train = np.intersect1d(np.array(sites_id_dor1),
                                                np.array(sites_id_withdams)).tolist()
                sites_id_test = source_data_dor2.all_configs['flow_screen_gage_id']
            else:
                sites_id_train = source_data_dor2.all_configs['flow_screen_gage_id']
                sites_id_test = np.intersect1d(np.array(sites_id_dor1),
                                               np.array(sites_id_withdams)).tolist()
        else:
            print("wrong plan")
            sites_id_train = None
            sites_id_test = None

        train_sites_in_conus = np.intersect1d(conus_sites_id, sites_id_train)
        test_sites_in_conus = np.intersect1d(conus_sites_id, sites_id_test)

        if plus == 0:
            all_index_lst_train_1 = []  # all sites come from train1 dataset
            sites_lst_train = []
            all_index_lst_test_1 = []
            sites_lst_test_1 = []
            all_index_lst_test_2 = []
            sites_lst_test_2 = []
            np.random.seed(random_seed)
            kf = KFold(n_splits=split_num, shuffle=True, random_state=random_seed)
            eco_name_chosen = []
            for eco_name in eco_names:
                eco_source_data = GagesSource.choose_some_basins(
                    config_data, config_data.model_dict["data"]["tRangeTrain"],
                    screen_basin_area_huc4=False, ecoregion=eco_name)
                eco_sites_id = eco_source_data.all_configs['flow_screen_gage_id']
                train_sites_id_inter = np.intersect1d(train_sites_in_conus, eco_sites_id)
                test_sites_id_inter = np.intersect1d(test_sites_in_conus, eco_sites_id)
                if train_sites_id_inter.size < split_num or test_sites_id_inter.size < 1:
                    continue
                for train, test in kf.split(train_sites_id_inter):
                    all_index_lst_train_1.append(train)
                    sites_lst_train.append(train_sites_id_inter[train])
                    all_index_lst_test_1.append(test)
                    sites_lst_test_1.append(train_sites_id_inter[test])
                    if test_sites_id_inter.size < test.size:
                        all_index_lst_test_2.append(np.arange(test_sites_id_inter.size))
                        sites_lst_test_2.append(test_sites_id_inter)
                    else:
                        test2_chosen_idx = np.random.choice(test_sites_id_inter.size,
                                                            test.size, replace=False)
                        all_index_lst_test_2.append(test2_chosen_idx)
                        sites_lst_test_2.append(test_sites_id_inter[test2_chosen_idx])
                eco_name_chosen.append(eco_name)
        elif plus == -1:
            print("camels pub, only do pub on the camels basins")
            all_index_lst_train_1 = []  # all sites come from train1 dataset
            sites_lst_train = []
            all_index_lst_test_1 = []
            sites_lst_test_1 = []
            np.random.seed(random_seed)
            kf = KFold(n_splits=split_num, shuffle=True, random_state=random_seed)
            eco_name_chosen = []
            for eco_name in eco_names:
                eco_source_data = GagesSource.choose_some_basins(
                    config_data, config_data.model_dict["data"]["tRangeTrain"],
                    screen_basin_area_huc4=False, ecoregion=eco_name)
                eco_sites_id = eco_source_data.all_configs['flow_screen_gage_id']
                train_sites_id_inter = np.intersect1d(train_sites_in_conus, eco_sites_id)
                if train_sites_id_inter.size < split_num:
                    continue
                for train, test in kf.split(train_sites_id_inter):
                    all_index_lst_train_1.append(train)
                    sites_lst_train.append(train_sites_id_inter[train])
                    all_index_lst_test_1.append(test)
                    sites_lst_test_1.append(train_sites_id_inter[test])
                eco_name_chosen.append(eco_name)
        elif plus == -2:
            print("camels pub, only do pub on the camels basins, "
                  "same with freddy's split method")
            all_index_lst_train_1 = []  # all sites come from train1 dataset
            sites_lst_train = []
            all_index_lst_test_1 = []
            sites_lst_test_1 = []
            np.random.seed(random_seed)
            kf = KFold(n_splits=split_num, shuffle=True, random_state=random_seed)
            for train, test in kf.split(train_sites_in_conus):
                all_index_lst_train_1.append(train)
                sites_lst_train.append(train_sites_in_conus[train])
                all_index_lst_test_1.append(test)
                sites_lst_test_1.append(train_sites_in_conus[test])
        else:
            sites_lst_train = []
            sites_lst_test_1 = []
            sites_lst_test_2 = []
            np.random.seed(random_seed)
            kf = KFold(n_splits=split_num, shuffle=True, random_state=random_seed)
            eco_name_chosen = []
            for eco_name in eco_names:
                eco_source_data = GagesSource.choose_some_basins(
                    config_data, config_data.model_dict["data"]["tRangeTrain"],
                    screen_basin_area_huc4=False, ecoregion=eco_name)
                eco_sites_id = eco_source_data.all_configs['flow_screen_gage_id']
                sites_id_inter_1 = np.intersect1d(train_sites_in_conus, eco_sites_id)
                sites_id_inter_2 = np.intersect1d(test_sites_in_conus, eco_sites_id)
                if sites_id_inter_1.size < sites_id_inter_2.size:
                    if sites_id_inter_1.size < split_num:
                        continue
                    for train, test in kf.split(sites_id_inter_1):
                        sites_lst_train_1 = sites_id_inter_1[train]
                        sites_lst_test_1.append(sites_id_inter_1[test])
                        chosen_lst_2 = random_choice_no_return(
                            sites_id_inter_2, [train.size, test.size])
                        sites_lst_train_2 = chosen_lst_2[0]
                        sites_lst_test_2.append(chosen_lst_2[1])
                        sites_lst_train.append(
                            np.sort(np.append(sites_lst_train_1, sites_lst_train_2)))
                else:
                    if sites_id_inter_2.size < split_num:
                        continue
                    for train, test in kf.split(sites_id_inter_2):
                        sites_lst_train_2 = sites_id_inter_2[train]
                        sites_lst_test_2.append(sites_id_inter_2[test])
                        chosen_lst_1 = random_choice_no_return(
                            sites_id_inter_1, [train.size, test.size])
                        sites_lst_train_1 = chosen_lst_1[0]
                        sites_lst_test_1.append(chosen_lst_1[1])
                        sites_lst_train.append(
                            np.sort(np.append(sites_lst_train_1, sites_lst_train_2)))
                eco_name_chosen.append(eco_name)

        for i in range(split_num):
            sites_ids_train_ilst = [sites_lst_train[j]
                                    for j in range(len(sites_lst_train))
                                    if j % split_num == i]
            sites_ids_train_i = np.sort(
                reduce(lambda x, y: np.hstack((x, y)), sites_ids_train_ilst))
            sites_ids_test_ilst_1 = [sites_lst_test_1[j]
                                     for j in range(len(sites_lst_test_1))
                                     if j % split_num == i]
            sites_ids_test_i_1 = np.sort(
                reduce(lambda x, y: np.hstack((x, y)), sites_ids_test_ilst_1))
            if plus >= 0:
                sites_ids_test_ilst_2 = [sites_lst_test_2[j]
                                         for j in range(len(sites_lst_test_2))
                                         if j % split_num == i]
                sites_ids_test_i_2 = np.sort(
                    reduce(lambda x, y: np.hstack((x, y)), sites_ids_test_ilst_2))
            config_data_i = GagesConfig.set_subdir(cfg, str(i))
            gages_model_train_i = GagesModel.update_data_model(
                config_data_i, data_model_train,
                sites_id_update=sites_ids_train_i,
                data_attr_update=True, screen_basin_area_huc4=False)
            gages_model_test_baseline_i = GagesModel.update_data_model(
                config_data_i, data_model_test,
                sites_id_update=sites_ids_train_i,
                data_attr_update=True,
                train_stat_dict=gages_model_train_i.stat_dict,
                screen_basin_area_huc4=False)
            gages_model_test_i_1 = GagesModel.update_data_model(
                config_data_i, data_model_test,
                sites_id_update=sites_ids_test_i_1,
                data_attr_update=True,
                train_stat_dict=gages_model_train_i.stat_dict,
                screen_basin_area_huc4=False)
            if plus >= 0:
                gages_model_test_i_2 = GagesModel.update_data_model(
                    config_data_i, data_model_test,
                    sites_id_update=sites_ids_test_i_2,
                    data_attr_update=True,
                    train_stat_dict=gages_model_train_i.stat_dict,
                    screen_basin_area_huc4=False)
            save_datamodel(gages_model_train_i, data_source_file_name='data_source.txt',
                           stat_file_name='Statistics.json', flow_file_name='flow',
                           forcing_file_name='forcing', attr_file_name='attr',
                           f_dict_file_name='dictFactorize.json',
                           var_dict_file_name='dictAttribute.json',
                           t_s_dict_file_name='dictTimeSpace.json')
            save_datamodel(gages_model_test_baseline_i,
                           data_source_file_name='test_data_source_base.txt',
                           stat_file_name='test_Statistics_base.json',
                           flow_file_name='test_flow_base',
                           forcing_file_name='test_forcing_base',
                           attr_file_name='test_attr_base',
                           f_dict_file_name='test_dictFactorize_base.json',
                           var_dict_file_name='test_dictAttribute_base.json',
                           t_s_dict_file_name='test_dictTimeSpace_base.json')
            save_datamodel(gages_model_test_i_1,
                           data_source_file_name='test_data_source.txt',
                           stat_file_name='test_Statistics.json',
                           flow_file_name='test_flow',
                           forcing_file_name='test_forcing',
                           attr_file_name='test_attr',
                           f_dict_file_name='test_dictFactorize.json',
                           var_dict_file_name='test_dictAttribute.json',
                           t_s_dict_file_name='test_dictTimeSpace.json')
            if plus >= 0:
                save_datamodel(gages_model_test_i_2,
                               data_source_file_name='test_data_source_2.txt',
                               stat_file_name='test_Statistics_2.json',
                               flow_file_name='test_flow_2',
                               forcing_file_name='test_forcing_2',
                               attr_file_name='test_attr_2',
                               f_dict_file_name='test_dictFactorize_2.json',
                               var_dict_file_name='test_dictAttribute_2.json',
                               t_s_dict_file_name='test_dictTimeSpace_2.json')
            print("save ecoregion " + str(i) + " data model")

    with torch.cuda.device(gpu_num):
        if train_mode:
            for i in range(split_num):
                data_model = GagesModel.load_datamodel(
                    config_data.data_path["Temp"], str(i),
                    data_source_file_name='data_source.txt',
                    stat_file_name='Statistics.json', flow_file_name='flow.npy',
                    forcing_file_name='forcing.npy', attr_file_name='attr.npy',
                    f_dict_file_name='dictFactorize.json',
                    var_dict_file_name='dictAttribute.json',
                    t_s_dict_file_name='dictTimeSpace.json')
                master_train(data_model, random_seed=random_seed)
        for i in range(split_num):
            data_model_baseline = GagesModel.load_datamodel(
                config_data.data_path["Temp"], str(i),
                data_source_file_name='test_data_source_base.txt',
                stat_file_name='test_Statistics_base.json',
                flow_file_name='test_flow_base.npy',
                forcing_file_name='test_forcing_base.npy',
                attr_file_name='test_attr_base.npy',
                f_dict_file_name='test_dictFactorize_base.json',
                var_dict_file_name='test_dictAttribute_base.json',
                t_s_dict_file_name='test_dictTimeSpace_base.json')
            data_model = GagesModel.load_datamodel(
                config_data.data_path["Temp"], str(i),
                data_source_file_name='test_data_source.txt',
                stat_file_name='test_Statistics.json',
                flow_file_name='test_flow.npy',
                forcing_file_name='test_forcing.npy',
                attr_file_name='test_attr.npy',
                f_dict_file_name='test_dictFactorize.json',
                var_dict_file_name='test_dictAttribute.json',
                t_s_dict_file_name='test_dictTimeSpace.json')
            if plus >= 0:
                data_model_2 = GagesModel.load_datamodel(
                    config_data.data_path["Temp"], str(i),
                    data_source_file_name='test_data_source_2.txt',
                    stat_file_name='test_Statistics_2.json',
                    flow_file_name='test_flow_2.npy',
                    forcing_file_name='test_forcing_2.npy',
                    attr_file_name='test_attr_2.npy',
                    f_dict_file_name='test_dictFactorize_2.json',
                    var_dict_file_name='test_dictAttribute_2.json',
                    t_s_dict_file_name='test_dictTimeSpace_2.json')
            pred_baseline, obs_baseline = master_test(data_model_baseline,
                                                      epoch=test_epoch,
                                                      save_file_suffix="base")
            basin_area_baseline = data_model_baseline.data_source.read_attr(
                data_model_baseline.t_s_dict["sites_id"], ['DRAIN_SQKM'],
                is_return_dict=False)
            mean_prep_baseline = data_model_baseline.data_source.read_attr(
                data_model_baseline.t_s_dict["sites_id"], ['PPTAVG_BASIN'],
                is_return_dict=False)
            mean_prep_baseline = mean_prep_baseline / 365 * 10
            pred_baseline = _basin_norm(pred_baseline, basin_area_baseline,
                                        mean_prep_baseline, to_norm=False)
            obs_baseline = _basin_norm(obs_baseline, basin_area_baseline,
                                       mean_prep_baseline, to_norm=False)
            save_result(data_model_baseline.data_source.data_config.data_path['Temp'],
                        test_epoch, pred_baseline, obs_baseline,
                        pred_name='flow_pred_base', obs_name='flow_obs_base')
            pred, obs = master_test(data_model, epoch=test_epoch)
            basin_area = data_model.data_source.read_attr(
                data_model.t_s_dict["sites_id"], ['DRAIN_SQKM'],
                is_return_dict=False)
            mean_prep = data_model.data_source.read_attr(
                data_model.t_s_dict["sites_id"], ['PPTAVG_BASIN'],
                is_return_dict=False)
            mean_prep = mean_prep / 365 * 10
            pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
            obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
            save_result(data_model.data_source.data_config.data_path['Temp'],
                        test_epoch, pred, obs)
            if plus >= 0:
                pred_2, obs_2 = master_test(data_model_2, epoch=test_epoch,
                                            save_file_suffix="2")
                basin_area_2 = data_model_2.data_source.read_attr(
                    data_model_2.t_s_dict["sites_id"], ['DRAIN_SQKM'],
                    is_return_dict=False)
                mean_prep_2 = data_model_2.data_source.read_attr(
                    data_model_2.t_s_dict["sites_id"], ['PPTAVG_BASIN'],
                    is_return_dict=False)
                mean_prep_2 = mean_prep_2 / 365 * 10
                pred_2 = _basin_norm(pred_2, basin_area_2, mean_prep_2, to_norm=False)
                obs_2 = _basin_norm(obs_2, basin_area_2, mean_prep_2, to_norm=False)
                save_result(data_model_2.data_source.data_config.data_path['Temp'],
                            test_epoch, pred_2, obs_2,
                            pred_name='flow_pred_2', obs_name='flow_obs_2')
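
# The PUB split above interleaves k folds per ecoregion, and fold i later collects
# every split_num-th entry of the accumulated lists. A stripped-down sketch of that
# bookkeeping for a single ecoregion, using hypothetical gauge IDs:
def pub_split_sketch(split_num=3, seed=1234):
    import numpy as np
    from sklearn.model_selection import KFold

    # hypothetical gauge IDs inside one ecoregion
    gauges = np.array(["01013500", "01030500", "01031500",
                       "01047000", "01052500", "01054200"])
    kf = KFold(n_splits=split_num, shuffle=True, random_state=seed)
    sites_lst_train, sites_lst_test = [], []
    for train_idx, test_idx in kf.split(gauges):
        sites_lst_train.append(gauges[train_idx])  # gauges the model trains on
        sites_lst_test.append(gauges[test_idx])    # held out as "ungauged" basins
    # with more ecoregions appended in the same order, fold i is every
    # split_num-th entry of the lists, exactly as in pub_lstm's final loop
    i = 0
    fold_i_train = np.sort(np.hstack(
        [sites_lst_train[j] for j in range(len(sites_lst_train)) if j % split_num == i]))
    fold_i_test = np.sort(np.hstack(
        [sites_lst_test[j] for j in range(len(sites_lst_test)) if j % split_num == i]))
    return fold_i_train, fold_i_test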
def test_some_reservoirs(self):
    print("train and test in basins with different combination: \n")
    dam_plan = self.dam_plan
    config_data = self.config_data
    test_epoch = self.test_epoch
    if dam_plan == 2:
        dam_num = 0
        dor = self.config_file.GAGES.attrScreenParams.DOR
        # basins screened by DOR
        source_data_dor1 = GagesSource.choose_some_basins(
            config_data, config_data.model_dict["data"]["tRangeTrain"],
            screen_basin_area_huc4=False, DOR=dor)
        # basins without dams
        source_data_withoutdams = GagesSource.choose_some_basins(
            config_data, config_data.model_dict["data"]["tRangeTrain"],
            screen_basin_area_huc4=False, dam_num=dam_num)
        sites_id_dor1 = source_data_dor1.all_configs['flow_screen_gage_id']
        sites_id_withoutdams = source_data_withoutdams.all_configs['flow_screen_gage_id']
        sites_id_chosen = np.sort(np.union1d(np.array(sites_id_dor1),
                                             np.array(sites_id_withoutdams))).tolist()
    elif dam_plan == 3:
        dam_num = [1, 100000]
        # basins with dams
        source_data_withdams = GagesSource.choose_some_basins(
            config_data, config_data.model_dict["data"]["tRangeTrain"],
            screen_basin_area_huc4=False, dam_num=dam_num)
        sites_id_chosen = source_data_withdams.all_configs['flow_screen_gage_id']
    else:
        print("wrong choice")
        sites_id_chosen = None
    gages_model = GagesModels(config_data, screen_basin_area_huc4=False,
                              sites_id=sites_id_chosen)
    gages_model_train = gages_model.data_model_train
    gages_model_test = gages_model.data_model_test
    if self.cache:
        save_datamodel(gages_model_train, data_source_file_name='data_source.txt',
                       stat_file_name='Statistics.json', flow_file_name='flow',
                       forcing_file_name='forcing', attr_file_name='attr',
                       f_dict_file_name='dictFactorize.json',
                       var_dict_file_name='dictAttribute.json',
                       t_s_dict_file_name='dictTimeSpace.json')
        save_datamodel(gages_model_test, data_source_file_name='test_data_source.txt',
                       stat_file_name='test_Statistics.json', flow_file_name='test_flow',
                       forcing_file_name='test_forcing', attr_file_name='test_attr',
                       f_dict_file_name='test_dictFactorize.json',
                       var_dict_file_name='test_dictAttribute.json',
                       t_s_dict_file_name='test_dictTimeSpace.json')
    with torch.cuda.device(self.gpu_num):
        if self.train_mode:
            master_train(gages_model_train, random_seed=self.random_seed)
        pred, obs = master_test(gages_model_test, epoch=test_epoch)
        basin_area = gages_model_test.data_source.read_attr(
            gages_model_test.t_s_dict["sites_id"], ['DRAIN_SQKM'],
            is_return_dict=False)
        mean_prep = gages_model_test.data_source.read_attr(
            gages_model_test.t_s_dict["sites_id"], ['PPTAVG_BASIN'],
            is_return_dict=False)
        mean_prep = mean_prep / 365 * 10
        pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
        obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
        save_result(gages_model_test.data_source.data_config.data_path['Temp'],
                    test_epoch, pred, obs)
def dam_lstm(args):
    update_cfg(cfg, args)
    random_seed = cfg.RANDOM_SEED
    test_epoch = cfg.TEST_EPOCH
    gpu_num = cfg.CTX
    train_mode = cfg.TRAIN_MODE
    dor = cfg.GAGES.attrScreenParams.DOR
    cache = cfg.CACHE.STATE
    print("train and test in basins with dams: \n")
    config_data = GagesConfig(cfg)

    source_data_dor1 = GagesSource.choose_some_basins(
        config_data, config_data.model_dict["data"]["tRangeTrain"],
        screen_basin_area_huc4=False, DOR=dor)
    # basins with dams
    source_data_withdams = GagesSource.choose_some_basins(
        config_data, config_data.model_dict["data"]["tRangeTrain"],
        screen_basin_area_huc4=False, dam_num=[1, 100000])
    sites_id_dor1 = source_data_dor1.all_configs['flow_screen_gage_id']
    sites_id_withdams = source_data_withdams.all_configs['flow_screen_gage_id']
    sites_id_chosen = np.intersect1d(np.array(sites_id_dor1),
                                     np.array(sites_id_withdams)).tolist()

    gages_model = GagesModels(config_data, screen_basin_area_huc4=False,
                              sites_id=sites_id_chosen)
    gages_model_train = gages_model.data_model_train
    gages_model_test = gages_model.data_model_test
    if cache:
        save_datamodel(gages_model_train, data_source_file_name='data_source.txt',
                       stat_file_name='Statistics.json', flow_file_name='flow',
                       forcing_file_name='forcing', attr_file_name='attr',
                       f_dict_file_name='dictFactorize.json',
                       var_dict_file_name='dictAttribute.json',
                       t_s_dict_file_name='dictTimeSpace.json')
        save_datamodel(gages_model_test, data_source_file_name='test_data_source.txt',
                       stat_file_name='test_Statistics.json', flow_file_name='test_flow',
                       forcing_file_name='test_forcing', attr_file_name='test_attr',
                       f_dict_file_name='test_dictFactorize.json',
                       var_dict_file_name='test_dictAttribute.json',
                       t_s_dict_file_name='test_dictTimeSpace.json')
    with torch.cuda.device(gpu_num):
        if train_mode:
            master_train(gages_model_train, random_seed=random_seed)
        pred, obs = master_test(gages_model_test, epoch=test_epoch)
        basin_area = gages_model_test.data_source.read_attr(
            gages_model_test.t_s_dict["sites_id"], ['DRAIN_SQKM'],
            is_return_dict=False)
        mean_prep = gages_model_test.data_source.read_attr(
            gages_model_test.t_s_dict["sites_id"], ['PPTAVG_BASIN'],
            is_return_dict=False)
        mean_prep = mean_prep / 365 * 10
        pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
        obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
        save_result(gages_model_test.data_source.data_config.data_path['Temp'],
                    test_epoch, pred, obs)
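
# dam_lstm intersects the DOR-screened set with the dammed set, while
# test_some_reservoirs (dam_plan == 2) unions a DOR-screened set with the
# undammed set. A small illustration of the difference, with hypothetical IDs:
def site_set_algebra_sketch():
    import numpy as np

    high_dor = ["01013500", "01030500", "01047000"]   # DOR above threshold (hypothetical)
    with_dams = ["01030500", "01047000", "01052500"]  # dam_num in [1, 100000]
    no_dams = ["01031500", "01054200"]                # dam_num == 0

    # dam_lstm: basins that are both DOR-screened and actually dammed
    chosen_dam = np.intersect1d(high_dor, with_dams).tolist()
    # -> ['01030500', '01047000']

    # dam_plan == 2: DOR-screened basins together with undammed basins
    chosen_union = np.sort(np.union1d(high_dor, no_dams)).tolist()
    return chosen_dam, chosen_union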
# train_stat_dict=gages_model_train.stat_dict,
# screen_basin_area_huc4=False)
gages_model_train = GagesModel.update_data_model(
    config_data, data_model_train,
    data_attr_update=True, screen_basin_area_huc4=False)
gages_model_test = GagesModel.update_data_model(
    config_data, data_model_test,
    data_attr_update=True,
    train_stat_dict=gages_model_train.stat_dict,
    screen_basin_area_huc4=False)
with torch.cuda.device(gpu_lst[i]):
    pred, obs = master_test(gages_model_test, epoch=test_epoch)
    basin_area = gages_model_test.data_source.read_attr(
        gages_model_test.t_s_dict["sites_id"], ['DRAIN_SQKM'],
        is_return_dict=False)
    mean_prep = gages_model_test.data_source.read_attr(
        gages_model_test.t_s_dict["sites_id"], ['PPTAVG_BASIN'],
        is_return_dict=False)
    mean_prep = mean_prep / 365 * 10
    pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
    obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
    save_result(gages_model_test.data_source.data_config.data_path['Temp'],
                test_epoch, pred, obs)
    plot_we_need(gages_model_test, obs, pred,
                 id_col="STAID", lon_col="LNG_GAGE", lat_col="LAT_GAGE")
def synergy_ecoregion(args):
    update_cfg(cfg, args)
    cache = cfg.CACHE.STATE
    train_mode = cfg.TRAIN_MODE
    test_epoch = cfg.TEST_EPOCH
    config_data = GagesConfig(cfg)
    eco_names = [("ECO2_CODE", 5.2), ("ECO2_CODE", 5.3), ("ECO2_CODE", 6.2),
                 ("ECO2_CODE", 7.1), ("ECO2_CODE", 8.1), ("ECO2_CODE", 8.2),
                 ("ECO2_CODE", 8.3), ("ECO2_CODE", 8.4), ("ECO2_CODE", 8.5),
                 ("ECO2_CODE", 9.2), ("ECO2_CODE", 9.3), ("ECO2_CODE", 9.4),
                 ("ECO2_CODE", 9.5), ("ECO2_CODE", 9.6), ("ECO2_CODE", 10.1),
                 ("ECO2_CODE", 10.2), ("ECO2_CODE", 10.4), ("ECO2_CODE", 11.1),
                 ("ECO2_CODE", 12.1), ("ECO2_CODE", 13.1)]

    quick_data_dir = os.path.join(config_data.data_path["DB"], "quickdata")
    data_dir = os.path.join(quick_data_dir, "conus-all_90-10_nan-0.0_00-1.0")
    data_model_train = GagesModel.load_datamodel(
        data_dir, data_source_file_name='data_source.txt',
        stat_file_name='Statistics.json', flow_file_name='flow.npy',
        forcing_file_name='forcing.npy', attr_file_name='attr.npy',
        f_dict_file_name='dictFactorize.json',
        var_dict_file_name='dictAttribute.json',
        t_s_dict_file_name='dictTimeSpace.json')
    data_model_test = GagesModel.load_datamodel(
        data_dir, data_source_file_name='test_data_source.txt',
        stat_file_name='test_Statistics.json', flow_file_name='test_flow.npy',
        forcing_file_name='test_forcing.npy', attr_file_name='test_attr.npy',
        f_dict_file_name='test_dictFactorize.json',
        var_dict_file_name='test_dictAttribute.json',
        t_s_dict_file_name='test_dictTimeSpace.json')

    for eco_name in eco_names:
        source_data = GagesSource.choose_some_basins(
            config_data, config_data.model_dict["data"]["tRangeTrain"],
            screen_basin_area_huc4=False, ecoregion=eco_name)
        sites_id = source_data.all_configs['flow_screen_gage_id']
        sites_id_inter = np.intersect1d(data_model_train.t_s_dict["sites_id"],
                                        sites_id)
        if sites_id_inter.size < 1:
            continue
        config_data = GagesConfig.set_subdir(cfg, str(eco_name[1]))
        gages_model_train = GagesModel.update_data_model(
            config_data, data_model_train, sites_id_update=sites_id,
            data_attr_update=True, screen_basin_area_huc4=False)
        gages_model_test = GagesModel.update_data_model(
            config_data, data_model_test, sites_id_update=sites_id,
            data_attr_update=True, train_stat_dict=gages_model_train.stat_dict,
            screen_basin_area_huc4=False)
        if cache:
            save_datamodel(gages_model_train, data_source_file_name='data_source.txt',
                           stat_file_name='Statistics.json', flow_file_name='flow',
                           forcing_file_name='forcing', attr_file_name='attr',
                           f_dict_file_name='dictFactorize.json',
                           var_dict_file_name='dictAttribute.json',
                           t_s_dict_file_name='dictTimeSpace.json')
            save_datamodel(gages_model_test, data_source_file_name='test_data_source.txt',
                           stat_file_name='test_Statistics.json', flow_file_name='test_flow',
                           forcing_file_name='test_forcing', attr_file_name='test_attr',
                           f_dict_file_name='test_dictFactorize.json',
                           var_dict_file_name='test_dictAttribute.json',
                           t_s_dict_file_name='test_dictTimeSpace.json')
            print("save ecoregion " + str(eco_name[1]) + " data model")
        with torch.cuda.device(0):
            if train_mode:
                master_train(gages_model_train)
            pred, obs = master_test(gages_model_test, epoch=test_epoch)
            basin_area = gages_model_test.data_source.read_attr(
                gages_model_test.t_s_dict["sites_id"], ['DRAIN_SQKM'],
                is_return_dict=False)
            mean_prep = gages_model_test.data_source.read_attr(
                gages_model_test.t_s_dict["sites_id"], ['PPTAVG_BASIN'],
                is_return_dict=False)
            mean_prep = mean_prep / 365 * 10
            pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
            obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
            save_result(gages_model_test.data_source.data_config.data_path['Temp'],
                        test_epoch, pred, obs)
def conus_lstm(args):
    update_cfg(cfg, args)
    random_seed = cfg.RANDOM_SEED
    test_epoch = cfg.TEST_EPOCH
    gpu_num = cfg.CTX
    train_mode = cfg.TRAIN_MODE
    print("train and test in CONUS: \n")
    print(cfg)
    config_data = GagesConfig(cfg)

    gages_model = GagesModels(config_data, screen_basin_area_huc4=False)
    gages_model_train = gages_model.data_model_train
    gages_model_test = gages_model.data_model_test
    if cfg.CACHE.GEN_QUICK_DATA:
        if not os.path.isdir(cfg.CACHE.DATA_DIR):
            os.makedirs(cfg.CACHE.DATA_DIR)
        save_quick_data(gages_model_train, cfg.CACHE.DATA_DIR,
                        data_source_file_name='data_source.txt',
                        stat_file_name='Statistics.json', flow_file_name='flow',
                        forcing_file_name='forcing', attr_file_name='attr',
                        f_dict_file_name='dictFactorize.json',
                        var_dict_file_name='dictAttribute.json',
                        t_s_dict_file_name='dictTimeSpace.json')
        save_quick_data(gages_model_test, cfg.CACHE.DATA_DIR,
                        data_source_file_name='test_data_source.txt',
                        stat_file_name='test_Statistics.json', flow_file_name='test_flow',
                        forcing_file_name='test_forcing', attr_file_name='test_attr',
                        f_dict_file_name='test_dictFactorize.json',
                        var_dict_file_name='test_dictAttribute.json',
                        t_s_dict_file_name='test_dictTimeSpace.json')
    if cfg.CACHE.STATE:
        save_datamodel(gages_model_train, data_source_file_name='data_source.txt',
                       stat_file_name='Statistics.json', flow_file_name='flow',
                       forcing_file_name='forcing', attr_file_name='attr',
                       f_dict_file_name='dictFactorize.json',
                       var_dict_file_name='dictAttribute.json',
                       t_s_dict_file_name='dictTimeSpace.json')
        save_datamodel(gages_model_test, data_source_file_name='test_data_source.txt',
                       stat_file_name='test_Statistics.json', flow_file_name='test_flow',
                       forcing_file_name='test_forcing', attr_file_name='test_attr',
                       f_dict_file_name='test_dictFactorize.json',
                       var_dict_file_name='test_dictAttribute.json',
                       t_s_dict_file_name='test_dictTimeSpace.json')
    with torch.cuda.device(gpu_num):
        if train_mode:
            master_train(gages_model_train, random_seed=random_seed)
        pred, obs = master_test(gages_model_test, epoch=test_epoch)
        basin_area = gages_model_test.data_source.read_attr(
            gages_model_test.t_s_dict["sites_id"], ['DRAIN_SQKM'],
            is_return_dict=False)
        mean_prep = gages_model_test.data_source.read_attr(
            gages_model_test.t_s_dict["sites_id"], ['PPTAVG_BASIN'],
            is_return_dict=False)
        mean_prep = mean_prep / 365 * 10
        pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
        obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
        save_result(gages_model_test.data_source.data_config.data_path['Temp'],
                    test_epoch, pred, obs)
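
# Usage note: every script above ends by denormalizing pred/obs and calling
# save_result, which (judging from the pred_name/obs_name overrides seen in
# pub_lstm) leaves flow_pred and flow_obs arrays on disk. A sketch of scoring
# them with Nash-Sutcliffe efficiency; the file names here are assumptions.
def nse_from_saved_results(pred_file="flow_pred.npy", obs_file="flow_obs.npy"):
    import numpy as np

    def nse(obs, pred):
        # Nash-Sutcliffe efficiency for one basin's daily series, NaNs ignored
        mask = ~np.isnan(obs)
        obs, pred = obs[mask], pred[mask]
        return 1 - np.sum((obs - pred) ** 2) / np.sum((obs - np.mean(obs)) ** 2)

    pred = np.load(pred_file)  # (n_sites, n_days)
    obs = np.load(obs_file)
    nse_per_basin = np.array([nse(obs[k], pred[k]) for k in range(obs.shape[0])])
    print("median NSE:", np.nanmedian(nse_per_basin))
    return nse_per_basin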