def test_dam_test(self):
    with torch.cuda.device(0):
        gages_input = GagesModel.load_datamodel(
            self.config_data.data_path["Temp"],
            data_source_file_name='test_data_source.txt',
            stat_file_name='test_Statistics.json',
            flow_file_name='test_flow.npy',
            forcing_file_name='test_forcing.npy',
            attr_file_name='test_attr.npy',
            f_dict_file_name='test_dictFactorize.json',
            var_dict_file_name='test_dictAttribute.json',
            t_s_dict_file_name='test_dictTimeSpace.json')
        pred, obs = master_test(gages_input, epoch=cfg.TEST_EPOCH)
        basin_area = gages_input.data_source.read_attr(
            gages_input.t_s_dict["sites_id"], ['DRAIN_SQKM'], is_return_dict=False)
        mean_prep = gages_input.data_source.read_attr(
            gages_input.t_s_dict["sites_id"], ['PPTAVG_BASIN'], is_return_dict=False)
        # rescale mean annual precipitation from cm/year to mm/day
        mean_prep = mean_prep / 365 * 10
        pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
        obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
        save_result(gages_input.data_source.data_config.data_path['Temp'], cfg.TEST_EPOCH, pred, obs)
        plot_we_need(gages_input, obs, pred, id_col="STAID", lon_col="LNG_GAGE", lat_col="LAT_GAGE")
def test_test_camels(self):
    data_model = CamelsModel.load_datamodel(
        self.config_data.data_path["Temp"],
        data_source_file_name='test_data_source.txt',
        stat_file_name='test_Statistics.json',
        flow_file_name='test_flow.npy',
        forcing_file_name='test_forcing.npy',
        attr_file_name='test_attr.npy',
        f_dict_file_name='test_dictFactorize.json',
        var_dict_file_name='test_dictAttribute.json',
        t_s_dict_file_name='test_dictTimeSpace.json')
    with torch.cuda.device(2):
        pred, obs = master_test(data_model)
        basin_area = data_model.data_source.read_attr(
            data_model.t_s_dict["sites_id"], ['area_gages2'], is_return_dict=False)
        # CAMELS p_mean is already in mm/day, so no unit conversion is needed here
        mean_prep = data_model.data_source.read_attr(
            data_model.t_s_dict["sites_id"], ['p_mean'], is_return_dict=False)
        pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
        obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
        plot_we_need(data_model, obs, pred, id_col="id", lon_col="lon", lat_col="lat")
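# Note: the tests in this file repeat the same inverse basin-normalization steps:
# read the drainage-area and mean-precipitation attributes, rescale the
# precipitation to mm/day where needed, and run _basin_norm with to_norm=False on
# both pred and obs. Below is a minimal sketch of a helper that could consolidate
# that pattern; the name denormalize_runoff and its keyword arguments are
# hypothetical, not part of this codebase, and it assumes the _basin_norm already
# imported in this module.
def denormalize_runoff(data_model, pred, obs, area_attr, prep_attr, prep_to_mm_per_day=1.0):
    """Invert basin-normalized flow back to physical units (hypothetical sketch).

    area_attr/prep_attr name the drainage-area and mean-precipitation attributes
    (e.g. 'DRAIN_SQKM'/'PPTAVG_BASIN' for GAGES, 'area_gages2'/'p_mean' for
    CAMELS). prep_to_mm_per_day rescales the precipitation when it is not already
    in mm/day; the 10 / 365 factor used in the GAGES tests suggests PPTAVG_BASIN
    is stored in cm/year.
    """
    sites_id = data_model.t_s_dict["sites_id"]
    basin_area = data_model.data_source.read_attr(sites_id, [area_attr], is_return_dict=False)
    mean_prep = data_model.data_source.read_attr(sites_id, [prep_attr], is_return_dict=False)
    mean_prep = mean_prep * prep_to_mm_per_day
    pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
    obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
    return pred, obs

# The GAGES tests would then reduce to:
#     pred, obs = denormalize_runoff(gages_input, pred, obs,
#                                    area_attr='DRAIN_SQKM', prep_attr='PPTAVG_BASIN',
#                                    prep_to_mm_per_day=10 / 365)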
def test_test_gages_wo_attr(self):
    config_dir = definitions.CONFIG_DIR
    config_file = os.path.join(config_dir, "susquehanna/config_exp2.ini")
    subdir = r"susquehanna/exp2"
    config_data = GagesConfig.set_subdir(config_file, subdir)
    data_model = GagesModelWoBasinNorm.load_datamodel(
        config_data.data_path["Temp"],
        data_source_file_name='test_data_source.txt',
        stat_file_name='test_Statistics.json',
        flow_file_name='test_flow.npy',
        forcing_file_name='test_forcing.npy',
        attr_file_name='test_attr.npy',
        f_dict_file_name='test_dictFactorize.json',
        var_dict_file_name='test_dictAttribute.json',
        t_s_dict_file_name='test_dictTimeSpace.json')
    with torch.cuda.device(2):
        pred, obs = master_test(data_model, epoch=self.test_epoch)
        # GagesModelWoBasinNorm works without basin normalization, so there is no _basin_norm inversion here
        save_result(data_model.data_source.data_config.data_path['Temp'], self.test_epoch, pred, obs)
        plot_we_need(data_model, obs, pred, id_col="STAID", lon_col="LNG_GAGE", lat_col="LAT_GAGE")
def test_test_gages(self):
    data_model = GagesModel.load_datamodel(
        self.config_data.data_path["Temp"],
        data_source_file_name='test_data_source.txt',
        stat_file_name='test_Statistics.json',
        flow_file_name='test_flow.npy',
        forcing_file_name='test_forcing.npy',
        attr_file_name='test_attr.npy',
        f_dict_file_name='test_dictFactorize.json',
        var_dict_file_name='test_dictAttribute.json',
        t_s_dict_file_name='test_dictTimeSpace.json')
    with torch.cuda.device(1):
        data_models = GagesModel.every_model(data_model)
        obs_lst = []
        pred_lst = []
        for i in range(len(data_models)):
            print("\n", "Testing model", str(i + 1), ":\n")
            pred, obs = master_test(data_models[i])
            basin_area = data_models[i].data_source.read_attr(
                data_models[i].t_s_dict["sites_id"], ['area_gages2'], is_return_dict=False)
            mean_prep = data_models[i].data_source.read_attr(
                data_models[i].t_s_dict["sites_id"], ['p_mean'], is_return_dict=False)
            pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
            obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
            obs_lst.append(obs.flatten())
            pred_lst.append(pred.flatten())
        preds = np.array(pred_lst)
        obss = np.array(obs_lst)
        plot_we_need(data_model, obss, preds, id_col="id", lon_col="lon", lat_col="lat")
def test_data_temp_test_damcls(self):
    with torch.cuda.device(0):
        nid_dir = os.path.join("/".join(self.config_data.data_path["DB"].split("/")[:-1]), "nid", "quickdata")
        gage_main_dam_purpose = unserialize_json(os.path.join(nid_dir, "dam_main_purpose_dict.json"))
        gage_main_dam_purpose_lst = list(gage_main_dam_purpose.values())
        gage_main_dam_purpose_unique = np.unique(gage_main_dam_purpose_lst)
        # run the test separately for every unique main dam purpose
        for purpose in gage_main_dam_purpose_unique:
            df = GagesModel.load_datamodel(
                self.config_data.data_path["Temp"], purpose,
                data_source_file_name='test_data_source.txt',
                stat_file_name='test_Statistics.json',
                flow_file_name='test_flow.npy',
                forcing_file_name='test_forcing.npy',
                attr_file_name='test_attr.npy',
                f_dict_file_name='test_dictFactorize.json',
                var_dict_file_name='test_dictAttribute.json',
                t_s_dict_file_name='test_dictTimeSpace.json')
            new_temp_dir = os.path.join(df.data_source.data_config.model_dict["dir"]["Temp"], purpose)
            new_out_dir = os.path.join(df.data_source.data_config.model_dict["dir"]["Out"], purpose)
            df.update_datamodel_dir(new_temp_dir, new_out_dir)
            pred, obs = master_test(df, epoch=self.test_epoch)
            basin_area = df.data_source.read_attr(df.t_s_dict["sites_id"], ['DRAIN_SQKM'], is_return_dict=False)
            mean_prep = df.data_source.read_attr(df.t_s_dict["sites_id"], ['PPTAVG_BASIN'], is_return_dict=False)
            mean_prep = mean_prep / 365 * 10
            pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
            obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
            save_result(new_temp_dir, self.test_epoch, pred, obs)
def test_dam_test(self):
    quick_data_dir = os.path.join(self.config_data.data_path["DB"], "quickdata")
    data_dir = os.path.join(quick_data_dir, "conus-all_90-10_nan-0.0_00-1.0")
    data_model_train = GagesModel.load_datamodel(
        data_dir,
        data_source_file_name='data_source.txt',
        stat_file_name='Statistics.json',
        flow_file_name='flow.npy',
        forcing_file_name='forcing.npy',
        attr_file_name='attr.npy',
        f_dict_file_name='dictFactorize.json',
        var_dict_file_name='dictAttribute.json',
        t_s_dict_file_name='dictTimeSpace.json')
    data_model_test = GagesModel.load_datamodel(
        data_dir,
        data_source_file_name='test_data_source.txt',
        stat_file_name='test_Statistics.json',
        flow_file_name='test_flow.npy',
        forcing_file_name='test_forcing.npy',
        attr_file_name='test_attr.npy',
        f_dict_file_name='test_dictFactorize.json',
        var_dict_file_name='test_dictAttribute.json',
        t_s_dict_file_name='test_dictTimeSpace.json')
    gages_model_train = GagesModel.update_data_model(self.config_data, data_model_train)
    # normalize the test data with the training-period statistics
    gages_model_test = GagesModel.update_data_model(self.config_data, data_model_test,
                                                    train_stat_dict=gages_model_train.stat_dict)
    nid_dir = os.path.join("/".join(self.config_data.data_path["DB"].split("/")[:-1]), "nid", "quickdata")
    nid_input = NidModel.load_nidmodel(nid_dir, nid_file=self.nid_file,
                                       nid_source_file_name='nid_source.txt',
                                       nid_data_file_name='nid_data.shp')
    gage_main_dam_purpose = unserialize_json(os.path.join(nid_dir, "dam_main_purpose_dict.json"))
    data_input = GagesDamDataModel(gages_model_test, nid_input, True, gage_main_dam_purpose)
    gages_input = choose_which_purpose(data_input)
    pred, obs = master_test(gages_input)
    basin_area = gages_input.data_source.read_attr(
        gages_input.t_s_dict["sites_id"], ['DRAIN_SQKM'], is_return_dict=False)
    mean_prep = gages_input.data_source.read_attr(
        gages_input.t_s_dict["sites_id"], ['PPTAVG_BASIN'], is_return_dict=False)
    mean_prep = mean_prep / 365 * 10
    pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
    obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
    save_result(gages_input.data_source.data_config.data_path['Temp'], self.test_epoch, pred, obs)
def test_explore_test(self):
    # count the model subdirectories under the temp directory
    models_num = 0
    dirs = os.listdir(self.config_data.data_path["Temp"])
    for dir_temp in dirs:
        if os.path.isdir(os.path.join(self.config_data.data_path["Temp"], dir_temp)):
            models_num += 1
    for count in range(models_num):
        print("\n", "testing model", str(count + 1), ":\n")
        data_model = GagesModel.load_datamodel(
            self.config_data.data_path["Temp"], str(count),
            data_source_file_name='test_data_source.txt',
            stat_file_name='test_Statistics.json',
            flow_file_name='test_flow.npy',
            forcing_file_name='test_forcing.npy',
            attr_file_name='test_attr.npy',
            f_dict_file_name='test_dictFactorize.json',
            var_dict_file_name='test_dictAttribute.json',
            t_s_dict_file_name='test_dictTimeSpace.json')
        pred, obs = master_test(data_model)
        # drop the trailing singleton dimension
        pred = pred.reshape(pred.shape[0], pred.shape[1])
        obs = obs.reshape(obs.shape[0], obs.shape[1])
        inds = statError(obs, pred)
        # plot time series of observations and predictions for a few sites
        show_me_num = 5
        t_s_dict = data_model.t_s_dict
        sites = np.array(t_s_dict["sites_id"])
        t_range = np.array(t_s_dict["t_final_range"])
        ts_fig = plot_ts_obs_pred(obs, pred, sites, t_range, show_me_num)
        ts_fig.savefig(os.path.join(data_model.data_source.data_config.data_path["Out"], "ts_fig.png"))
        # plot boxes of the metrics, using the seaborn library
        keys = ["Bias", "RMSE", "NSE"]
        inds_test = subset_of_dict(inds, keys)
        box_fig = plot_diff_boxes(inds_test)
        box_fig.savefig(os.path.join(data_model.data_source.data_config.data_path["Out"], "box_fig.png"))
        # plot a map of NSE per site
        sites_df = pd.DataFrame({"sites": sites, keys[2]: inds_test[keys[2]]})
        plot_ind_map(data_model.data_source.all_configs['gage_point_file'], sites_df)
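# For reference, the "NSE" values that statError reports and plot_ind_map draws
# above follow the standard Nash-Sutcliffe efficiency,
# NSE = 1 - sum((obs - pred)^2) / sum((obs - mean(obs))^2), computed per site.
# A standalone sketch of that metric follows; statError's actual implementation
# may differ in details such as NaN handling and the other metrics it returns.
import numpy as np  # already available as np in this module; repeated so the sketch is self-contained

def nse_per_site(obs, pred):
    """Nash-Sutcliffe efficiency for each row of (n_sites, n_days) arrays."""
    obs_mean = np.nanmean(obs, axis=1, keepdims=True)
    numerator = np.nansum((obs - pred) ** 2, axis=1)
    denominator = np.nansum((obs - obs_mean) ** 2, axis=1)
    return 1 - numerator / denominator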
def test_test_susquehanna(self):
    t_test = self.config_data.model_dict["data"]["tRangeTest"]
    source_data = SusquehannaSource(self.config_data, t_test)
    # build the input data-model object
    data_model = SusquehannaModel(source_data)
    with torch.cuda.device(1):
        pred, obs = master_test(data_model, epoch=self.test_epoch)
        basin_area = data_model.data_attr[:, 0:1]
        mean_prep = data_model.ppt_avg_basin
        pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
        obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
        save_result(data_model.data_source.data_config.data_path['Temp'], self.test_epoch, pred, obs)
        plot_we_need(data_model, obs, pred, id_col="id", lon_col="lon", lat_col="lat")
def test_Susquehanna(self):
    t_test = self.config_data.model_dict["data"]["tRangeTest"]
    source_data = SusquehannaSource(self.config_data, t_test)
    # build the input data-model object
    data_model = SusquehannaModel(source_data)
    with torch.cuda.device(1):
        pred, obs = master_test(data_model, epoch=300)
        flow_pred_file = os.path.join(data_model.data_source.data_config.data_path['Temp'], 'flow_pred')
        flow_obs_file = os.path.join(data_model.data_source.data_config.data_path['Temp'], 'flow_obs')
        serialize_numpy(pred, flow_pred_file)
        serialize_numpy(obs, flow_obs_file)
        plot_we_need(data_model, obs, pred, id_col="id", lon_col="lon", lat_col="lat")
def test_test_gages4susquehanna(self):
    config_dir = definitions.CONFIG_DIR
    config_file = os.path.join(config_dir, "susquehanna/config_exp4.ini")
    subdir = r"susquehanna/exp4"
    config_data = GagesConfig.set_subdir(config_file, subdir)
    dor = -0.02
    gages_model = GagesModels(config_data, screen_basin_area_huc4=False, DOR=dor)
    save_datamodel(gages_model.data_model_test,
                   data_source_file_name='test_data_source.txt',
                   stat_file_name='test_Statistics.json',
                   flow_file_name='test_flow',
                   forcing_file_name='test_forcing',
                   attr_file_name='test_attr',
                   f_dict_file_name='test_dictFactorize.json',
                   var_dict_file_name='test_dictAttribute.json',
                   t_s_dict_file_name='test_dictTimeSpace.json')
    data_model = gages_model.data_model_test
    with torch.cuda.device(2):
        pred, obs = master_test(data_model, epoch=self.test_epoch)
        basin_area = data_model.data_source.read_attr(
            data_model.t_s_dict["sites_id"], ['DRAIN_SQKM'], is_return_dict=False)
        mean_prep = data_model.data_source.read_attr(
            data_model.t_s_dict["sites_id"], ['PPTAVG_BASIN'], is_return_dict=False)
        mean_prep = mean_prep / 365 * 10
        pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
        obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
        save_result(data_model.data_source.data_config.data_path['Temp'], self.test_epoch, pred, obs)
        plot_we_need(data_model, obs, pred, id_col="STAID", lon_col="LNG_GAGE", lat_col="LAT_GAGE")
def test_dam_test(self):
    with torch.cuda.device(1):
        quick_data_dir = os.path.join(self.config_data.data_path["DB"], "quickdata")
        data_dir = os.path.join(quick_data_dir, "allnonref-dam_95-05_nan-0.1_00-1.0")
        data_model_test = GagesModel.load_datamodel(
            data_dir,
            data_source_file_name='test_data_source.txt',
            stat_file_name='test_Statistics.json',
            flow_file_name='test_flow.npy',
            forcing_file_name='test_forcing.npy',
            attr_file_name='test_attr.npy',
            f_dict_file_name='test_dictFactorize.json',
            var_dict_file_name='test_dictAttribute.json',
            t_s_dict_file_name='test_dictTimeSpace.json')
        gages_input = GagesModel.update_data_model(self.config_data, data_model_test)
        pred, obs = master_test(gages_input, epoch=self.test_epoch)
        basin_area = gages_input.data_source.read_attr(
            gages_input.t_s_dict["sites_id"], ['DRAIN_SQKM'], is_return_dict=False)
        mean_prep = gages_input.data_source.read_attr(
            gages_input.t_s_dict["sites_id"], ['PPTAVG_BASIN'], is_return_dict=False)
        mean_prep = mean_prep / 365 * 10
        pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
        obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
        save_result(gages_input.data_source.data_config.data_path['Temp'], self.test_epoch, pred, obs)
        plot_we_need(gages_input, obs, pred, id_col="STAID", lon_col="LNG_GAGE", lat_col="LAT_GAGE")