def test_dam_train(self):
    with torch.cuda.device(0):
        quick_data_dir = os.path.join(self.config_data.data_path["DB"], "quickdata")
        data_dir = os.path.join(quick_data_dir, "allnonref_85-05_nan-0.1_00-1.0")
        data_model_8595 = GagesModel.load_datamodel(
            data_dir,
            data_source_file_name='data_source.txt',
            stat_file_name='Statistics.json',
            flow_file_name='flow.npy',
            forcing_file_name='forcing.npy',
            attr_file_name='attr.npy',
            f_dict_file_name='dictFactorize.json',
            var_dict_file_name='dictAttribute.json',
            t_s_dict_file_name='dictTimeSpace.json')
        gages_model_train = GagesModel.update_data_model(self.config_data, data_model_8595)
        nid_dir = os.path.join("/".join(self.config_data.data_path["DB"].split("/")[:-1]), "nid", "quickdata")
        nid_input = NidModel.load_nidmodel(
            nid_dir,
            nid_file=self.nid_file,
            nid_source_file_name='nid_source.txt',
            nid_data_file_name='nid_data.shp')
        gage_main_dam_purpose = unserialize_json(os.path.join(nid_dir, "dam_main_purpose_dict.json"))
        data_input = GagesDamDataModel(gages_model_train, nid_input, True, gage_main_dam_purpose)
        gages_input = choose_which_purpose(data_input)
        master_train(gages_input)
def test_gages_dam_attr(self):
    quick_data_dir = os.path.join(self.config_data.data_path["DB"], "quickdata")
    data_dir = os.path.join(quick_data_dir, "conus-all_90-10_nan-0.0_00-1.0")
    df = GagesModel.load_datamodel(
        data_dir,
        data_source_file_name='data_source.txt',
        stat_file_name='Statistics.json',
        flow_file_name='flow.npy',
        forcing_file_name='forcing.npy',
        attr_file_name='attr.npy',
        f_dict_file_name='dictFactorize.json',
        var_dict_file_name='dictAttribute.json',
        t_s_dict_file_name='dictTimeSpace.json')
    nid_input = NidModel(self.config_data.config_file)
    nid_dir = os.path.join("/".join(self.config_data.data_path["DB"].split("/")[:-1]), "nid", "test")
    save_nidinput(nid_input, nid_dir, nid_source_file_name='nid_source.txt', nid_data_file_name='nid_data.shp')
    data_input = GagesDamDataModel(df, nid_input)
    serialize_json(data_input.gage_main_dam_purpose, os.path.join(nid_dir, "dam_main_purpose_dict.json"))
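# For orientation: dam_main_purpose_dict.json is assumed to map a gage's USGS site id to
# the NID purpose letter(s) of the main dam(s) in its basin. The ids and codes below are
# illustrative placeholders, not real data:
example_purpose_dict = {
    "01013500": "C",   # flood control
    "01030500": "CR",  # flood control + recreation
    "01031500": "I",   # irrigation
}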
def test_dam_train(self):
    """Train on just one purpose ('C') as a case study"""
    with torch.cuda.device(2):
        quick_data_dir = os.path.join(self.config_data.data_path["DB"], "quickdata")
        data_dir = os.path.join(quick_data_dir, "conus-all_90-10_nan-0.0_00-1.0")
        df = GagesModel.load_datamodel(
            data_dir,
            data_source_file_name='data_source.txt',
            stat_file_name='Statistics.json',
            flow_file_name='flow.npy',
            forcing_file_name='forcing.npy',
            attr_file_name='attr.npy',
            f_dict_file_name='dictFactorize.json',
            var_dict_file_name='dictAttribute.json',
            t_s_dict_file_name='dictTimeSpace.json')
        nid_dir = os.path.join("/".join(self.config_data.data_path["DB"].split("/")[:-1]), "nid", "quickdata")
        nid_input = NidModel.load_nidmodel(
            nid_dir,
            nid_file=self.nid_file,
            nid_source_file_name='nid_source.txt',
            nid_data_file_name='nid_data.shp')
        gage_main_dam_purpose = unserialize_json(os.path.join(nid_dir, "dam_main_purpose_dict.json"))
        data_input = GagesDamDataModel(df, nid_input, True, gage_main_dam_purpose)
        purpose_chosen = 'C'  # NID purpose code for flood control
        gages_input = choose_which_purpose(data_input, purpose=purpose_chosen)
        master_train(gages_input)
def test_dam_test(self):
    quick_data_dir = os.path.join(self.config_data.data_path["DB"], "quickdata")
    data_dir = os.path.join(quick_data_dir, "conus-all_90-10_nan-0.0_00-1.0")
    data_model_train = GagesModel.load_datamodel(
        data_dir,
        data_source_file_name='data_source.txt',
        stat_file_name='Statistics.json',
        flow_file_name='flow.npy',
        forcing_file_name='forcing.npy',
        attr_file_name='attr.npy',
        f_dict_file_name='dictFactorize.json',
        var_dict_file_name='dictAttribute.json',
        t_s_dict_file_name='dictTimeSpace.json')
    data_model_test = GagesModel.load_datamodel(
        data_dir,
        data_source_file_name='test_data_source.txt',
        stat_file_name='test_Statistics.json',
        flow_file_name='test_flow.npy',
        forcing_file_name='test_forcing.npy',
        attr_file_name='test_attr.npy',
        f_dict_file_name='test_dictFactorize.json',
        var_dict_file_name='test_dictAttribute.json',
        t_s_dict_file_name='test_dictTimeSpace.json')
    gages_model_train = GagesModel.update_data_model(self.config_data, data_model_train)
    gages_model_test = GagesModel.update_data_model(self.config_data, data_model_test,
                                                    train_stat_dict=gages_model_train.stat_dict)
    nid_dir = os.path.join("/".join(self.config_data.data_path["DB"].split("/")[:-1]), "nid", "quickdata")
    nid_input = NidModel.load_nidmodel(
        nid_dir,
        nid_file=self.nid_file,
        nid_source_file_name='nid_source.txt',
        nid_data_file_name='nid_data.shp')
    gage_main_dam_purpose = unserialize_json(os.path.join(nid_dir, "dam_main_purpose_dict.json"))
    data_input = GagesDamDataModel(gages_model_test, nid_input, True, gage_main_dam_purpose)
    gages_input = choose_which_purpose(data_input)
    pred, obs = master_test(gages_input)
    basin_area = gages_input.data_source.read_attr(gages_input.t_s_dict["sites_id"], ['DRAIN_SQKM'],
                                                   is_return_dict=False)
    mean_prep = gages_input.data_source.read_attr(gages_input.t_s_dict["sites_id"], ['PPTAVG_BASIN'],
                                                  is_return_dict=False)
    mean_prep = mean_prep / 365 * 10  # mean annual precip in cm -> mm/day
    pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
    obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
    save_result(gages_input.data_source.data_config.data_path['Temp'], self.test_epoch, pred, obs)
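# A minimal sketch of the denormalization that _basin_norm(..., to_norm=False) is assumed
# to perform above: mapping area- and precipitation-normalized runoff back to streamflow
# in ft^3/s, given basin area (km^2) and mean precipitation (mm/day). The 0.0283168
# m^3/ft^3 constant and the broadcasting details are assumptions for illustration, not
# the project's verbatim implementation.
import numpy as np

def basin_norm_sketch(x, basin_area, mean_prep, to_norm=False):
    # x: (n_sites, n_times); basin_area, mean_prep: (n_sites, 1) column vectors
    temp_area = np.tile(basin_area, (1, x.shape[1]))  # km^2, one column per time step
    temp_prep = np.tile(mean_prep, (1, x.shape[1]))   # mm/day
    if to_norm:
        # daily flow volume divided by daily precipitation volume (dimensionless)
        return (x * 0.0283168 * 3600 * 24) / ((temp_area * 10 ** 6) * (temp_prep * 10 ** (-3)))
    # inverse transform: normalized runoff back to ft^3/s
    return x * ((temp_area * 10 ** 6) * (temp_prep * 10 ** (-3))) / (0.0283168 * 3600 * 24)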
def test_dam_train(self):
    quick_data_dir = os.path.join(self.config_data_1.data_path["DB"], "quickdata")
    data_dir = os.path.join(quick_data_dir, "allnonref_85-05_nan-0.1_00-1.0")
    # for the inv model, the train and test datamodels are the same
    data_model_8595 = GagesModel.load_datamodel(
        data_dir,
        data_source_file_name='data_source.txt',
        stat_file_name='Statistics.json',
        flow_file_name='flow.npy',
        forcing_file_name='forcing.npy',
        attr_file_name='attr.npy',
        f_dict_file_name='dictFactorize.json',
        var_dict_file_name='dictAttribute.json',
        t_s_dict_file_name='dictTimeSpace.json')
    t_range1_train = self.config_data_1.model_dict["data"]["tRangeTrain"]
    gages_model1_train = GagesModel.update_data_model(self.config_data_1, data_model_8595,
                                                      t_range_update=t_range1_train, data_attr_update=True)
    t_range2_train = self.config_data_2.model_dict["data"]["tRangeTrain"]
    gages_model2_train = GagesModel.update_data_model(self.config_data_2, data_model_8595,
                                                      t_range_update=t_range2_train, data_attr_update=True)
    nid_dir = os.path.join("/".join(self.config_data_1.data_path["DB"].split("/")[:-1]), "nid", "quickdata")
    nid_input = NidModel.load_nidmodel(
        nid_dir,
        nid_file=self.nid_file,
        nid_source_file_name='nid_source.txt',
        nid_data_file_name='nid_data.shp')
    gage_main_dam_purpose = unserialize_json(os.path.join(nid_dir, "dam_main_purpose_dict.json"))
    gage_main_dam_purpose_lst = list(gage_main_dam_purpose.values())
    gage_main_dam_purpose_unique = np.unique(gage_main_dam_purpose_lst)
    with torch.cuda.device(1):
        # train one inv model per unique main dam purpose
        for i in range(gage_main_dam_purpose_unique.size):
            data_input1 = GagesDamDataModel(gages_model1_train, nid_input, True, gage_main_dam_purpose)
            gages_input1 = choose_which_purpose(data_input1, purpose=gage_main_dam_purpose_unique[i])
            data_input2 = GagesDamDataModel(gages_model2_train, nid_input, True, gage_main_dam_purpose)
            gages_input2 = choose_which_purpose(data_input2, purpose=gage_main_dam_purpose_unique[i])
            data_model = GagesInvDataModel(gages_input1, gages_input2)
            # to resume training: train_lstm_inv(data_model, pre_trained_model_epoch=165)
            train_lstm_inv(data_model)
def test_gages_dam_all_save(self):
    quick_data_dir = os.path.join(self.config_data.data_path["DB"], "quickdata")
    data_dir = os.path.join(quick_data_dir, "conus-all_90-10_nan-0.0_00-1.0")
    data_model_train = GagesModel.load_datamodel(
        data_dir,
        data_source_file_name='data_source.txt',
        stat_file_name='Statistics.json',
        flow_file_name='flow.npy',
        forcing_file_name='forcing.npy',
        attr_file_name='attr.npy',
        f_dict_file_name='dictFactorize.json',
        var_dict_file_name='dictAttribute.json',
        t_s_dict_file_name='dictTimeSpace.json')
    gages_model_train = GagesModel.update_data_model(self.config_data, data_model_train)
    data_model_test = GagesModel.load_datamodel(
        data_dir,
        data_source_file_name='test_data_source.txt',
        stat_file_name='test_Statistics.json',
        flow_file_name='test_flow.npy',
        forcing_file_name='test_forcing.npy',
        attr_file_name='test_attr.npy',
        f_dict_file_name='test_dictFactorize.json',
        var_dict_file_name='test_dictAttribute.json',
        t_s_dict_file_name='test_dictTimeSpace.json')
    gages_model_test = GagesModel.update_data_model(self.config_data, data_model_test,
                                                    train_stat_dict=gages_model_train.stat_dict)
    nid_dir = os.path.join("/".join(self.config_data.data_path["DB"].split("/")[:-1]), "nid", "test")
    nid_input = NidModel.load_nidmodel(nid_dir, nid_source_file_name='nid_source.txt',
                                       nid_data_file_name='nid_data.shp')
    gage_main_dam_purpose = unserialize_json(os.path.join(nid_dir, "dam_main_purpose_dict.json"))
    data_input = GagesDamDataModel(gages_model_test, nid_input, gage_main_dam_purpose)
    data_model_dam = choose_which_purpose(data_input)
    save_datamodel(data_model_dam,
                   data_source_file_name='test_data_source.txt',
                   stat_file_name='test_Statistics.json',
                   flow_file_name='test_flow',
                   forcing_file_name='test_forcing',
                   attr_file_name='test_attr',
                   f_dict_file_name='test_dictFactorize.json',
                   var_dict_file_name='test_dictAttribute.json',
                   t_s_dict_file_name='test_dictTimeSpace.json')
def test_damcls_test_datamodel(self):
    quick_data_dir = os.path.join(self.config_data.data_path["DB"], "quickdata")
    data_dir = os.path.join(quick_data_dir, "allnonref_85-05_nan-0.1_00-1.0")
    data_model_train = GagesModel.load_datamodel(
        data_dir,
        data_source_file_name='data_source.txt',
        stat_file_name='Statistics.json',
        flow_file_name='flow.npy',
        forcing_file_name='forcing.npy',
        attr_file_name='attr.npy',
        f_dict_file_name='dictFactorize.json',
        var_dict_file_name='dictAttribute.json',
        t_s_dict_file_name='dictTimeSpace.json')
    data_model_test = GagesModel.load_datamodel(
        data_dir,
        data_source_file_name='test_data_source.txt',
        stat_file_name='test_Statistics.json',
        flow_file_name='test_flow.npy',
        forcing_file_name='test_forcing.npy',
        attr_file_name='test_attr.npy',
        f_dict_file_name='test_dictFactorize.json',
        var_dict_file_name='test_dictAttribute.json',
        t_s_dict_file_name='test_dictTimeSpace.json')
    gages_model_train = GagesModel.update_data_model(self.config_data, data_model_train)
    df = GagesModel.update_data_model(self.config_data, data_model_test,
                                      train_stat_dict=gages_model_train.stat_dict)
    nid_dir = os.path.join("/".join(self.config_data.data_path["DB"].split("/")[:-1]), "nid", "quickdata")
    nid_input = NidModel.load_nidmodel(nid_dir, nid_file=self.nid_file,
                                       nid_source_file_name='nid_source.txt',
                                       nid_data_file_name='nid_data.shp')
    gage_main_dam_purpose = unserialize_json(os.path.join(nid_dir, "dam_main_purpose_dict.json"))
    gage_main_dam_purpose_lst = list(gage_main_dam_purpose.values())
    gage_main_dam_purpose_unique = np.unique(gage_main_dam_purpose_lst)
    data_input = GagesDamDataModel(df, nid_input, True, gage_main_dam_purpose)
    # save one test datamodel per unique main dam purpose
    for i in range(gage_main_dam_purpose_unique.size):
        gages_input = choose_which_purpose(data_input, purpose=gage_main_dam_purpose_unique[i])
        save_datamodel(gages_input, gage_main_dam_purpose_unique[i],
                       data_source_file_name='test_data_source.txt',
                       stat_file_name='test_Statistics.json',
                       flow_file_name='test_flow',
                       forcing_file_name='test_forcing',
                       attr_file_name='test_attr',
                       f_dict_file_name='test_dictFactorize.json',
                       var_dict_file_name='test_dictAttribute.json',
                       t_s_dict_file_name='test_dictTimeSpace.json')
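# A hedged round-trip sketch: assuming the second positional argument of save_datamodel
# above names a per-purpose subfolder (an assumption about its layout, not confirmed by
# this file), one purpose's saved test datamodel could later be reloaded like this:
purpose = "C"  # illustrative purpose code
df_purpose = GagesModel.load_datamodel(
    os.path.join(data_dir, purpose),  # hypothetical location of the saved subfolder
    data_source_file_name='test_data_source.txt',
    stat_file_name='test_Statistics.json',
    flow_file_name='test_flow.npy',
    forcing_file_name='test_forcing.npy',
    attr_file_name='test_attr.npy',
    f_dict_file_name='test_dictFactorize.json',
    var_dict_file_name='test_dictAttribute.json',
    t_s_dict_file_name='test_dictTimeSpace.json')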
def test_dam_test(self):
    quick_data_dir = os.path.join(self.config_data_1.data_path["DB"], "quickdata")
    data_dir = os.path.join(quick_data_dir, "allnonref_85-05_nan-0.1_00-1.0")
    # for the inv model, the train and test datamodels are the same
    data_model_8595 = GagesModel.load_datamodel(
        data_dir,
        data_source_file_name='data_source.txt',
        stat_file_name='Statistics.json',
        flow_file_name='flow.npy',
        forcing_file_name='forcing.npy',
        attr_file_name='attr.npy',
        f_dict_file_name='dictFactorize.json',
        var_dict_file_name='dictAttribute.json',
        t_s_dict_file_name='dictTimeSpace.json')
    # for the 2nd model, the train and test datamodels are parts of the test period
    data_model_9505 = GagesModel.load_datamodel(
        data_dir,
        data_source_file_name='test_data_source.txt',
        stat_file_name='test_Statistics.json',
        flow_file_name='test_flow.npy',
        forcing_file_name='test_forcing.npy',
        attr_file_name='test_attr.npy',
        f_dict_file_name='test_dictFactorize.json',
        var_dict_file_name='test_dictAttribute.json',
        t_s_dict_file_name='test_dictTimeSpace.json')
    t_range1_test = self.config_data_1.model_dict["data"]["tRangeTest"]
    # the data of the "90-95" period are known, so its statistics can be computed on that period
    gages_model1_test = GagesModel.update_data_model(self.config_data_1, data_model_8595,
                                                     t_range_update=t_range1_test, data_attr_update=True)
    t_range2_train = self.config_data_2.model_dict["data"]["tRangeTrain"]
    t_range2_test = self.config_data_2.model_dict["data"]["tRangeTest"]
    gages_model2_train = GagesModel.update_data_model(self.config_data_2, data_model_8595,
                                                      t_range_update=t_range2_train, data_attr_update=True)
    gages_model2_test = GagesModel.update_data_model(self.config_data_2, data_model_9505,
                                                     t_range_update=t_range2_test, data_attr_update=True,
                                                     train_stat_dict=gages_model2_train.stat_dict)
    nid_dir = os.path.join("/".join(self.config_data_2.data_path["DB"].split("/")[:-1]), "nid", "quickdata")
    nid_input = NidModel.load_nidmodel(
        nid_dir,
        nid_file=self.nid_file,
        nid_source_file_name='nid_source.txt',
        nid_data_file_name='nid_data.shp')
    gage_main_dam_purpose = unserialize_json(os.path.join(nid_dir, "dam_main_purpose_dict.json"))
    data_input1 = GagesDamDataModel(gages_model1_test, nid_input, True, gage_main_dam_purpose)
    df1 = choose_which_purpose(data_input1)
    data_input2 = GagesDamDataModel(gages_model2_test, nid_input, True, gage_main_dam_purpose)
    df2 = choose_which_purpose(data_input2)
    with torch.cuda.device(2):
        data_model = GagesInvDataModel(df1, df2)
        pred, obs = test_lstm_inv(data_model, epoch=self.test_epoch)
        basin_area = df2.data_source.read_attr(df2.t_s_dict["sites_id"], ['DRAIN_SQKM'], is_return_dict=False)
        mean_prep = df2.data_source.read_attr(df2.t_s_dict["sites_id"], ['PPTAVG_BASIN'], is_return_dict=False)
        mean_prep = mean_prep / 365 * 10  # mean annual precip in cm -> mm/day
        pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
        obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
        save_result(df2.data_source.data_config.data_path['Temp'], self.test_epoch, pred, obs)
def test_dam_train(self):
    quick_data_dir = os.path.join(self.config_data.data_path["DB"], "quickdata")
    sim_data_dir = os.path.join(quick_data_dir, "allref_85-05_nan-0.1_00-1.0")
    data_dir = os.path.join(quick_data_dir, "allnonref_85-05_nan-0.1_00-1.0")
    data_model_sim8595 = GagesModel.load_datamodel(
        sim_data_dir,
        data_source_file_name='data_source.txt',
        stat_file_name='Statistics.json',
        flow_file_name='flow.npy',
        forcing_file_name='forcing.npy',
        attr_file_name='attr.npy',
        f_dict_file_name='dictFactorize.json',
        var_dict_file_name='dictAttribute.json',
        t_s_dict_file_name='dictTimeSpace.json')
    data_model_8595 = GagesModel.load_datamodel(
        data_dir,
        data_source_file_name='data_source.txt',
        stat_file_name='Statistics.json',
        flow_file_name='flow.npy',
        forcing_file_name='forcing.npy',
        attr_file_name='attr.npy',
        f_dict_file_name='dictFactorize.json',
        var_dict_file_name='dictAttribute.json',
        t_s_dict_file_name='dictTimeSpace.json')
    sim_gages_model_train = GagesModel.update_data_model(self.sim_config_data, data_model_sim8595,
                                                         data_attr_update=True)
    gages_model_train = GagesModel.update_data_model(self.config_data, data_model_8595, data_attr_update=True)
    nid_dir = os.path.join("/".join(self.config_data.data_path["DB"].split("/")[:-1]), "nid", "quickdata")
    nid_input = NidModel.load_nidmodel(
        nid_dir,
        nid_file=self.nid_file,
        nid_source_file_name='nid_source.txt',
        nid_data_file_name='nid_data.shp')
    gage_main_dam_purpose = unserialize_json(os.path.join(nid_dir, "dam_main_purpose_dict.json"))
    gage_main_dam_purpose_lst = list(gage_main_dam_purpose.values())
    gage_main_dam_purpose_unique = np.unique(gage_main_dam_purpose_lst)
    data_input = GagesDamDataModel(gages_model_train, nid_input, True, gage_main_dam_purpose)
    with torch.cuda.device(0):
        for i in range(gage_main_dam_purpose_unique.size):
            sim_gages_model_train.update_model_param('train', nEpoch=300)
            gages_input = choose_which_purpose(data_input, purpose=gage_main_dam_purpose_unique[i])
            # train each purpose's model in its own Temp/Out subdirectory
            new_temp_dir = os.path.join(gages_input.data_source.data_config.model_dict["dir"]["Temp"],
                                        gage_main_dam_purpose_unique[i])
            new_out_dir = os.path.join(gages_input.data_source.data_config.model_dict["dir"]["Out"],
                                       gage_main_dam_purpose_unique[i])
            gages_input.update_datamodel_dir(new_temp_dir, new_out_dir)
            data_model = GagesSimDataModel(sim_gages_model_train, gages_input)
            # to resume training: master_train_natural_flow(data_model, pre_trained_model_epoch=25)
            master_train_natural_flow(data_model)
def test_dam_test(self):
    quick_data_dir = os.path.join(self.config_data.data_path["DB"], "quickdata")
    sim_data_dir = os.path.join(quick_data_dir, "allref_85-05_nan-0.1_00-1.0")
    data_dir = os.path.join(quick_data_dir, "allnonref_85-05_nan-0.1_00-1.0")
    data_model_sim8595 = GagesModel.load_datamodel(
        sim_data_dir,
        data_source_file_name='data_source.txt',
        stat_file_name='Statistics.json',
        flow_file_name='flow.npy',
        forcing_file_name='forcing.npy',
        attr_file_name='attr.npy',
        f_dict_file_name='dictFactorize.json',
        var_dict_file_name='dictAttribute.json',
        t_s_dict_file_name='dictTimeSpace.json')
    data_model_8595 = GagesModel.load_datamodel(
        data_dir,
        data_source_file_name='data_source.txt',
        stat_file_name='Statistics.json',
        flow_file_name='flow.npy',
        forcing_file_name='forcing.npy',
        attr_file_name='attr.npy',
        f_dict_file_name='dictFactorize.json',
        var_dict_file_name='dictAttribute.json',
        t_s_dict_file_name='dictTimeSpace.json')
    data_model_sim9505 = GagesModel.load_datamodel(
        sim_data_dir,
        data_source_file_name='test_data_source.txt',
        stat_file_name='test_Statistics.json',
        flow_file_name='test_flow.npy',
        forcing_file_name='test_forcing.npy',
        attr_file_name='test_attr.npy',
        f_dict_file_name='test_dictFactorize.json',
        var_dict_file_name='test_dictAttribute.json',
        t_s_dict_file_name='test_dictTimeSpace.json')
    data_model_9505 = GagesModel.load_datamodel(
        data_dir,
        data_source_file_name='test_data_source.txt',
        stat_file_name='test_Statistics.json',
        flow_file_name='test_flow.npy',
        forcing_file_name='test_forcing.npy',
        attr_file_name='test_attr.npy',
        f_dict_file_name='test_dictFactorize.json',
        var_dict_file_name='test_dictAttribute.json',
        t_s_dict_file_name='test_dictTimeSpace.json')
    sim_gages_model_train = GagesModel.update_data_model(self.sim_config_data, data_model_sim8595,
                                                         data_attr_update=True)
    gages_model_train = GagesModel.update_data_model(self.config_data, data_model_8595, data_attr_update=True)
    sim_gages_model_test = GagesModel.update_data_model(self.sim_config_data, data_model_sim9505,
                                                        data_attr_update=True,
                                                        train_stat_dict=sim_gages_model_train.stat_dict)
    gages_model_test = GagesModel.update_data_model(self.config_data, data_model_9505,
                                                    data_attr_update=True,
                                                    train_stat_dict=gages_model_train.stat_dict)
    nid_dir = os.path.join("/".join(self.config_data.data_path["DB"].split("/")[:-1]), "nid", "quickdata")
    nid_input = NidModel.load_nidmodel(
        nid_dir,
        nid_file=self.nid_file,
        nid_source_file_name='nid_source.txt',
        nid_data_file_name='nid_data.shp')
    gage_main_dam_purpose = unserialize_json(os.path.join(nid_dir, "dam_main_purpose_dict.json"))
    gage_main_dam_purpose_lst = list(gage_main_dam_purpose.values())
    gage_main_dam_purpose_unique = np.unique(gage_main_dam_purpose_lst)
    data_input = GagesDamDataModel(gages_model_test, nid_input, True, gage_main_dam_purpose)
    for i in range(gage_main_dam_purpose_unique.size):
        sim_gages_model_test.update_model_param('train', nEpoch=300)
        gages_input = choose_which_purpose(data_input, purpose=gage_main_dam_purpose_unique[i])
        new_temp_dir = os.path.join(gages_input.data_source.data_config.model_dict["dir"]["Temp"],
                                    gage_main_dam_purpose_unique[i])
        new_out_dir = os.path.join(gages_input.data_source.data_config.model_dict["dir"]["Out"],
                                   gage_main_dam_purpose_unique[i])
        gages_input.update_datamodel_dir(new_temp_dir, new_out_dir)
        model_input = GagesSimDataModel(sim_gages_model_test, gages_input)
        pred, obs = master_test_natural_flow(model_input, epoch=self.test_epoch)
        basin_area = model_input.data_model2.data_source.read_attr(
            model_input.data_model2.t_s_dict["sites_id"], ['DRAIN_SQKM'], is_return_dict=False)
        mean_prep = model_input.data_model2.data_source.read_attr(
            model_input.data_model2.t_s_dict["sites_id"], ['PPTAVG_BASIN'], is_return_dict=False)
        mean_prep = mean_prep / 365 * 10  # mean annual precip in cm -> mm/day
        pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
        obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
        save_result(model_input.data_model2.data_source.data_config.data_path['Temp'],
                    str(self.test_epoch), pred, obs)
        plot_we_need(gages_input, obs, pred, id_col="STAID", lon_col="LNG_GAGE", lat_col="LAT_GAGE")
def test_gages_nse_dam_attr(self):
    figure_dpi = 600
    config_data = self.config_data
    data_dir = config_data.data_path["Temp"]
    data_model = GagesModel.load_datamodel(
        data_dir,
        data_source_file_name='test_data_source.txt',
        stat_file_name='test_Statistics.json',
        flow_file_name='test_flow.npy',
        forcing_file_name='test_forcing.npy',
        attr_file_name='test_attr.npy',
        f_dict_file_name='test_dictFactorize.json',
        var_dict_file_name='test_dictAttribute.json',
        t_s_dict_file_name='test_dictTimeSpace.json')
    gages_id = data_model.t_s_dict["sites_id"]
    exp_lst = ["basic_exp37", "basic_exp39", "basic_exp40", "basic_exp41", "basic_exp42", "basic_exp43"]
    self.inds_df, pred_mean, obs_mean = load_ensemble_result(config_data.config_file, exp_lst,
                                                             config_data.config_file.TEST_EPOCH,
                                                             return_value=True)
    show_ind_key = 'NSE'
    plt.rcParams['font.family'] = 'serif'
    plt.rcParams['font.serif'] = ['Times New Roman'] + plt.rcParams['font.serif']

    # plot NSE~DOR
    attr_lst = ["RUNAVE7100", "STOR_NOR_2009"]
    attrs_runavg_stor = data_model.data_source.read_attr(gages_id, attr_lst, is_return_dict=False)
    run_avg = attrs_runavg_stor[:, 0] * (10 ** (-3)) * (10 ** 6)  # mm/year -> m^3/year per km^2
    nor_storage = attrs_runavg_stor[:, 1] * 1000  # megaliters per km^2 -> m^3 per km^2
    dors = nor_storage / run_avg  # degree of regulation (dimensionless)
    # DOR == 0 is not exactly the same as dam_num == 0 (some dammed basins have DOR near 0.00),
    # so the zero-DOR class relies mainly on dam_num == 0
    attr_dam_num = ["NDAMS_2009"]
    attrs_dam_num = data_model.data_source.read_attr(gages_id, attr_dam_num, is_return_dict=False)
    df = pd.DataFrame({
        "DOR": dors,
        "DAM_NUM": attrs_dam_num[:, 0],
        show_ind_key: self.inds_df[show_ind_key].values
    })
    hydro_logger.info("statistics of dors:\n %s", df.describe())
    hydro_logger.info("95th percentiles of dors:\n %s", df.quantile(q=0.95))
    hydro_logger.info("ecdf of dors:\n %s", ecdf(dors))

    # boxplot: add columns for the DOR range and the dam_num range of each basin
    dor_value_range_lst = [[0, 0], [0, 0.02], [0.02, 0.05], [0.05, 0.1], [0.1, 0.2], [0.2, 0.4],
                           [0.4, 0.8], [0.8, 10000]]
    dor_range_lst = ["0"] + [
        str(dor_value_range_lst[i][0]) + "-" + str(dor_value_range_lst[i][1])
        for i in range(1, len(dor_value_range_lst) - 1)
    ] + [">" + str(dor_value_range_lst[-1][0])]
    dam_num_value_range_lst = [[0, 0], [0, 1], [1, 3], [3, 5], [5, 10], [10, 20], [20, 50], [50, 10000]]
    dam_num_range_lst = ["0", "1"] + [
        str(dam_num_value_range_lst[i][0]) + "-" + str(dam_num_value_range_lst[i][1])
        for i in range(2, len(dam_num_value_range_lst) - 1)
    ] + [">" + str(dam_num_value_range_lst[-1][0])]

    def in_which_range(value_temp):
        if value_temp == 0:
            return "0"
        the_range = [a_range for a_range in dor_value_range_lst if a_range[0] < value_temp <= a_range[1]]
        if the_range[0][0] == dor_value_range_lst[-1][0]:
            the_range_str = ">" + str(the_range[0][0])
        else:
            the_range_str = str(the_range[0][0]) + "-" + str(the_range[0][1])
        return the_range_str

    def in_which_dam_num_range(value_tmp):
        if value_tmp == 0:
            return "0"
        if value_tmp == 1:
            return "1"
        the_ran = [a_ran for a_ran in dam_num_value_range_lst if a_ran[0] < value_tmp <= a_ran[1]]
        if the_ran[0][0] == dam_num_value_range_lst[-1][0]:
            the_ran_str = ">" + str(the_ran[0][0])
        else:
            the_ran_str = str(the_ran[0][0]) + "-" + str(the_ran[0][1])
        return the_ran_str

    df["DOR_RANGE"] = df["DOR"].apply(in_which_range)
    df["DAM_NUM_RANGE"] = df["DAM_NUM"].apply(in_which_dam_num_range)
    df.loc[(df["DAM_NUM"] > 0) & (df["DOR_RANGE"] == "0"), "DOR_RANGE"] = dor_range_lst[1]
    shown_nse_range_boxplots = [-0.5, 1.0]
    sns.set(font="serif", font_scale=1.5, color_codes=True)
    plot_boxs(df, "DOR_RANGE", show_ind_key, ylim=shown_nse_range_boxplots, order=dor_range_lst)
    plt.savefig(os.path.join(config_data.data_path["Out"],
                             'NSE~DOR-boxplots-' + str(shown_nse_range_boxplots) + '.png'),
                dpi=figure_dpi, bbox_inches="tight")
    plt.figure()
    shown_nse_range_boxplots = [0, 1.0]
    sns.set(font="serif", font_scale=1.5, color_codes=True)
    plot_boxs(df, "DAM_NUM_RANGE", show_ind_key, ylim=shown_nse_range_boxplots, order=dam_num_range_lst)
    plt.savefig(os.path.join(config_data.data_path["Out"],
                             'NSE~DAM_NUM-boxplots-' + str(shown_nse_range_boxplots) + '.png'),
                dpi=figure_dpi, bbox_inches="tight")
    nums_in_dor_range = [df[df["DOR_RANGE"] == a_range_rmp].shape[0] for a_range_rmp in dor_range_lst]
    ratios_in_dor_range = [a_num / df.shape[0] for a_num in nums_in_dor_range]
    hydro_logger.info("the number and ratio of basins in each dor range\n: %s \n %s",
                      nums_in_dor_range, ratios_in_dor_range)
    nums_in_dam_num_range = [df[df["DAM_NUM_RANGE"] == a_range_rmp].shape[0]
                             for a_range_rmp in dam_num_range_lst]
    ratios_in_dam_num_range = [a_num / df.shape[0] for a_num in nums_in_dam_num_range]
    hydro_logger.info("the number and ratio of basins in each dam_num range\n: %s \n %s",
                      nums_in_dam_num_range, ratios_in_dam_num_range)

    # regplot
    plt.figure()
    sns.set(font="serif", font_scale=1.5, color_codes=True)
    sr = sns.regplot(x="DOR", y=show_ind_key, data=df[df[show_ind_key] >= 0], scatter_kws={'s': 10})
    show_dor_max = df.quantile(q=0.95)["DOR"]
    show_dor_min = min(dors)
    plt.ylim(0, 1)
    plt.xlim(show_dor_min, show_dor_max)
    plt.savefig(os.path.join(config_data.data_path["Out"],
                             'NSE~DOR-shown-max-' + str(show_dor_max) + '.png'),
                dpi=figure_dpi, bbox_inches="tight")

    # jointplot
    dor_range = [0.002, 0.2]
    sns.set(font="serif", font_scale=1.5, color_codes=True)
    g = sns.jointplot(x="DOR", y=show_ind_key,
                      data=df[(df["DOR"] < dor_range[1]) & (df["DOR"] > dor_range[0])
                              & (df[show_ind_key] >= 0)],
                      kind="hex", color="b")
    g.ax_marg_x.set_xlim(dor_range[0], dor_range[1])
    plt.savefig(os.path.join(config_data.data_path["Out"],
                             'NSE~DOR(range-)' + str(dor_range) + '-jointplot.png'),
                dpi=figure_dpi, bbox_inches="tight")

    nid_dir = os.path.join("/".join(self.config_data.data_path["DB"].split("/")[:-1]), "nid", "test")
    nid_input = NidModel.load_nidmodel(nid_dir, nid_source_file_name='nid_source.txt',
                                       nid_data_file_name='nid_data.shp')
    gage_main_dam_purpose = unserialize_json(os.path.join(nid_dir, "dam_main_purpose_dict.json"))
    data_input = GagesDamDataModel(data_model, nid_input, gage_main_dam_purpose)
    dam_coords = unserialize_json_ordered(os.path.join(nid_dir, "dam_points_dict.json"))
    dam_storages = unserialize_json_ordered(os.path.join(nid_dir, "dam_storages_dict.json"))
    dam_ids_1 = list(gage_main_dam_purpose.keys())
    dam_ids_2 = list(dam_coords.keys())
    dam_ids_3 = list(dam_storages.keys())
    # the three dicts must share the same strictly-increasing key order so their values align
    assert all(x < y for x, y in zip(dam_ids_1, dam_ids_1[1:]))
    assert all(x < y for x, y in zip(dam_ids_2, dam_ids_2[1:]))
    assert all(x < y for x, y in zip(dam_ids_3, dam_ids_3[1:]))
    sites = list(dam_coords.keys())
    c, ind1, idx_lst_nse_range = np.intersect1d(sites, gages_id, return_indices=True)

    std_storage_in_a_basin = list(map(np.std, dam_storages.values()))
    log_std_storage_in_a_basin = list(map(np.log, np.array(std_storage_in_a_basin) + 1))
    nse_values = self.inds_df["NSE"].values[idx_lst_nse_range]
    df = pd.DataFrame({"DAM_STORAGE_STD": log_std_storage_in_a_basin, show_ind_key: nse_values})
    plt.figure()
    sns.set(font="serif", font_scale=1.5, color_codes=True)
    g = sns.regplot(x="DAM_STORAGE_STD", y=show_ind_key, data=df[df[show_ind_key] >= 0],
                    scatter_kws={'s': 10})
    show_max = max(log_std_storage_in_a_basin)
    show_min = min(log_std_storage_in_a_basin)
    if show_min < 0:
        show_min = 0
    plt.ylim(0, 1)
    plt.xlim(show_min, show_max)
    plt.savefig(os.path.join(config_data.data_path["Out"], 'NSE~' + "DAM_STORAGE_STD" + '.png'),
                dpi=figure_dpi, bbox_inches="tight")

    gages_loc_lat = data_model.data_source.gage_dict["LAT_GAGE"]
    gages_loc_lon = data_model.data_source.gage_dict["LNG_GAGE"]
    gages_loc = [[gages_loc_lat[i], gages_loc_lon[i]] for i in range(len(gages_id))]
    # calculate the index of dispersion, then plot the NSE~dispersion scatterplot;
    # the geo coord systems of gages_loc and dam_coords are both NAD83
    coefficient_of_var = list(map(coefficient_of_variation, gages_loc, dam_coords.values()))
    coefficient_of_var_min = min(coefficient_of_var)
    coefficient_of_var_max = max(coefficient_of_var)
    dispersion_var = "DAM_GAGE_DIS_VAR"
    nse_values = self.inds_df["NSE"].values[idx_lst_nse_range]
    df = pd.DataFrame({dispersion_var: coefficient_of_var, show_ind_key: nse_values})
    plt.figure()
    sns.set(font="serif", font_scale=1.5, color_codes=True)
    g = sns.regplot(x=dispersion_var, y=show_ind_key, data=df[df[show_ind_key] >= 0],
                    scatter_kws={'s': 10})
    show_max = coefficient_of_var_max
    show_min = coefficient_of_var_min
    if show_min < 0:
        show_min = 0
    plt.ylim(0, 1)
    plt.xlim(show_min, show_max)
    plt.savefig(os.path.join(config_data.data_path["Out"], 'NSE~' + dispersion_var + '.png'),
                dpi=figure_dpi, bbox_inches="tight")

    idx_dispersions = list(map(ind_of_dispersion, gages_loc, dam_coords.values()))
    idx_dispersion_min = min(idx_dispersions)
    idx_dispersion_max = max(idx_dispersions)
    dispersion_var = "DAM_DISPERSION_BASIN"
    nse_values = self.inds_df["NSE"].values[idx_lst_nse_range]
    df = pd.DataFrame({dispersion_var: idx_dispersions, show_ind_key: nse_values})
    if idx_dispersion_min < 0:
        idx_dispersion_min = 0
    plt.ylim(0, 1)
    plt.xlim(idx_dispersion_min, idx_dispersion_max)
    sns.set(font="serif", font_scale=1.5, color_codes=True)
    g = sns.jointplot(x=dispersion_var, y=show_ind_key, data=df[df[show_ind_key] >= 0], kind="reg")
    g.ax_marg_x.set_xlim(idx_dispersion_min, idx_dispersion_max)
    g.ax_marg_y.set_ylim(0, 1)
    plt.show()
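# A worked example of the DOR (degree of regulation) arithmetic used above, with
# illustrative numbers. It assumes the GAGES-II conventions that RUNAVE7100 is annual
# runoff depth in mm and STOR_NOR_2009 is normal dam storage in megaliters per km^2.
run_mm_per_year = 500.0
run_m3_per_km2 = run_mm_per_year * 10 ** (-3) * 10 ** 6  # mm -> m, km^2 -> m^2: 5.0e5 m^3
stor_ml_per_km2 = 100.0
stor_m3_per_km2 = stor_ml_per_km2 * 1000                 # 1 megaliter = 1000 m^3: 1.0e5 m^3
dor = stor_m3_per_km2 / run_m3_per_km2                   # 0.2: storage holds ~20% of annual runoff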
include=True): diversions[i] = "yes"

nid_gene_file = os.path.join(cfg.NID.NID_DIR, "test", "dam_main_purpose_dict.json")
if not os.path.isfile(nid_gene_file):
    # generate the purpose dict on the first run, then cache it as nid_gene_file
    df = GagesModel.load_datamodel(
        cfg.CACHE.DATA_DIR,
        data_source_file_name='data_source.txt',
        stat_file_name='Statistics.json',
        flow_file_name='flow.npy',
        forcing_file_name='forcing.npy',
        attr_file_name='attr.npy',
        f_dict_file_name='dictFactorize.json',
        var_dict_file_name='dictAttribute.json',
        t_s_dict_file_name='dictTimeSpace.json')
    nid_input = NidModel(cfg)
    nid_dir = os.path.join(cfg.NID.NID_DIR, "test")
    save_nidinput(nid_input, nid_dir, nid_source_file_name='nid_source.txt', nid_data_file_name='nid_data.shp')
    data_input = GagesDamDataModel(df, nid_input)
    serialize_json(data_input.gage_main_dam_purpose, os.path.join(nid_dir, "dam_main_purpose_dict.json"))
gage_main_dam_purpose = unserialize_json(nid_gene_file)
gage_main_dam_purpose_lst = list(gage_main_dam_purpose.values())
# merge all purpose strings, then split into single-letter purpose codes
gage_main_dam_purpose_lst_merge = "".join(gage_main_dam_purpose_lst)
gage_main_dam_purpose_unique = np.unique(list(gage_main_dam_purpose_lst_merge))
# gage_main_dam_purpose_unique = np.unique(gage_main_dam_purpose_lst)
purpose_regions = {}
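# Why the "".join + list(...) step above matters: a basin's main-purpose value can be a
# multi-letter string (e.g. "CR"), so np.unique over the raw values yields composite
# codes, while joining and splitting into characters yields individual purpose letters.
# The values below are illustrative assumptions:
import numpy as np

purposes = ["C", "CR", "I", "C"]
print(np.unique(purposes))                  # ['C' 'CR' 'I'] -> composite codes
print(np.unique(list("".join(purposes))))   # ['C' 'I' 'R'] -> single-letter codes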