import os

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Project-level helpers (GagesModel, GagesSource, NidModel, GagesDamDataModel, plot_boxs,
# swarmplot_with_cbar, ids_of_regions, split_results_to_regions, subset_of_dict,
# load_ensemble_result, ecdf, unserialize_json, unserialize_json_ordered,
# is_any_elem_in_a_lst, coefficient_of_variation, ind_of_dispersion, hydro_logger)
# are imported from this repository's own modules.


def test_regions_stat(self):
    gages_data_model = GagesModel.load_datamodel(
        self.config_data.data_path["Temp"],
        data_source_file_name='test_data_source.txt',
        stat_file_name='test_Statistics.json',
        flow_file_name='test_flow.npy',
        forcing_file_name='test_forcing.npy',
        attr_file_name='test_attr.npy',
        f_dict_file_name='test_dictFactorize.json',
        var_dict_file_name='test_dictAttribute.json',
        t_s_dict_file_name='test_dictTimeSpace.json')
    id_regions_idx, id_regions_sites_ids = ids_of_regions(gages_data_model)
    preds, obss, inds_dfs = split_results_to_regions(
        gages_data_model, self.test_epoch, id_regions_idx,
        id_regions_sites_ids)
    regions_name = [
        "allref", "cntplain", "esthgnlnd", "mxwdshld", "northest",
        "secstplain", "seplains", "wstmnts", "wstplains", "wstxeric"
    ]
    frames = []
    x_name = "regions"
    y_name = "NSE"
    for i in range(len(id_regions_idx)):
        # plot boxplots with seaborn
        keys = ["NSE"]
        inds_test = subset_of_dict(inds_dfs[i], keys)
        inds_test = inds_test[keys[0]].values
        df_dict_i = {}
        str_i = regions_name[i]
        df_dict_i[x_name] = np.full([inds_test.size], str_i)
        df_dict_i[y_name] = inds_test
        df_i = pd.DataFrame(df_dict_i)
        frames.append(df_i)
    result = pd.concat(frames)
    plot_boxs(result, x_name, y_name)
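
# plot_boxs is defined elsewhere in this repository and is not shown here. The
# stand-in below is only a minimal sketch, inferred from the keyword arguments
# used in this file (ylim, order, rotation), of what a seaborn-based wrapper
# with that call pattern could look like; it is not the repository's implementation.
def _plot_boxs_sketch(data, x_name, y_name, ylim=None, order=None, rotation=None):
    """Hypothetical stand-in: draw a boxplot of y_name grouped by x_name."""
    ax = sns.boxplot(x=x_name, y=y_name, data=data, order=order)
    if ylim is not None:
        ax.set_ylim(ylim[0], ylim[1])
    if rotation is not None:
        plt.setp(ax.get_xticklabels(), rotation=rotation)
    return ax.get_figure()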
def plot_box_inds(indicators):
    """plot boxplots of the given indicators in one figure"""
    data = pd.DataFrame(indicators, index=[0])
    # transform the data to tidy format: first add an indicator-name column,
    # then melt all remaining values into the var_name/value_name columns
    indict_name = "indicator"
    indicts = pd.Series(data.columns.values, name=indict_name)
    data_t = pd.DataFrame(data.values.T)
    data_t = pd.concat([indicts, data_t], axis=1)
    formatted_data = pd.melt(data_t, [indict_name])
    formatted_data = formatted_data.sort_values(by=[indict_name])
    plot_boxs(formatted_data, x_name=indict_name, y_name='value')
def plot_box_inds(indicators):
    """plot boxplots in one coordinate system"""
    data = pd.DataFrame(indicators)
    # transform the data to tidy format: first add an indicator-name column,
    # then melt all remaining values into the var_name/value_name columns
    indict_name = "indicator"
    indicts = pd.Series(data.columns.values, name=indict_name)
    data_t = pd.DataFrame(data.values.T)
    data_t = pd.concat([indicts, data_t], axis=1)
    formatted_data = pd.melt(data_t, [indict_name])
    formatted_data = formatted_data.sort_values(by=[indict_name])
    box_fig = plot_boxs(formatted_data, x_name=indict_name, y_name='value')
    return box_fig
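
# A minimal usage sketch for plot_box_inds: the keys of the indicators mapping
# become the "indicator" categories on the x axis after pd.melt. The metric
# values below are made-up random numbers for illustration only.
def _plot_box_inds_example():
    indicators = {
        "NSE": np.random.uniform(0.0, 1.0, 100),
        "KGE": np.random.uniform(0.0, 1.0, 100),
    }
    box_fig = plot_box_inds(indicators)
    plt.show()
    return box_fig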
def test_3factors(self):
    data_model = self.data_model
    config_data = self.config_data
    test_epoch = self.test_epoch
    # plot three factors
    attr_lst = ["RUNAVE7100", "STOR_NOR_2009"]
    usgs_id = data_model.t_s_dict["sites_id"]
    attrs_runavg_stor = data_model.data_source.read_attr(
        usgs_id, attr_lst, is_return_dict=False)
    run_avg = attrs_runavg_stor[:, 0] * (10 ** (-3)) * (10 ** 6)  # m^3 per year
    nor_storage = attrs_runavg_stor[:, 1] * 1000  # m^3
    dors_value = nor_storage / run_avg
    dors = np.full(len(usgs_id), "dor<0.02")
    for i in range(len(usgs_id)):
        if dors_value[i] >= 0.02:
            dors[i] = "dor≥0.02"
    diversions = np.full(len(usgs_id), "no ")
    diversion_strs = ["diversion", "divert"]
    attr_lst = ["WR_REPORT_REMARKS", "SCREENING_COMMENTS"]
    data_attr = data_model.data_source.read_attr_origin(usgs_id, attr_lst)
    diversion_strs_lower = [elem.lower() for elem in diversion_strs]
    data_attr0_lower = np.array([
        elem.lower() if type(elem) == str else elem for elem in data_attr[0]
    ])
    data_attr1_lower = np.array([
        elem.lower() if type(elem) == str else elem for elem in data_attr[1]
    ])
    data_attr_lower = np.vstack((data_attr0_lower, data_attr1_lower)).T
    for i in range(len(usgs_id)):
        if is_any_elem_in_a_lst(diversion_strs_lower,
                                data_attr_lower[i],
                                include=True):
            diversions[i] = "yes"
    nid_dir = os.path.join(
        "/".join(config_data.data_path["DB"].split("/")[:-1]), "nid", "test")
    gage_main_dam_purpose = unserialize_json(
        os.path.join(nid_dir, "dam_main_purpose_dict.json"))
    gage_main_dam_purpose_lst = list(gage_main_dam_purpose.values())
    gage_main_dam_purpose_lst_merge = "".join(gage_main_dam_purpose_lst)
    gage_main_dam_purpose_unique = np.unique(
        list(gage_main_dam_purpose_lst_merge))
    # gage_main_dam_purpose_unique = np.unique(gage_main_dam_purpose_lst)
    purpose_regions = {}
    for i in range(gage_main_dam_purpose_unique.size):
        sites_id = []
        for key, value in gage_main_dam_purpose.items():
            if gage_main_dam_purpose_unique[i] in value:
                sites_id.append(key)
        assert (all(x < y for x, y in zip(sites_id, sites_id[1:])))
        purpose_regions[gage_main_dam_purpose_unique[i]] = sites_id
    id_regions_idx = []
    id_regions_sites_ids = []
    regions_name = []
    show_min_num = 10
    df_id_region = np.array(data_model.t_s_dict["sites_id"])
    for key, value in purpose_regions.items():
        gages_id = value
        c, ind1, ind2 = np.intersect1d(df_id_region,
                                       gages_id,
                                       return_indices=True)
        if c.size < show_min_num:
            continue
        assert (all(x < y for x, y in zip(ind1, ind1[1:])))
        assert (all(x < y for x, y in zip(c, c[1:])))
        id_regions_idx.append(ind1)
        id_regions_sites_ids.append(c)
        regions_name.append(key)
    preds, obss, inds_dfs = split_results_to_regions(
        data_model, test_epoch, id_regions_idx, id_regions_sites_ids)
    frames = []
    x_name = "purposes"
    y_name = "NSE"
    hue_name = "DOR"
    col_name = "diversion"
    for i in range(len(id_regions_idx)):
        # plot boxplots with seaborn
        keys = ["NSE"]
        inds_test = subset_of_dict(inds_dfs[i], keys)
        inds_test = inds_test[keys[0]].values
        df_dict_i = {}
        str_i = regions_name[i]
        df_dict_i[x_name] = np.full([inds_test.size], str_i)
        df_dict_i[y_name] = inds_test
        df_dict_i[hue_name] = dors[id_regions_idx[i]]
        df_dict_i[col_name] = diversions[id_regions_idx[i]]
        # df_dict_i[hue_name] = nor_storage[id_regions_idx[i]]
        df_i = pd.DataFrame(df_dict_i)
        frames.append(df_i)
    result = pd.concat(frames)
    plot_boxs(result, x_name, y_name, ylim=[0, 1.0])
    plt.savefig(os.path.join(config_data.data_path["Out"],
                             'purpose_distribution.png'),
                dpi=500,
                bbox_inches="tight")
    # g = sns.catplot(x=x_name, y=y_name, hue=hue_name, col=col_name,
    #                 data=result, kind="swarm",
    #                 height=4, aspect=.7)
    sns.set(font_scale=1.5)
    fig, ax = plt.subplots()
    fig.set_size_inches(11.7, 8.27)
    # note: seaborn's catplot is a figure-level function, so the ax created
    # above is ignored and catplot draws on its own figure
    g = sns.catplot(ax=ax,
                    x=x_name,
                    y=y_name,
                    hue=hue_name,
                    col=col_name,
                    data=result,
                    palette="Set1",
                    kind="box",
                    dodge=True,
                    showfliers=False)
    # g.set(ylim=(-1, 1))
    plt.savefig(os.path.join(config_data.data_path["Out"],
                             '3factors_distribution.png'),
                dpi=500,
                bbox_inches="tight")
    plt.show()
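
# The degree-of-regulation (DOR) computation above (normal dam storage divided
# by mean annual runoff volume) is repeated in several tests in this file. The
# helper below is only a sketch of that shared logic; the unit conversions
# follow the factors used above and assume RUNAVE7100 is in mm/year and
# STOR_NOR_2009 is in megaliters (1000 m^3) per km^2, as in the GAGES-II
# attribute tables.
def _compute_dor_sketch(attrs_runavg_stor):
    """attrs_runavg_stor: array of shape (n_sites, 2) with columns
    [RUNAVE7100, STOR_NOR_2009]; returns the dimensionless DOR per site."""
    run_avg = attrs_runavg_stor[:, 0] * (10 ** (-3)) * (10 ** 6)  # m^3/year per km^2
    nor_storage = attrs_runavg_stor[:, 1] * 1000  # m^3 per km^2
    return nor_storage / run_avg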
def test_scatter_diversion(self):
    attr_lst = ["RUNAVE7100", "STOR_NOR_2009"]
    sites_nonref = self.data_model.t_s_dict["sites_id"]
    attrs_runavg_stor = self.data_model.data_source.read_attr(
        sites_nonref, attr_lst, is_return_dict=False)
    run_avg = attrs_runavg_stor[:, 0] * (10 ** (-3)) * (10 ** 6)  # m^3 per year
    nor_storage = attrs_runavg_stor[:, 1] * 1000  # m^3
    dors = nor_storage / run_avg
    diversion_yes = True
    diversion_no = False
    source_data_diversion = GagesSource.choose_some_basins(
        self.config_data,
        self.config_data.model_dict["data"]["tRangeTrain"],
        screen_basin_area_huc4=False,
        diversion=diversion_yes)
    source_data_nodivert = GagesSource.choose_some_basins(
        self.config_data,
        self.config_data.model_dict["data"]["tRangeTrain"],
        screen_basin_area_huc4=False,
        diversion=diversion_no)
    sites_id_diversion = source_data_diversion.all_configs[
        'flow_screen_gage_id']
    sites_id_nodivert = source_data_nodivert.all_configs[
        'flow_screen_gage_id']
    divert_regions = {}
    divert_regions["diversion"] = sites_id_diversion
    divert_regions["not_diverted"] = sites_id_nodivert
    id_regions_idx = []
    id_regions_sites_ids = []
    regions_name = []
    df_id_region = np.array(self.data_model.t_s_dict["sites_id"])
    for key, value in divert_regions.items():
        gages_id = value
        c, ind1, ind2 = np.intersect1d(df_id_region,
                                       gages_id,
                                       return_indices=True)
        assert (all(x < y for x, y in zip(ind1, ind1[1:])))
        assert (all(x < y for x, y in zip(c, c[1:])))
        id_regions_idx.append(ind1)
        id_regions_sites_ids.append(c)
        regions_name.append(key)
    preds, obss, inds_dfs = split_results_to_regions(
        self.data_model, self.test_epoch, id_regions_idx,
        id_regions_sites_ids)
    frames = []
    x_name = "is_diverted"
    y_name = "NSE"
    hue_name = "DOR"
    # hue_name = "STOR"
    for i in range(len(id_regions_idx)):
        # plot boxplots with seaborn
        keys = ["NSE"]
        inds_test = subset_of_dict(inds_dfs[i], keys)
        inds_test = inds_test[keys[0]].values
        df_dict_i = {}
        str_i = regions_name[i]
        df_dict_i[x_name] = np.full([inds_test.size], str_i)
        df_dict_i[y_name] = inds_test
        df_dict_i[hue_name] = dors[id_regions_idx[i]]
        # df_dict_i[hue_name] = nor_storage[id_regions_idx[i]]
        df_i = pd.DataFrame(df_dict_i)
        frames.append(df_i)
    result = pd.concat(frames)
    # high hue values can be removed to keep the color mapping readable
    plot_boxs(result, x_name, y_name, ylim=[-1.0, 1.0])
    # plot_boxs(result, x_name, y_name, uniform_color="skyblue", swarm_plot=True,
    #           hue=hue_name, colormap=True, ylim=[-1.0, 1.0])
    cmap_str = 'viridis'
    # cmap = plt.get_cmap('Spectral')
    cbar_label = hue_name
    plt.title('Distribution of w/wo diversion')
    swarmplot_with_cbar(cmap_str,
                        cbar_label, [-1, 1.0],
                        x=x_name,
                        y=y_name,
                        hue=hue_name,
                        palette=cmap_str,
                        data=result)
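
# swarmplot_with_cbar is another plotting helper from this repository that is
# not shown here. Judging from the call above, it draws a swarm plot whose
# points are colored by a continuous hue variable and adds a matching
# colorbar; the function below is only a hypothetical sketch of that idea,
# not the repository's implementation.
def _swarmplot_with_cbar_sketch(cmap_str, cbar_label, ylim, **swarm_kwargs):
    fig, ax = plt.subplots()
    sns.swarmplot(ax=ax, **swarm_kwargs)
    if ylim is not None:
        ax.set_ylim(ylim[0], ylim[1])
    # replace the categorical hue legend with a continuous colorbar
    if ax.get_legend() is not None:
        ax.get_legend().remove()
    hue_values = swarm_kwargs["data"][swarm_kwargs["hue"]].values
    norm = plt.Normalize(hue_values.min(), hue_values.max())
    sm = plt.cm.ScalarMappable(cmap=cmap_str, norm=norm)
    sm.set_array([])
    fig.colorbar(sm, ax=ax, label=cbar_label)
    return fig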
def test_scatter_dam_purpose(self):
    attr_lst = ["RUNAVE7100", "STOR_NOR_2009"]
    sites_nonref = self.data_model.t_s_dict["sites_id"]
    attrs_runavg_stor = self.data_model.data_source.read_attr(
        sites_nonref, attr_lst, is_return_dict=False)
    run_avg = attrs_runavg_stor[:, 0] * (10 ** (-3)) * (10 ** 6)  # m^3 per year
    nor_storage = attrs_runavg_stor[:, 1] * 1000  # m^3
    dors = nor_storage / run_avg
    nid_dir = os.path.join(self.config_data.data_path["DB"], "nid", "test")
    gage_main_dam_purpose = unserialize_json(
        os.path.join(nid_dir, "dam_main_purpose_dict.json"))
    gage_main_dam_purpose_lst = list(gage_main_dam_purpose.values())
    gage_main_dam_purpose_unique = np.unique(gage_main_dam_purpose_lst)
    purpose_regions = {}
    for i in range(gage_main_dam_purpose_unique.size):
        sites_id = []
        for key, value in gage_main_dam_purpose.items():
            if value == gage_main_dam_purpose_unique[i]:
                sites_id.append(key)
        assert (all(x < y for x, y in zip(sites_id, sites_id[1:])))
        purpose_regions[gage_main_dam_purpose_unique[i]] = sites_id
    id_regions_idx = []
    id_regions_sites_ids = []
    regions_name = []
    show_min_num = 10
    df_id_region = np.array(self.data_model.t_s_dict["sites_id"])
    for key, value in purpose_regions.items():
        gages_id = value
        c, ind1, ind2 = np.intersect1d(df_id_region,
                                       gages_id,
                                       return_indices=True)
        if c.size < show_min_num:
            continue
        assert (all(x < y for x, y in zip(ind1, ind1[1:])))
        assert (all(x < y for x, y in zip(c, c[1:])))
        id_regions_idx.append(ind1)
        id_regions_sites_ids.append(c)
        regions_name.append(key)
    preds, obss, inds_dfs = split_results_to_regions(
        self.data_model, self.test_epoch, id_regions_idx,
        id_regions_sites_ids)
    frames = []
    x_name = "purposes"
    y_name = "NSE"
    hue_name = "DOR"
    # hue_name = "STOR"
    for i in range(len(id_regions_idx)):
        # plot boxplots with seaborn
        keys = ["NSE"]
        inds_test = subset_of_dict(inds_dfs[i], keys)
        inds_test = inds_test[keys[0]].values
        df_dict_i = {}
        str_i = regions_name[i]
        df_dict_i[x_name] = np.full([inds_test.size], str_i)
        df_dict_i[y_name] = inds_test
        df_dict_i[hue_name] = dors[id_regions_idx[i]]
        # df_dict_i[hue_name] = nor_storage[id_regions_idx[i]]
        df_i = pd.DataFrame(df_dict_i)
        frames.append(df_i)
    result = pd.concat(frames)
    # high hue values can be removed to keep the color mapping readable
    plot_boxs(result, x_name, y_name, ylim=[-1.0, 1.0])
    plt.savefig(os.path.join(self.config_data.data_path["Out"],
                             'purpose_distribution_test.png'),
                dpi=500,
                bbox_inches="tight")
    plt.show()
    # plot_boxs(result, x_name, y_name, uniform_color="skyblue", swarm_plot=True,
    #           hue=hue_name, colormap=True, ylim=[-1.0, 1.0])
    cmap_str = 'viridis'
    # cmap = plt.get_cmap('Spectral')
    cbar_label = hue_name
    plt.title('Distribution of different purposes')
    swarmplot_with_cbar(cmap_str,
                        cbar_label, [-1, 1.0],
                        x=x_name,
                        y=y_name,
                        hue=hue_name,
                        palette=cmap_str,
                        data=result)
def test_gages_nse_dam_attr(self):
    figure_dpi = 600
    config_data = self.config_data
    data_dir = config_data.data_path["Temp"]
    data_model = GagesModel.load_datamodel(
        data_dir,
        data_source_file_name='test_data_source.txt',
        stat_file_name='test_Statistics.json',
        flow_file_name='test_flow.npy',
        forcing_file_name='test_forcing.npy',
        attr_file_name='test_attr.npy',
        f_dict_file_name='test_dictFactorize.json',
        var_dict_file_name='test_dictAttribute.json',
        t_s_dict_file_name='test_dictTimeSpace.json')
    gages_id = data_model.t_s_dict["sites_id"]
    exp_lst = [
        "basic_exp37", "basic_exp39", "basic_exp40", "basic_exp41",
        "basic_exp42", "basic_exp43"
    ]
    self.inds_df, pred_mean, obs_mean = load_ensemble_result(
        config_data.config_file,
        exp_lst,
        config_data.config_file.TEST_EPOCH,
        return_value=True)
    show_ind_key = 'NSE'
    plt.rcParams['font.family'] = 'serif'
    plt.rcParams['font.serif'] = ['Times New Roman'] + plt.rcParams['font.serif']
    # plot NSE-DOR
    attr_lst = ["RUNAVE7100", "STOR_NOR_2009"]
    attrs_runavg_stor = data_model.data_source.read_attr(
        gages_id, attr_lst, is_return_dict=False)
    run_avg = attrs_runavg_stor[:, 0] * (10 ** (-3)) * (10 ** 6)  # m^3 per year
    nor_storage = attrs_runavg_stor[:, 1] * 1000  # m^3
    dors = nor_storage / run_avg
    # dor = 0 is not exactly the same as dam_num = 0 (some dammed basins have a
    # dor of about 0.00), so for zero-dor basins we mainly rely on dam_num = 0
    attr_dam_num = ["NDAMS_2009"]
    attrs_dam_num = data_model.data_source.read_attr(gages_id,
                                                     attr_dam_num,
                                                     is_return_dict=False)
    df = pd.DataFrame({
        "DOR": dors,
        "DAM_NUM": attrs_dam_num[:, 0],
        show_ind_key: self.inds_df[show_ind_key].values
    })
    hydro_logger.info("statistics of dors:\n %s", df.describe())
    hydro_logger.info("percentiles of dors:\n %s", df.quantile(q=0.95))
    hydro_logger.info("ecdf of dors:\n %s", ecdf(dors))
    # boxplot
    # add a column to the df for the dor range
    dor_value_range_lst = [[0, 0], [0, 0.02], [0.02, 0.05], [0.05, 0.1],
                           [0.1, 0.2], [0.2, 0.4], [0.4, 0.8], [0.8, 10000]]
    dor_range_lst = ["0"] + [
        str(dor_value_range_lst[i][0]) + "-" + str(dor_value_range_lst[i][1])
        for i in range(1, len(dor_value_range_lst) - 1)
    ] + [">" + str(dor_value_range_lst[-1][0])]
    # add a column to the df for the dam_num range
    dam_num_value_range_lst = [[0, 0], [0, 1], [1, 3], [3, 5], [5, 10],
                               [10, 20], [20, 50], [50, 10000]]
    dam_num_range_lst = ["0", "1"] + [
        str(dam_num_value_range_lst[i][0]) + "-" +
        str(dam_num_value_range_lst[i][1])
        for i in range(2, len(dam_num_value_range_lst) - 1)
    ] + [">" + str(dam_num_value_range_lst[-1][0])]

    def in_which_range(value_temp):
        if value_temp == 0:
            return "0"
        the_range = [
            a_range for a_range in dor_value_range_lst
            if a_range[0] < value_temp <= a_range[1]
        ]
        if the_range[0][0] == dor_value_range_lst[-1][0]:
            the_range_str = ">" + str(the_range[0][0])
        else:
            the_range_str = str(the_range[0][0]) + "-" + str(the_range[0][1])
        return the_range_str

    def in_which_dam_num_range(value_tmp):
        if value_tmp == 0:
            return "0"
        if value_tmp == 1:
            return "1"
        the_ran = [
            a_ran for a_ran in dam_num_value_range_lst
            if a_ran[0] < value_tmp <= a_ran[1]
        ]
        if the_ran[0][0] == dam_num_value_range_lst[-1][0]:
            the_ran_str = ">" + str(the_ran[0][0])
        else:
            the_ran_str = str(the_ran[0][0]) + "-" + str(the_ran[0][1])
        return the_ran_str

    df["DOR_RANGE"] = df["DOR"].apply(in_which_range)
    df["DAM_NUM_RANGE"] = df["DAM_NUM"].apply(in_which_dam_num_range)
    df.loc[(df["DAM_NUM"] > 0) & (df["DOR_RANGE"] == "0"),
           "DOR_RANGE"] = dor_range_lst[1]
    shown_nse_range_boxplots = [-0.5, 1.0]
    sns.set(font="serif", font_scale=1.5, color_codes=True)
    plot_boxs(df,
              "DOR_RANGE",
              show_ind_key,
              ylim=shown_nse_range_boxplots,
              order=dor_range_lst)
    plt.savefig(os.path.join(
        config_data.data_path["Out"],
        'NSE~DOR-boxplots-' + str(shown_nse_range_boxplots) + '.png'),
                dpi=figure_dpi,
                bbox_inches="tight")
    plt.figure()
    shown_nse_range_boxplots = [0, 1.0]
    sns.set(font="serif", font_scale=1.5, color_codes=True)
    plot_boxs(df,
              "DAM_NUM_RANGE",
              show_ind_key,
              ylim=shown_nse_range_boxplots,
              order=dam_num_range_lst)
    plt.savefig(os.path.join(
        config_data.data_path["Out"],
        'NSE~DAM_NUM-boxplots-' + str(shown_nse_range_boxplots) + '.png'),
                dpi=figure_dpi,
                bbox_inches="tight")
    nums_in_dor_range = [
        df[df["DOR_RANGE"] == a_range_rmp].shape[0]
        for a_range_rmp in dor_range_lst
    ]
    ratios_in_dor_range = [
        a_num / df.shape[0] for a_num in nums_in_dor_range
    ]
    hydro_logger.info(
        "the number and ratio of basins in each dor range\n: %s \n %s",
        nums_in_dor_range, ratios_in_dor_range)
    nums_in_dam_num_range = [
        df[df["DAM_NUM_RANGE"] == a_range_rmp].shape[0]
        for a_range_rmp in dam_num_range_lst
    ]
    ratios_in_dam_num_range = [
        a_num / df.shape[0] for a_num in nums_in_dam_num_range
    ]
    hydro_logger.info(
        "the number and ratio of basins in each dam_num range\n: %s \n %s",
        nums_in_dam_num_range, ratios_in_dam_num_range)
    # regplot
    plt.figure()
    sns.set(font="serif", font_scale=1.5, color_codes=True)
    sr = sns.regplot(x="DOR",
                     y=show_ind_key,
                     data=df[df[show_ind_key] >= 0],
                     scatter_kws={'s': 10})
    show_dor_max = df.quantile(q=0.95)["DOR"]  # 30 # max(dors) # 0.8 # 10
    show_dor_min = min(dors)
    plt.ylim(0, 1)
    plt.xlim(show_dor_min, show_dor_max)
    plt.savefig(os.path.join(
        config_data.data_path["Out"],
        'NSE~DOR-shown-max-' + str(show_dor_max) + '.png'),
                dpi=figure_dpi,
                bbox_inches="tight")
    # jointplot
    # dor_range = [0.2, 0.9]
    dor_range = [0.002, 0.2]
    # plt.figure()
    sns.set(font="serif", font_scale=1.5, color_codes=True)
    # g = sns.jointplot(x="DOR", y=show_ind_key,
    #                   data=df[(df["DOR"] < 1) & (df[show_ind_key] >= 0)],
    #                   kind="reg", marginal_kws=dict(bins=25))
    # g = sns.jointplot(x="DOR", y=show_ind_key,
    #                   data=df[(df["DOR"] < 1) & (df[show_ind_key] >= 0)],
    #                   kind="hex", color="b", marginal_kws=dict(bins=50))
    g = sns.jointplot(x="DOR",
                      y=show_ind_key,
                      data=df[(df["DOR"] < dor_range[1])
                              & (df["DOR"] > dor_range[0])
                              & (df[show_ind_key] >= 0)],
                      kind="hex",
                      color="b")
    g.ax_marg_x.set_xlim(dor_range[0], dor_range[1])
    # g.ax_marg_y.set_ylim(-0.5, 1)
    plt.savefig(os.path.join(
        config_data.data_path["Out"],
        'NSE~DOR(range-)' + str(dor_range) + '-jointplot.png'),
                dpi=figure_dpi,
                bbox_inches="tight")
    nid_dir = os.path.join(
        "/".join(self.config_data.data_path["DB"].split("/")[:-1]), "nid",
        "test")
    nid_input = NidModel.load_nidmodel(
        nid_dir,
        nid_source_file_name='nid_source.txt',
        nid_data_file_name='nid_data.shp')
    gage_main_dam_purpose = unserialize_json(
        os.path.join(nid_dir, "dam_main_purpose_dict.json"))
    data_input = GagesDamDataModel(data_model, nid_input,
                                   gage_main_dam_purpose)
    dam_coords = unserialize_json_ordered(
        os.path.join(nid_dir, "dam_points_dict.json"))
    dam_storages = unserialize_json_ordered(
        os.path.join(nid_dir, "dam_storages_dict.json"))
    dam_ids_1 = list(gage_main_dam_purpose.keys())
    dam_ids_2 = list(dam_coords.keys())
    dam_ids_3 = list(dam_storages.keys())
    assert (all(x < y for x, y in zip(dam_ids_1, dam_ids_1[1:])))
    assert (all(x < y for x, y in zip(dam_ids_2, dam_ids_2[1:])))
    assert (all(x < y for x, y in zip(dam_ids_3, dam_ids_3[1:])))
    sites = list(dam_coords.keys())
    c, ind1, idx_lst_nse_range = np.intersect1d(sites,
                                                gages_id,
                                                return_indices=True)
    std_storage_in_a_basin = list(map(np.std, dam_storages.values()))
    log_std_storage_in_a_basin = list(
        map(np.log, np.array(std_storage_in_a_basin) + 1))
    nse_values = self.inds_df["NSE"].values[idx_lst_nse_range]
    df = pd.DataFrame({
        "DAM_STORAGE_STD": log_std_storage_in_a_basin,
        show_ind_key: nse_values
    })
    plt.figure()
    sns.set(font="serif", font_scale=1.5, color_codes=True)
    g = sns.regplot(x="DAM_STORAGE_STD",
                    y=show_ind_key,
                    data=df[df[show_ind_key] >= 0],
                    scatter_kws={'s': 10})
    show_max = max(log_std_storage_in_a_basin)
    show_min = min(log_std_storage_in_a_basin)
    if show_min < 0:
        show_min = 0
    # g.ax_marg_x.set_xlim(show_min, show_max)
    # g.ax_marg_y.set_ylim(0, 1)
    plt.ylim(0, 1)
    plt.xlim(show_min, show_max)
    plt.savefig(os.path.join(config_data.data_path["Out"],
                             'NSE~' + "DAM_STORAGE_STD" + '.png'),
                dpi=figure_dpi,
                bbox_inches="tight")
    gages_loc_lat = data_model.data_source.gage_dict["LAT_GAGE"]
    gages_loc_lon = data_model.data_source.gage_dict["LNG_GAGE"]
    gages_loc = [[gages_loc_lat[i], gages_loc_lon[i]]
                 for i in range(len(gages_id))]
    # calculate the index of dispersion, then plot the NSE-dispersion scatterplot;
    # the geographic coordinate systems of gages_loc and dam_coords are both NAD83
    coefficient_of_var = list(
        map(coefficient_of_variation, gages_loc, dam_coords.values()))
    coefficient_of_var_min = min(coefficient_of_var)
    coefficient_of_var_max = max(coefficient_of_var)
    dispersion_var = "DAM_GAGE_DIS_VAR"
    nse_values = self.inds_df["NSE"].values[idx_lst_nse_range]
    df = pd.DataFrame({
        dispersion_var: coefficient_of_var,
        show_ind_key: nse_values
    })
    plt.figure()
    sns.set(font="serif", font_scale=1.5, color_codes=True)
    g = sns.regplot(x=dispersion_var,
                    y=show_ind_key,
                    data=df[df[show_ind_key] >= 0],
                    scatter_kws={'s': 10})
    show_max = coefficient_of_var_max
    show_min = coefficient_of_var_min
    if show_min < 0:
        show_min = 0
    # g.ax_marg_x.set_xlim(show_min, show_max)
    # g.ax_marg_y.set_ylim(0, 1)
    plt.ylim(0, 1)
    plt.xlim(show_min, show_max)
    plt.savefig(os.path.join(config_data.data_path["Out"],
                             'NSE~' + dispersion_var + '.png'),
                dpi=figure_dpi,
                bbox_inches="tight")
    idx_dispersions = list(
        map(ind_of_dispersion, gages_loc, dam_coords.values()))
    idx_dispersion_min = min(idx_dispersions)
    idx_dispersion_max = max(idx_dispersions)
    dispersion_var = "DAM_DISPERSION_BASIN"
    # nse_range = [0, 1]
    # idx_lst_nse_range = inds_df_now[(inds_df_now[show_ind_key] >= nse_range[0])
    #                                 & (inds_df_now[show_ind_key] < nse_range[1])].index.tolist()
    nse_values = self.inds_df["NSE"].values[idx_lst_nse_range]
    df = pd.DataFrame({
        dispersion_var: idx_dispersions,
        show_ind_key: nse_values
    })
    # g = sns.regplot(x=dispersion_var, y=show_ind_key,
    #                 data=df[df[show_ind_key] >= 0], scatter_kws={'s': 10})
    if idx_dispersion_min < 0:
        idx_dispersion_min = 0
    plt.ylim(0, 1)
    plt.xlim(idx_dispersion_min, idx_dispersion_max)
    # plt.figure()
    sns.set(font="serif", font_scale=1.5, color_codes=True)
    g = sns.jointplot(x=dispersion_var,
                      y=show_ind_key,
                      data=df[df[show_ind_key] >= 0],
                      kind="reg")
    g.ax_marg_x.set_xlim(idx_dispersion_min, idx_dispersion_max)
    g.ax_marg_y.set_ylim(0, 1)
    plt.show()
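
# The hand-rolled in_which_range / in_which_dam_num_range helpers in
# test_gages_nse_dam_attr above could also be expressed with pd.cut; the
# sketch below mirrors the DOR bin edges and labels used there, with the
# zero-DOR class handled separately, and is only an illustrative alternative.
def _dor_range_with_cut_sketch(dor_series):
    """dor_series: pandas Series of DOR values (e.g. df["DOR"])."""
    bins = [0, 0.02, 0.05, 0.1, 0.2, 0.4, 0.8, 10000]
    labels = ["0-0.02", "0.02-0.05", "0.05-0.1", "0.1-0.2", "0.2-0.4",
              "0.4-0.8", ">0.8"]
    # right=True matches the (lower, upper] intervals used above
    dor_range = pd.cut(dor_series, bins=bins, labels=labels, right=True)
    dor_range = dor_range.astype(object)
    dor_range[dor_series == 0] = "0"
    return dor_range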
    for i in range(len(id_regions_idx)):
        # plot box with seaborn
        keys = ["NSE"]
        inds_test = subset_of_dict(inds_dfs[i], keys)
        inds_test = inds_test[keys[0]].values
        df_dict_i = {}
        str_i = regions_name[i]
        df_dict_i[x_name] = np.full([inds_test.size], str_i)
        df_dict_i[y_name] = inds_test
        df_dict_i[hue_name] = dors[id_regions_idx[i]]
        df_dict_i[col_name] = diversions[id_regions_idx[i]]
        # df_dict_i[hue_name] = nor_storage[id_regions_idx[i]]
        df_i = pd.DataFrame(df_dict_i)
        frames.append(df_i)
    result = pd.concat(frames)
    plt.figure()
    plot_boxs(result, x_name, y_name, ylim=[-0.4, 1.0], rotation=0)
    plt.savefig(os.path.join(config_data.data_path["Out"],
                             'purpose_distribution.png'),
                dpi=FIGURE_DPI,
                bbox_inches="tight")
    # g = sns.catplot(x=x_name, y=y_name, hue=hue_name, col=col_name,
    #                 data=result, kind="swarm",
    #                 height=4, aspect=.7)
    fig, ax = plt.subplots()
    fig.set_size_inches(11.7, 8.27)
    sns.set(font="serif", font_scale=1.5, color_codes=True)
    g = sns.catplot(
        ax=ax,
        x=x_name,
        y=y_name,
        hue=hue_name,