def statistics_tables(corrected: pd.DataFrame, simulated: pd.DataFrame, observed: pd.DataFrame) -> pd.DataFrame:
    """Build a side-by-side table of error metrics for the simulated and the
    bias-corrected streamflow, each compared against the observed values.

    Args:
        corrected: A dataframe with a datetime index and a single column of streamflow values
        simulated: A dataframe with a datetime index and a single column of streamflow values
        observed: A dataframe with a datetime index and a single column of streamflow values

    Returns:
        pd.DataFrame: one row per metric, one column per comparison
    """
    # pair each modeled series with the observations on their shared dates
    sim_vs_obs = hd.merge_data(sim_df=simulated, obs_df=observed)
    cor_vs_obs = hd.merge_data(sim_df=corrected, obs_df=observed)

    chosen_metrics = ['ME', 'RMSE', 'NRMSE (Mean)', 'MAPE', 'NSE', 'KGE (2009)', 'KGE (2012)']

    # one metrics table per comparison, relabeled so the merged columns are distinguishable
    original_table = hs.make_table(merged_dataframe=sim_vs_obs, metrics=chosen_metrics)
    corrected_table = hs.make_table(merged_dataframe=cor_vs_obs, metrics=chosen_metrics)
    original_table = original_table.rename(index={'Full Time Series': 'Original Full Time Series'})
    corrected_table = corrected_table.rename(index={'Full Time Series': 'Corrected Full Time Series'})

    # transpose so metrics become the index, then join the two tables on it
    return pd.merge(original_table.T, corrected_table.T, right_index=True, left_index=True)
def statistics_tables(corrected: pd.DataFrame, simulated: pd.DataFrame, observed: pd.DataFrame,
                      merged_sim_obs: pd.DataFrame = False, merged_cor_obs: pd.DataFrame = False,
                      metrics: list = None) -> str:
    """
    Makes an html table of various statistical metrics for corrected vs observed data alongside the same
    metrics for the simulated vs observed data as a way to see the improvement made by the bias correction.
    This function uses hydrostats.data.merge_data on the 3 inputs. If you have already computed these because
    you are doing a full comparison of bias correction, you can provide them to save time

    Args:
        corrected: A dataframe with a datetime index and a single column of streamflow values
        simulated: A dataframe with a datetime index and a single column of streamflow values
        observed: A dataframe with a datetime index and a single column of streamflow values
        merged_sim_obs: (optional) if you have already computed it, hydrostats.data.merge_data(simulated, observed)
        merged_cor_obs: (optional) if you have already computed it, hydrostats.data.merge_data(corrected, observed)
        metrics: A list of abbreviated statistic names. See the documentation for HydroErr

    Returns:
        str: an html rendering of the merged metrics table

    Raises:
        ValueError: if neither the raw dataframes nor both merged dataframes are supplied
    """
    # Use the precomputed merged dataframes whenever both were supplied; otherwise
    # build them from the raw inputs. (The previous guard only honored the
    # precomputed frames when all three raw frames were explicitly False, so
    # supplying both forms caused a redundant re-merge, and supplying neither
    # silently passed `False` into hs.make_table with a confusing failure.)
    if merged_sim_obs is False or merged_cor_obs is False:
        if corrected is False or simulated is False or observed is False:
            raise ValueError('Provide corrected, simulated and observed dataframes, '
                             'or both merged_sim_obs and merged_cor_obs')
        # merge the datasets together
        merged_sim_obs = hd.merge_data(sim_df=simulated, obs_df=observed)
        merged_cor_obs = hd.merge_data(sim_df=corrected, obs_df=observed)

    if metrics is None:
        metrics = [
            'ME', 'RMSE', 'NRMSE (Mean)', 'MAPE', 'NSE', 'KGE (2009)', 'KGE (2012)'
        ]

    # Merge Data
    table1 = hs.make_table(merged_dataframe=merged_sim_obs, metrics=metrics)
    table2 = hs.make_table(merged_dataframe=merged_cor_obs, metrics=metrics)
    table2 = table2.rename(
        index={'Full Time Series': 'Corrected Full Time Series'})
    table1 = table1.rename(
        index={'Full Time Series': 'Original Full Time Series'})
    table1 = table1.transpose()
    table2 = table2.transpose()

    table_final = pd.merge(table1, table2, right_index=True, left_index=True)

    return table_final.to_html()
'datetime': dates_sim, 'simulated volume (BCM)': sim_df.iloc[:, 0].values }) simData.set_index(['datetime'], inplace=True) #Merging the Data merged_df = hd.merge_data(sim_df=simData, obs_df=obsData, column_names=('Simulated', 'Observed')) '''Tables and Plots''' # Appending the table to the final table table = hs.make_table(merged_df, metrics=[ 'ME', 'MAE', 'MAPE', 'RMSE', 'NRMSE (Mean)', 'NSE', 'KGE (2009)', 'KGE (2012)', 'R (Pearson)', 'R (Spearman)', 'r2' ], location=name, remove_neg=False, remove_zero=False) all_station_table = all_station_table.append(table) #Making plots for all the stations sim_array = merged_df.iloc[:, 0].values obs_array = merged_df.iloc[:, 1].values hv.plot(merged_df, legend=('Simulated', 'Observed'), grid=True, title='Hydrograph for ' + name + '\n River: ' + rio + '. COMID: ' +
def _float_param(get_data, name, default):
    """Return GET parameter `name` as a float, or `default` when it is absent.

    Avoids the crash the old code had for 'lm_x_bar'/'d1_p_x_bar', where a
    missing parameter produced float(None) -> TypeError.
    """
    raw = get_data.get(name, None)
    return float(raw) if raw is not None else default


def _metrics_table(merged_dataframe, metrics, extra_param_dict):
    """Compute one hydrostats metrics table (rounded to 2 decimals) for a merged dataframe."""
    table = hs.make_table(
        merged_dataframe=merged_dataframe,
        metrics=metrics,
        # remove_neg=remove_neg,
        # remove_zero=remove_zero,
        mase_m=extra_param_dict['mase_m'],
        dmod_j=extra_param_dict['dmod_j'],
        nse_mod_j=extra_param_dict['nse_mod_j'],
        h6_mhe_k=extra_param_dict['h6_mhe_k'],
        h6_ahe_k=extra_param_dict['h6_ahe_k'],
        h6_rmshe_k=extra_param_dict['h6_rmshe_k'],
        d1_p_obs_bar_p=extra_param_dict['d1_p_x_bar_p'],
        lm_x_obs_bar_p=extra_param_dict['lm_x_bar_p'],
        # seasonal_periods=all_date_range_list
    )
    return table.round(decimals=2)


def make_table_ajax(request):
    """Render an HTML table of error metrics comparing the simulated and the
    bias-corrected streamflow against the observations.

    Reads the selected metric abbreviations and the optional extra metric
    parameters from the GET query string, builds one metrics table per
    comparison, joins them side by side and returns the result as HTML.

    Args:
        request: Django HttpRequest; GET carries 'metrics[]' and may carry
            'mase_m', 'dmod_j', 'nse_mod_j', 'h6_k_MHE', 'h6_k_AHE',
            'h6_k_RMSHE', 'lm_x_bar', 'd1_p_x_bar'.

    Returns:
        HttpResponse with the merged table HTML, or a JsonResponse error
        payload if anything fails.
    """
    get_data = request.GET
    # module-level dataframes populated elsewhere by the station-selection views
    global simulated_df
    global observed_df
    global corrected_df
    try:
        # Indexing the metrics to get the abbreviations
        selected_metric_abbr = get_data.getlist("metrics[]", None)

        # Retrieving the extra optional parameters; these default to 1 when absent
        extra_param_dict = {
            'mase_m': _float_param(get_data, 'mase_m', default=1),
            'dmod_j': _float_param(get_data, 'dmod_j', default=1),
            'nse_mod_j': _float_param(get_data, 'nse_mod_j', default=1),
            'h6_mhe_k': _float_param(get_data, 'h6_k_MHE', default=1),
            'h6_ahe_k': _float_param(get_data, 'h6_k_AHE', default=1),
            'h6_rmshe_k': _float_param(get_data, 'h6_k_RMSHE', default=1),
        }
        # For these two, a value of 1 (or a missing parameter) means
        # "use the metric's own default", which hydrostats expects as None.
        lm_x_bar_p = _float_param(get_data, 'lm_x_bar', default=1)
        extra_param_dict['lm_x_bar_p'] = None if lm_x_bar_p == 1 else lm_x_bar_p
        d1_p_x_bar_p = _float_param(get_data, 'd1_p_x_bar', default=1)
        extra_param_dict['d1_p_x_bar_p'] = None if d1_p_x_bar_p == 1 else d1_p_x_bar_p

        '''Merge Data'''
        merged_df = hd.merge_data(sim_df=simulated_df, obs_df=observed_df)
        merged_df2 = hd.merge_data(sim_df=corrected_df, obs_df=observed_df)

        # Creating the Tables Based on User Input (one per comparison)
        table = _metrics_table(merged_df, selected_metric_abbr, extra_param_dict)
        table2 = _metrics_table(merged_df2, selected_metric_abbr, extra_param_dict)

        table2 = table2.rename(
            index={'Full Time Series': 'Corrected Full Time Series'})
        table = table.rename(
            index={'Full Time Series': 'Original Full Time Series'})

        # Transpose so metrics are rows, then join original and corrected side by side
        table_final = pd.merge(table.transpose(), table2.transpose(),
                               right_index=True, left_index=True)
        table_final_html = table_final.to_html(
            classes="table table-hover table-striped",
            table_id="corrected_1").replace('border="1"', 'border="0"')

        return HttpResponse(table_final_html)
    except Exception:
        traceback.print_exc()
        return JsonResponse(
            {'error': 'No data found for the selected station.'})