text="""<p><b>{}</b> rivers with stations at risk, the top 5 is tabulated below.</p>""".format( len(rivers_with_station(risky_stations))), width=600 ) warning_text2.sizing_mode = 'scale_width' risky_rivers = rivers_by_station_number(risky_stations, 5) risky_rivers_source = ColumnDataSource( data=dict(name=[i[0] for i in risky_rivers], num=[i[1] for i in risky_rivers]) ) risky_river_table_columns = [ TableColumn(field="name", title="River Name"), TableColumn(field="num", title="Number of Risky Stations"), ] risky_river_table = DataTable(source=risky_rivers_source, columns=risky_river_table_columns, width=500, height=140) risky_river_table.sizing_mode = 'scale_width' # Risky towns risky_towns = [] key_station_in_cluster = [] mean_levels = [] for label, s in label_to_stations.items(): cluster_levels = np.array([i.relative_water_level() for i in s]) mean_levels.append(cluster_levels.mean()) key_station_in_cluster.append(s[np.argmax(cluster_levels)]) for i in s: risky_towns.append(i.town) risky_indx = risky_name_to_indx[i.name] risky_source.data['risk'][risky_indx], risky_source.data['alpha'][risky_indx] = 'High', 1.0 risky_towns = set(risky_towns) # to find the total number of risky towns
def fill_experiment_table():
    """Populates the table that displays the results from previously ran
    experiments.

    The results are stored as yaml files in the directory that the global
    variable EXPERIMENT_SERIALIZATION_DIR points to.  For every ``*.yml``
    file found there, the experiment's name, horizon ``L``, reward type and
    sliding-window geometry are parsed (mostly out of the ``reward_path``
    string) and shown in a sortable Bokeh ``DataTable``.

    Side effects:
        * chdirs the process into EXPERIMENT_SERIALIZATION_DIR (kept — later
          callbacks such as ``load_experiment`` may open the yaml files by
          their bare name and rely on the cwd),
        * rebinds the globals ``data_table``, ``table_source``,
          ``table_data_df``, ``pol_table`` and ``pol_source``.
    """
    global data_table, table_source, table_data_df

    # NOTE(review): mutating the process-wide cwd is fragile, but removing it
    # could break callbacks that open files relative to this directory —
    # confirm before refactoring to absolute paths.
    os.chdir(EXPERIMENT_SERIALIZATION_DIR)
    experiment_files = glob.glob("*.yml")

    n_experiments = len(experiment_files)
    names = [''] * n_experiments
    Ls = [0] * n_experiments
    reward_types = [''] * n_experiments
    window_size = [''] * n_experiments
    window_step = [''] * n_experiments
    seq_or_con = [''] * n_experiments

    for i, filename in enumerate(experiment_files):
        with open(filename, 'r') as ymlfile:
            experiment_data = yaml.safe_load(ymlfile)
        names[i] = filename
        Ls[i] = experiment_data['L']

        reward_path = experiment_data['reward_path']
        # Classify the reward type from the reward file's path.  The
        # 'continous' spelling (sic) is preserved: other code may match
        # these exact labels.
        if 'threshold' in reward_path:
            # e.g. ".../threshold_0.5_..." -> "threshold0.5"
            reward_types[i] = 'threshold' + reward_path.split('threshold_')[1][0:3]
        elif 'top' in reward_path:  # fix: was re-reading experiment_data['reward_path']
            reward_types[i] = 'top'
        elif 'pear' in reward_path:
            reward_types[i] = 'continous-pearson'
        elif 'MI_n1' in reward_path:
            reward_types[i] = 'continous-MI-normalized'
        else:
            reward_types[i] = 'continous-MI'

        # The first two integers embedded in the path encode the sliding
        # window's size and step.  Guard the unpack so one malformed path
        # leaves the '' defaults instead of crashing the whole table.
        window_numbers = re.findall(r'\d+', reward_path)
        if len(window_numbers) >= 2:
            window_size[i], window_step[i] = window_numbers[0:2]
        # First three characters of the file name flag sequential/concurrent.
        seq_or_con[i] = reward_path.split('/')[-1][0:3]

    table_data = dict(
        names=names,
        seq_or_con=seq_or_con,
        Ls=Ls,
        reward_types=reward_types,
        window_size=window_size,
        window_step=window_step,
    )
    table_data_df = pd.DataFrame(data=table_data)
    # NOTE(review): window_size/window_step sort lexicographically (strings),
    # so e.g. '100' < '20' — kept as-is to preserve the displayed values.
    table_data_df = table_data_df.sort_values(
        ['seq_or_con', 'Ls', 'window_size', 'window_step', 'reward_types'])
    table_source = ColumnDataSource(data=table_data_df)
    # Clicking a row loads that experiment via the module-level callback.
    table_source.selected.on_change('indices', load_experiment)
    columns = [
        TableColumn(field="names", title="Experiment"),
        TableColumn(field="seq_or_con", title="Sequential/ concurrent"),
        TableColumn(field="Ls", title="L"),
        TableColumn(field="reward_types", title="Reward Type"),
        TableColumn(field='window_size', title="Window Size"),
        TableColumn(field='window_step', title="Window Step"),
    ]
    data_table = DataTable(source=table_source, columns=columns, height=200)
    data_table.sizing_mode = 'stretch_width'

    # Secondary table: policies of the selected experiment and their overall
    # regret; starts empty and is filled elsewhere once an experiment loads.
    global pol_table, pol_source
    columns_pol_table = [
        TableColumn(field="pol_name", title="Name"),
        TableColumn(field="overall_regret", title="Overall regret")
    ]
    pol_source = ColumnDataSource(dict(pol_name=[], overall_regret=[]))
    pol_source.selected.on_change('indices', plot_policy)
    pol_table = DataTable(
        source=pol_source, columns=columns_pol_table, height=300)
    pol_table.sizing_mode = 'stretch_width'