def action_distrubtion(episode, ref_episode):
    """Build the subs / lines / redispatch action-distribution figures for an
    agent, overlaying the reference agent's first trace on each chart.

    NOTE: the name keeps the historical typo ("distrubtion") because callers
    elsewhere reference it.

    :param episode: studied agent's episode (queried through actions_model)
    :param ref_episode: reference agent's episode, overlaid for comparison
    :return: ActionsDistribution(on_subs=..., on_lines=..., redisp=...)
    """
    # Max y seen in each non-empty trace group; used for a shared y-axis range.
    y_maxes = []

    def _figure(traces, no_data_msg):
        # One figure per trace group; empty groups get a "no data" layout.
        if len(traces[0]["y"]) == 0:
            return go.Figure(layout=layout_no_data(no_data_msg))
        # Record this group's tallest bar so all charts share one scale.
        y_maxes.append(max(map(max_or_zero, [trace.y for trace in traces])))
        return go.Figure(layout=layout_def, data=traces)

    actions_per_sub = actions_model.get_action_per_sub(episode)
    actions_per_sub.append(actions_model.get_action_per_sub(ref_episode)[0])
    figure_subs = _figure(actions_per_sub,
                          "No Actions on subs for this Agent")

    actions_per_lines = actions_model.get_action_per_line(episode)
    actions_per_lines.append(actions_model.get_action_per_line(ref_episode)[0])
    figure_lines = _figure(actions_per_lines,
                           "No Actions on lines for this Agent")

    actions_redisp = actions_model.get_action_redispatch(episode)
    actions_redisp.append(actions_model.get_action_redispatch(ref_episode)[0])
    figure_redisp = _figure(actions_redisp,
                            "No redispatching actions for this Agent")

    if y_maxes:
        # +1 head-room above the tallest bar across ALL groups. The original
        # code re-computed each group's max up to three times and, when a
        # later group's max equaled the running y_max exactly, left that bar
        # flush with the axis top; taking the overall max once fixes both.
        y_max = max(y_maxes) + 1
        figure_subs.update_yaxes(range=[0, y_max])
        figure_lines.update_yaxes(range=[0, y_max])
        figure_redisp.update_yaxes(range=[0, y_max])

    return ActionsDistribution(on_subs=figure_subs,
                               on_lines=figure_lines,
                               redisp=figure_redisp)
def test_multi_topo(self):
    """End-to-end check of the action analytics on the multiTopology baseline."""
    self.agent_name = "multiTopology-baseline"
    self.scenario_name = "000"
    agent_dir = os.path.join(self.agents_path, self.agent_name)
    self.episode_data = EpisodeData.from_disk(agent_dir, self.scenario_name)
    self.episode_analytics = EpisodeAnalytics(
        self.episode_data, self.scenario_name, self.agent_name
    )

    totals = self.episode_analytics.action_data_table[
        ["action_line", "action_subs"]
    ].sum()
    self.assertEqual(totals.action_line, 25.0)
    self.assertEqual(totals.action_subs, 38.0)

    per_line = get_action_per_line(self.episode_analytics)
    per_sub = get_action_per_sub(self.episode_analytics)

    # Labels with equal counts come back in arbitrary order, so sort
    # before comparing.
    self.assertListEqual(sorted(per_sub[0].x.tolist()), ["sub_3", "sub_5"])
    self.assertListEqual(per_sub[0].y.tolist(), [19, 19])
    self.assertListEqual(per_line[0].x.tolist(), ["3_6_15", "9_10_12"])
    self.assertListEqual(per_line[0].y.tolist(), [13, 12])

    self.assertListEqual(
        self.episode_analytics.action_data_table.action_id[:5].tolist(),
        [0, 1, 1, 2, 3],
    )
    self.assertListEqual(
        self.episode_analytics.action_data_table.distance[:5].tolist(),
        [1, 2, 2, 0, 3],
    )
def update_agent_log_action_graphs(study_agent, figure_sub, figure_switch_line, scenario):
    """Refresh the per-sub and per-line action graphs for the studied agent.

    Mutates the two figure dicts in place (data and layout) and returns them
    so the Dash callback can hand them back.
    """
    episode = make_episode(study_agent, scenario)

    sub_traces = actions_model.get_action_per_sub(episode)
    line_traces = actions_model.get_action_per_line(episode)
    figure_sub["data"] = sub_traces
    figure_switch_line["data"] = line_traces

    # update_layout switches to a "no data" layout when the first trace
    # has no x values.
    figure_sub["layout"].update(
        actions_model.update_layout(
            len(sub_traces[0]["x"]) == 0,
            "No Actions on subs for this Agent"))
    figure_switch_line["layout"].update(
        actions_model.update_layout(
            len(line_traces[0]["x"]) == 0,
            "No Actions on lines for this Agent"))

    return figure_sub, figure_switch_line
def action_distrubtion(episode):
    """Figures of action counts per substation and per line for one agent.

    NOTE: the name keeps the historical typo ("distrubtion") because callers
    elsewhere reference it.
    """
    def _build(traces, empty_msg):
        # Fall back to a placeholder layout when the agent took no such actions.
        if not len(traces[0]["y"]):
            return go.Figure(layout=layout_no_data(empty_msg))
        return go.Figure(layout=layout_def, data=traces)

    subs_fig = _build(actions_model.get_action_per_sub(episode),
                      "No Actions on subs for this Agent")
    lines_fig = _build(actions_model.get_action_per_line(episode),
                       "No Actions on lines for this Agent")
    return ActionsDistribution(on_subs=subs_fig, on_lines=lines_fig)
def test_action_repartition(self):
    """Check action counts split between lines and substations."""
    totals = self.episode_analytics.action_data_table[
        ["action_line", "action_subs"]
    ].sum()
    self.assertEqual(totals.action_line, 0.0)
    self.assertEqual(totals.action_subs, 31.0)

    per_line = get_action_per_line(self.episode_analytics)
    per_sub = get_action_per_sub(self.episode_analytics)

    self.assertListEqual(per_line[0].x.tolist(), [])

    # The last two labels share the same count, so their relative order is
    # arbitrary; sort just that tail before comparing.
    sub_labels = per_sub[0].x.tolist()
    stable_head, tied_tail = sub_labels[:-2], sorted(sub_labels[-2:])
    self.assertListEqual(
        stable_head + tied_tail,
        ["sub_4", "sub_1", "sub_3", "sub_8", "sub_9"],
    )
    self.assertListEqual(per_sub[0].y.tolist(), [13, 8, 4, 3, 3])
def test_action_repartition(self):
    """Check action counts split between lines and substations."""
    totals = self.episode_analytics.action_data_table[
        ['action_line', 'action_subs']
    ].sum()
    self.assertEqual(totals.action_line, 0.0)
    self.assertEqual(totals.action_subs, 177.0)

    per_line = get_action_per_line(self.episode_analytics)
    per_sub = get_action_per_sub(self.episode_analytics)

    self.assertListEqual(per_line[0].x.tolist(), [])

    # The last two labels share the same count, so their relative order is
    # arbitrary; sort just that tail before comparing.
    sub_labels = per_sub[0].x.tolist()
    stable_head, tied_tail = sub_labels[:-2], sorted(sub_labels[-2:])
    self.assertListEqual(
        stable_head + tied_tail,
        ['sub_9', 'sub_8', 'sub_4', 'sub_12', 'sub_1', 'sub_3']
    )
    self.assertListEqual(per_sub[0].y.tolist(), [106, 45, 12, 10, 2, 2])
def update_agent_log_action_graphs(study_agent, ref_agent, figure_sub,
                                   figure_switch_line, figure_redisp,
                                   scenario):
    """Refresh the subs / lines / redispatch action graphs for the studied
    agent, overlaying the reference agent's first trace on each.

    Mutates the three figure dicts in place (data, layout, shared y-axis
    range) and returns them for the Dash callback.
    """
    new_episode = make_episode(study_agent, scenario)
    ref_episode = make_episode(ref_agent, scenario)
    # Max y per non-empty figure, for a common y-axis range across the three.
    y_maxes = []

    def _fill(figure, getter, no_data_msg):
        # Populate `figure` with the studied agent's traces; when there is
        # data, append the reference agent's first trace and record the
        # group's tallest bar. Also switch to a "no data" layout when empty.
        figure["data"] = getter(new_episode)
        if len(figure["data"][0]["x"]) != 0:
            figure["data"].append(getter(ref_episode)[0])
            y_maxes.append(
                max(map(max_or_zero, [trace.y for trace in figure["data"]])))
        figure["layout"].update(
            actions_model.update_layout(
                len(figure["data"][0]["x"]) == 0, no_data_msg))

    _fill(figure_sub, actions_model.get_action_per_sub,
          "No Actions on subs for this Agent")
    _fill(figure_switch_line, actions_model.get_action_per_line,
          "No Actions on lines for this Agent")
    _fill(figure_redisp, actions_model.get_action_redispatch,
          "No redispatching actions for this Agent")

    if y_maxes:
        # +1 head-room above the tallest bar across ALL figures. The original
        # re-computed each group's max up to three times and, when a later
        # group's max equaled the running y_max exactly, left that bar flush
        # with the axis top; taking the overall max once fixes both.
        y_max = max(y_maxes) + 1
        figure_sub["layout"]["yaxis"].update(range=[0, y_max])
        figure_switch_line["layout"]["yaxis"].update(range=[0, y_max])
        figure_redisp["layout"]["yaxis"].update(range=[0, y_max])

    return figure_sub, figure_switch_line, figure_redisp
def action_distrubtion(episode):
    """Plot the per-substation and per-line action distributions of an episode.

    NOTE: the name keeps the historical typo ("distrubtion") because callers
    elsewhere reference it.
    """
    per_sub = actions_model.get_action_per_sub(episode)
    per_line = actions_model.get_action_per_line(episode)
    return ActionsDistribution(
        on_subs=go.Figure(layout=layout_def, data=per_sub),
        on_lines=go.Figure(layout=layout_def, data=per_line),
    )