Example no. 1
def layout(user_selected_timestamp, study_agent, ref_agent, scenario):
    best_episode = make_episode(best_agents[scenario]["agent"], scenario)
    new_episode = make_episode(study_agent, scenario)
    center_indx = center_index(user_selected_timestamp, new_episode)
    network_graph = make_network_agent_study(new_episode, timestep=center_indx)

    open_help = should_help_open(
        Path(grid2viz_home_directory) / DONT_SHOW_FILENAME("micro"))
    header = "Analyze further your agent"
    body = (
        "Select a time step in the navbar dropdown and analyze what happened "
        "at that time to understand the agent behavior.")
    return html.Div(
        id="micro_page",
        children=[
            indicator_line(),
            flux_inspector_line(
                network_graph,
                slider_params(user_selected_timestamp, new_episode),
            ),
            context_inspector_line(best_episode, new_episode),
            all_info_line,
            modal(id_suffix="micro",
                  is_open=open_help,
                  header=header,
                  body=body),
        ],
    )
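A minimal sketch of the opt-out check the layout above relies on, assuming should_help_open simply tests for the marker file built from DONT_SHOW_FILENAME (the project's real helper may differ):

from pathlib import Path

def should_help_open(dont_show_file):
    # Hypothetical sketch: the help modal stays open until the user opts
    # out, at which point a marker file is written; its absence means the
    # modal should be shown.
    return not Path(dont_show_file).exists()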
Example no. 2
def update_selected_ref_agent(ref_agent, scenario):
    """
        Change the agent of reference for the given scenario.

        Triggered when user select a new agent with the agent selector on layout.
    """
    make_episode(ref_agent, scenario)
    return ref_agent
Example no. 3
def make_rewards_ts(study_agent, ref_agent, scenario, layout):
    """
        Make kpi with rewards and cumulated reward for both reference agent and study agent.

        :param study_agent: agent studied
        :param ref_agent: agent to compare with
        :param scenario:
        :param layout: display configuration
        :return: rewards and cumulated rewards for each agents
    """
    study_episode = make_episode(study_agent, scenario)
    ref_episode = make_episode(ref_agent, scenario)
    actions_ts = study_episode.action_data_table.set_index("timestamp")[[
        'action_line', 'action_subs'
    ]].sum(axis=1).to_frame(name="Nb Actions")
    df = observation_model.get_df_computed_reward(study_episode)
    action_events_df = pd.DataFrame(index=df["timestep"],
                                    data=np.nan,
                                    columns=["action_events"])
    action_events_df.loc[(actions_ts["Nb Actions"] > 0).values, "action_events"] = \
        df.loc[(actions_ts["Nb Actions"] > 0).values, "rewards"].values
    action_trace = go.Scatter(x=action_events_df.index,
                              y=action_events_df["action_events"],
                              name="Actions",
                              mode='markers',
                              marker_color='#FFEB3B',
                              marker={
                                  "symbol": "hexagon",
                                  "size": 10
                              })
    ref_reward_trace, ref_reward_cum_trace = ref_episode.reward_trace
    studied_agent_reward_trace, studied_agent_reward_cum_trace = study_episode.reward_trace

    # Make sure the timeframe is the study agent one
    ref_reward_trace.x = studied_agent_reward_trace.x
    ref_reward_trace.y = ref_reward_trace.y[:len(studied_agent_reward_trace.y)]
    ref_reward_cum_trace.x = studied_agent_reward_cum_trace.x
    ref_reward_cum_trace.y = ref_reward_cum_trace.y[:len(
        studied_agent_reward_cum_trace.y)]

    return {
        'data': [
            ref_reward_trace, ref_reward_cum_trace, studied_agent_reward_trace,
            studied_agent_reward_cum_trace, action_trace
        ],
        'layout': {
            **layout,
            'yaxis': {
                'title': 'Instant Reward'
            },
            'yaxis2': {
                'title': 'Cumulated Reward',
                'side': 'right',
                'anchor': 'x',
                'overlaying': 'y'
            },
        }
    }
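Caveat on the block above: reward_trace comes from the episode object, which is typically cached by make_episode, so re-windowing the reference traces in place leaks the truncation into every later caller. Example 4 below fixes this with copy(); a standalone illustration of the aliasing problem, using a stand-in Trace class:

from copy import copy

class Trace:
    # Stand-in for a cached plotly trace; only x and y matter here.
    def __init__(self, x, y):
        self.x, self.y = x, y

cached = Trace(x=[0, 1, 2], y=[10, 20, 30])
alias = cached               # what the in-place version effectively holds
alias.y = alias.y[:2]        # ...so this also truncates the cached trace
assert len(cached.y) == 2

fresh = copy(cached)         # Example 4's approach: mutate a copy instead
fresh.y = fresh.y[:1]
assert len(cached.y) == 2    # the cached trace is no longer affected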
Example no. 4
def make_rewards_ts(study_agent, ref_agent, scenario, rew_layout,
                    cumrew_layout):
    """
        Make kpi with rewards and cumulated reward for both reference agent and study agent.

        :param study_agent: agent studied
        :param ref_agent: agent to compare with
        :param scenario:
        :param layout: display configuration
        :return: rewards and cumulated rewards for each agents
    """
    study_episode = make_episode(study_agent, scenario)
    ref_episode = make_episode(ref_agent, scenario)
    actions_ts = study_episode.action_data_table.set_index("timestamp")[[
        'action_line', 'action_subs', 'action_redisp'
    ]].sum(axis=1).to_frame(name="Nb Actions")
    df = observation_model.get_df_computed_reward(study_episode)
    action_events_df = pd.DataFrame(index=df["timestep"],
                                    data=np.nan,
                                    columns=["action_events"])
    action_events_df.loc[(actions_ts["Nb Actions"] > 0).values, "action_events"] = \
        df.loc[(actions_ts["Nb Actions"] > 0).values, "rewards"].values
    action_trace = go.Scatter(x=action_events_df.index,
                              y=action_events_df["action_events"],
                              name="Actions",
                              mode='markers',
                              marker_color='#FFEB3B',
                              marker={
                                  "symbol": "hexagon",
                                  "size": 10
                              },
                              text=action_tooltip(study_episode.actions))
    ref_reward_trace, ref_reward_cum_trace = ref_episode.reward_trace
    studied_agent_reward_trace, studied_agent_reward_cum_trace = study_episode.reward_trace

    # Make sure the timeframe is the study agent one
    # Copy is needed to avoid modifying the objects in place
    ref_reward_trace_copy = copy(ref_reward_trace)
    ref_reward_cum_trace_copy = copy(ref_reward_cum_trace)
    ref_reward_trace_copy.x = studied_agent_reward_trace.x
    ref_reward_trace_copy.y = ref_reward_trace.y[:len(
        studied_agent_reward_trace.y)]
    ref_reward_cum_trace_copy.x = studied_agent_reward_cum_trace.x
    ref_reward_cum_trace_copy.y = ref_reward_cum_trace.y[:len(
        studied_agent_reward_cum_trace.y)]

    rew_layout.update(xaxis=dict(
        range=[ref_reward_trace_copy.x[0], ref_reward_trace_copy.x[-1]]))

    return {
        'data':
        [ref_reward_trace_copy, studied_agent_reward_trace, action_trace],
        'layout': rew_layout,
    }, {
        'data': [ref_reward_cum_trace_copy, studied_agent_reward_cum_trace],
        'layout': cumrew_layout,
    }
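A hedged usage sketch: since this variant returns two figure dicts, it maps onto a Dash callback with two Outputs. The component ids, the app object, and the scenario/layout variables are illustrative assumptions, not the project's actual wiring:

import dash
from dash import dcc, html
from dash.dependencies import Input, Output

app = dash.Dash(__name__)
app.layout = html.Div([
    dcc.Dropdown(id="study_agent_select"),   # hypothetical component ids
    dcc.Dropdown(id="ref_agent_select"),
    dcc.Graph(id="rewards_ts"),
    dcc.Graph(id="cumulated_rewards_ts"),
])

@app.callback(
    [Output("rewards_ts", "figure"),
     Output("cumulated_rewards_ts", "figure")],
    [Input("study_agent_select", "value"),
     Input("ref_agent_select", "value")],
)
def update_rewards(study_agent, ref_agent):
    # scenario, rew_layout and cumrew_layout would come from the app's
    # state; fresh dicts avoid mutating a shared layout configuration.
    return make_rewards_ts(study_agent, ref_agent, scenario,
                           dict(rew_layout), dict(cumrew_layout))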
Example no. 5
def make_action_ts(study_agent, ref_agent, scenario, layout_def=None):
    """
        Make the action timeseries trace of study and reference agents.

        :param study_agent: studied agent to compare
        :param ref_agent: reference agent to compare with
        :param scenario:
        :param layout_def: layout page
        :return: nb action and distance for each agents
    """
    ref_episode = make_episode(ref_agent, scenario)
    study_episode = make_episode(study_agent, scenario)
    actions_ts = get_actions_sum(study_episode)
    ref_agent_actions_ts = get_actions_sum(ref_episode)

    # used below to make sure the x-axis length is the study agent one
    study_agent_length = len(study_episode.action_data_table)

    figure = {
        'data': [
            go.Scatter(x=study_episode.action_data_table.timestamp,
                       y=actions_ts["Nb Actions"],
                       name=study_agent,
                       text=action_tooltip(study_episode.actions)),
            go.Scatter(
                x=ref_episode.action_data_table.timestamp[:study_agent_length],
                y=ref_agent_actions_ts["Nb Actions"][:study_agent_length],
                name=ref_agent,
                text=action_tooltip(ref_episode.actions)),
            go.Scatter(x=study_episode.action_data_table.timestamp,
                       y=study_episode.action_data_table["distance"],
                       name=study_agent + " distance",
                       yaxis='y2'),
            go.Scatter(
                x=ref_episode.action_data_table.timestamp[:study_agent_length],
                y=ref_episode.action_data_table["distance"]
                [:study_agent_length],
                name=ref_agent + " distance",
                yaxis='y2'),
        ],
        'layout': {
            **layout_def, 'yaxis': {
                'title': 'Actions'
            },
            'yaxis2': {
                'title': 'Distance',
                'side': 'right',
                'anchor': 'x',
                'overlaying': 'y'
            }
        }
    }

    return figure
Example no. 6
def agent_select_update(scenario, pathname, agents, agent_default_value,
                        options, value, disabled_views):
    if value is None:
        options = [{"label": agent, "value": agent} for agent in agents]
        value = agent_default_value
        manager.make_episode(value, scenario)
    disabled = pathname.split("/")[-1] in disabled_views
    return options, disabled, value
Example no. 7
    def update_agent_ref_graph(
        relayout_data_store,
        window,
        figure_overflow,
        figure_usage,
        study_agent,
        scenario,
    ):
        if relayout_data_store is not None and relayout_data_store[
                "relayout_data"]:
            relayout_data = relayout_data_store["relayout_data"]
            layout_usage = figure_usage["layout"]
            new_axis_layout = get_axis_relayout(figure_usage, relayout_data)
            if new_axis_layout is not None:
                layout_usage.update(new_axis_layout)
                figure_overflow["layout"].update(new_axis_layout)
                return figure_overflow, figure_usage

        if window is not None:
            figure_overflow["layout"].update(
                xaxis=dict(range=window, autorange=False))
            figure_usage["layout"].update(
                xaxis=dict(range=window, autorange=False))

        return common_graph.agent_overflow_usage_rate_trace(
            make_episode(study_agent, scenario), figure_overflow, figure_usage)
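For context, a minimal sketch of what get_axis_relayout is assumed to do in these callbacks (the real helper lives elsewhere in the project): translate plotly's relayoutData keys into a layout update, or return None when there is nothing to apply:

def get_axis_relayout_sketch(figure, relayout_data):
    # relayoutData from a user zoom looks like
    # {"xaxis.range[0]": t0, "xaxis.range[1]": t1}; a double-click reset
    # sends {"xaxis.autorange": True} instead.
    x0 = relayout_data.get("xaxis.range[0]")
    x1 = relayout_data.get("xaxis.range[1]")
    if x0 is not None and x1 is not None:
        return dict(xaxis=dict(range=[x0, x1], autorange=False))
    if relayout_data.get("xaxis.autorange"):
        return dict(xaxis=dict(autorange=True))
    return None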
Example no. 8
    def load_flow_voltage_graph(
        selected_objects,
        choice,
        relayout_data_store,
        window,
        figure,
        study_agent,
        scenario,
    ):
        if relayout_data_store is not None and relayout_data_store[
                "relayout_data"]:
            relayout_data = relayout_data_store["relayout_data"]
            layout = figure["layout"]
            new_axis_layout = get_axis_relayout(figure, relayout_data)
            if new_axis_layout is not None:
                layout.update(new_axis_layout)
                return figure
        new_episode = make_episode(study_agent, scenario)
        if selected_objects is not None:
            if choice == "voltage":
                figure["data"] = load_voltage_for_lines(
                    selected_objects, new_episode)
            if "flow" in choice:
                figure["data"] = load_flows_for_lines(selected_objects,
                                                      new_episode)
            if "redispatch" in choice:
                figure["data"] = load_redispatch(selected_objects, new_episode)

        if window is not None:
            figure["layout"].update(xaxis=dict(range=window, autorange=False))

        return figure
Example no. 9
def update_nbs(study_agent, scenario):
    new_episode = make_episode(study_agent, scenario)
    score = f'{get_score_agent(new_episode):,}'
    nb_overflow = f'{get_nb_overflow_agent(new_episode):,}'
    nb_action = f'{get_nb_action_agent(new_episode):,}'

    return score, nb_overflow, nb_action
Example no. 10
def update_card_step(scenario):
    """Display the best agent's number of played steps when the page is loaded."""
    best_agent_ep = make_episode(best_agents[scenario]["agent"], scenario)
    return "{} / {}".format(
        best_agent_ep.meta["nb_timestep_played"],
        best_agent_ep.meta["chronics_max_timestep"],
    )
Example no. 11
def update_network_graph_t(study_agent, ts, scenario):
    if study_agent is None or scenario is None or ts is None:
        raise PreventUpdate
    episode = make_episode(study_agent, scenario)
    return make_network_agent_study(episode,
                                    timestep=int(ts),
                                    responsive=True)
Example no. 12
def update_more_info(active_cell, study_agent, scenario, data):
    if active_cell is None:
        raise PreventUpdate
    new_episode = make_episode(study_agent, scenario)
    row_id = active_cell["row_id"]
    act = new_episode.actions[row_id]
    return str(act)
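For reference, active_cell is the dict a dash DataTable emits on click; row_id is only present because the table rows carry an id column (Example 29 below sets id from the timestep), which is what lets new_episode.actions[row_id] pick the right action. An illustrative payload:

active_cell = {"row": 3, "column": 0, "column_id": "timestep", "row_id": 42}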
Example no. 13
def update_table(loads, prods, agent_ref, start_date, end_date, data, scenario):
    """
        Update the inspection table with the loads and prods selected.

        Triggered when the select a load or a prods and when the ref agent is changed.
    """
    if agent_ref is None:
        raise PreventUpdate
    episode = make_episode(agent_ref, scenario)
    df = observation_model.init_table_inspection_data(episode)
    if data is None:
        return [{"name": i, "id": i} for i in df.columns], df.to_dict('records')
    if loads is None:
        loads = []
    if prods is None:
        prods = []
    df["timestamp"] = pd.to_datetime(df["timestamp"])
    cols_to_drop = []
    for col in df.columns[4:]:
        if col not in loads and col not in prods:
            cols_to_drop.append(col)
    cols_to_add = [col for col in loads + prods if col not in df.columns]
    df = df.drop(cols_to_drop, axis=1)
    if cols_to_add:
        df = df.merge(
            observation_model.get_prod_and_conso(episode)[cols_to_add], left_on="timestamp", right_index=True)
    if start_date is not None:
        df = df[df["timestamp"] >= start_date]
    if end_date is not None:
        df = df[df["timestamp"] <= end_date]
    cols = [{"name": i, "id": i} for i in df.columns]
    return cols, df.to_dict('records')
Example no. 14
def simulate(
    simulate_n_clicks,
    actions,
    network_graph_new,
    active_tab_choose_assist,
    simulation_assistant_store,
    scenario,
    agent,
    ts,
    network_graph_t,
):
    if simulate_n_clicks is None:
        return
    if actions is None and simulation_assistant_store is None:
        return "No action performed"
    if active_tab_choose_assist == "tab-assist-method":
        episode = make_episode(agent, scenario)
        return dcc.Graph(figure=go.Figure(
            assistant.store_to_graph(simulation_assistant_store, episode,
                                     int(ts))))
    else:
        if actions:
            return dcc.Graph(figure=network_graph_new)
        else:
            return dcc.Graph(figure=network_graph_t)
Example no. 15
def update_action_repartition_pie(study_agent, figure, scenario):
    new_episode = make_episode(study_agent, scenario)
    figure['data'] = action_repartition_pie(new_episode)
    figure['layout'].update(
        actions_model.update_layout(figure["data"][0].values == (0, 0),
                                    "No Actions for this Agent"))
    return figure
Example no. 16
def load_environments_ts(equipments, relayout_data_store, figure, kind, scenario):
    """
        Load selected kind of environment for chosen equipments in a scenario.

        Triggered when user click on a equipment displayed in the
        input_assets_selector in the overview layout.
    """
    if relayout_data_store is not None and relayout_data_store['relayout_data']:
        relayout_data = relayout_data_store["relayout_data"]
        layout = figure["layout"]
        new_axis_layout = get_axis_relayout(figure, relayout_data)
        if new_axis_layout is not None:
            layout.update(new_axis_layout)
            return figure

    if kind is None:
        return figure
    if isinstance(equipments, str):
        equipments = [equipments]  # to make pd.series.isin() work

    figure['data'] = common_graph.environment_ts_data(
        kind,
        make_episode(best_agents[scenario]['agent'], scenario),
        equipments
    )

    return figure
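The str-to-list wrap above is required because pandas' Series.isin only accepts list-like arguments and raises on a bare string:

import pandas as pd

s = pd.Series(["load_1", "load_12"])
s.isin(["load_1"])   # fine: element-wise membership test
# s.isin("load_1")   # TypeError: only list-like objects are allowed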
Example no. 17
def update_card_duration_maintenances(scenario):
    """
        Display the total duration of maintenances made by the best agent when
        page is loaded.
    """
    best_agent_ep = make_episode(best_agents[scenario]['agent'], scenario)
    return best_agent_ep.total_maintenance_duration
Example no. 18
def update_interactive_graph(slider_value, study_agent, scenario):
    new_episode = make_episode(study_agent, scenario)
    act = new_episode.actions[slider_value]
    if any(act.get_types()):
        act_as_str = str(act)
    else:
        act_as_str = "NO ACTION"
    return make_network(new_episode).plot_obs(
        new_episode.observations[slider_value]), act_as_str
Example no. 19
    def update_select_prods(children, scenario):
        """
        Display list of production in a the select_prods_for_tb component.

        Triggered when indicator line is loaded.
        """
        episode = make_episode(best_agents[scenario]["agent"], scenario)
        return [{"label": prod, "value": prod} for prod in episode.prod_names]
Example no. 20
def update_user_timestamps_options(data, agent, scenario):
    episode = manager.make_episode(agent, scenario)
    nb_timesteps_played = episode.meta['nb_timestep_played']
    return [
        ts for ts in data
        if dt.datetime.strptime(ts['value'], '%Y-%m-%d %H:%M') in
        episode.timestamps[:nb_timesteps_played]
    ]
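A standalone check of the timestamp matching above, assuming the dropdown values follow the '%Y-%m-%d %H:%M' format shown (the sample value is illustrative):

import datetime as dt

assert dt.datetime.strptime("2019-01-06 10:05", "%Y-%m-%d %H:%M") == \
    dt.datetime(2019, 1, 6, 10, 5)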
Example no. 21
def update_date_range(agent_ref, scenario):
    """Change the date range for the date picker in the inspector line."""
    if agent_ref is None or scenario is None:
        raise PreventUpdate
    episode = make_episode(agent_ref, scenario)
    return (
        episode.production["timestamp"].dt.date.values[0],
        episode.production["timestamp"].dt.date.values[-1],
    )
Example no. 22
def simulation_method_tab_content(active_tab, scenario, study_agent):
    episode = make_episode(study_agent, scenario)
    if active_tab is None:
        raise PreventUpdate
    if active_tab == "tab-choose-method":
        return choose_tab_content(episode)
    elif active_tab == "tab-assist-method":
        return assistant.register_layout(
            episode, layout_to_ckeck_against=choose_tab_content(episode))
Example no. 23
def update_ts_graph_avail_assets(kind, scenario):
    """
        Change the selector's options according to the kind of trace selected.

        Triggered when user click on one of the input in the scen_overview_ts_switch
        component in overview layout.
    """
    best_agent_ep = make_episode(best_agents[scenario]['agent'], scenario)
    return common_graph.ts_graph_avail_assets(kind, best_agent_ep)
Example no. 24
def maintenance_duration_hist(study_agent, figure, scenario):
    new_episode = make_episode(study_agent, scenario)
    figure['data'] = [go.Histogram(x=hist_duration_maintenances(new_episode))]
    figure["layout"].update(
        actions_model.update_layout(
            len(figure["data"][0]["x"]) == 0,
            "No Maintenances for this scenario"))
    figure["layout"]["xaxis"]["rangemode"] = "tozero"

    return figure
Example no. 25
def compute_window(user_selected_timestamp, study_agent, scenario):
    if user_selected_timestamp is not None:
        n_clicks_left = 0
        n_clicks_right = 0
        new_episode = make_episode(study_agent, scenario)
        center_indx = center_index(user_selected_timestamp, new_episode)

        return common_graph.compute_windows_range(new_episode, center_indx,
                                                  n_clicks_left,
                                                  n_clicks_right)
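The range returned here is what Example 30's layout seeds into the "window" dcc.Store; when user_selected_timestamp is None the function falls through and returns None, leaving the store empty until a timestamp is picked.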
Example no. 26
def update_user_timestamps_options(data, agent, scenario):
    if data is None:
        raise PreventUpdate
    episode = manager.make_episode(agent, scenario)
    nb_timesteps_played = episode.meta["nb_timestep_played"]
    return [
        ts for ts in data
        if dt.datetime.strptime(ts["value"], "%Y-%m-%d %H:%M") in
        episode.timestamps[:nb_timesteps_played]
    ]
Example no. 27
def update_select_loads(children, scenario):
    """
        Display list of loads in a the select_loads_for_tb component.

        Triggered when indicator line is loaded.
    """
    episode = make_episode(best_agents[scenario]["agent"], scenario)
    return [
        {'label': load, "value": load} for load in [*episode.load_names, 'total']
    ]
Example no. 28
def layout(scenario, studied_agent):
    episode = make_episode(studied_agent, scenario)
    # TODO: center on the correct time step
    network_graph = make_network(episode).plot_obs(
        observation=episode.observations[0]
    )
    return html.Div(id="simulation_page", children=[
        choose_assist_line(episode, network_graph),
        compare_line(network_graph),
    ])
Example no. 29
def update_agent_log_action_table(study_agent, scenario):
    new_episode = make_episode(study_agent, scenario)
    table = actions_model.get_action_table_data(new_episode)
    table['id'] = table['timestep']
    table.set_index('id', inplace=True, drop=False)
    cols_to_exclude = ["id", "lines_modified", "subs_modified"]
    cols = [{
        "name": action_table_name_converter[col],
        "id": col
    } for col in table.columns if col not in cols_to_exclude]
    return cols, table.to_dict("record")
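The "records" orient used above yields one dict per row, which is the shape dash DataTables expect for their data property; on a toy frame:

import pandas as pd

pd.DataFrame({"timestep": [0, 1], "id": [0, 1]}).to_dict("records")
# -> [{'timestep': 0, 'id': 0}, {'timestep': 1, 'id': 1}]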
Example no. 30
def layout(user_selected_timestamp, study_agent, ref_agent, scenario):
    best_episode = make_episode(best_agents[scenario]["agent"], scenario)
    new_episode = make_episode(study_agent, scenario)
    center_indx = center_index(user_selected_timestamp, new_episode)
    network_graph = make_network(new_episode).get_plot_observation(
        new_episode.observations[center_indx])

    return html.Div(id="micro_page",
                    children=[
                        dcc.Store(id="window",
                                  data=compute_window(user_selected_timestamp,
                                                      study_agent, scenario)),
                        indicator_line(),
                        flux_inspector_line(
                            network_graph,
                            slider_params(user_selected_timestamp,
                                          new_episode)),
                        context_inspector_line(best_episode, new_episode),
                        all_info_line
                    ])
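The compute_window result stored in the "window" dcc.Store here is the same value the graph callbacks in Examples 7 and 8 read back to clamp their x-axes via figure["layout"].update(xaxis=dict(range=window, autorange=False)).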