def _runner_timing(
        env,
        agent,
        n_timings=100,
        verbose=False,
        **kwargs,
    ):
        np.random.seed(0)
        env.seed(0)
        env.chronics_handler.tell_id(-1)

        agent.set_kwargs(**kwargs)
        agent.print_agent(default=verbose)

        timings = []

        done = False
        obs = env.reset()
        pprint("    - Chronic:", env.chronics_handler.get_id())
        for _ in range(n_timings):
            action, timing = agent.act_with_timing(obs, done)

            obs_next, reward, done, info = env.step(action)
            obs = obs_next
            if done:
                obs = env.reset()
                pprint("    - Done! Next chronic:",
                       env.chronics_handler.get_id())

            timings.append(timing)

        return pd.DataFrame(timings)
Example 2
def load_agent_experience(env, agent_name, case_experience_dir):
    collector = ExperienceCollector(save_dir=case_experience_dir)
    collector.load_data(agent_name=agent_name, env=env)

    pprint("    - Number of loaded chronics:", len(collector.chronic_ids))

    return collector
Example 3
    def aggregate_data(self, verbose=False):
        observations = []
        actions = []
        rewards = []
        dones = []
        for chronic_idx in self.data:
            data_chronic = self.data[chronic_idx]
            obses_chronic = data_chronic["obses"]
            actions_chronic = data_chronic["actions"]
            rewards_chronic = data_chronic["rewards"]
            dones_chronic = data_chronic["dones"]

            if verbose:
                pprint("Chronic:", chronic_idx)
                pprint(
                    "        - O A R D:",
                    len(obses_chronic),
                    len(actions_chronic),
                    rewards_chronic.shape,
                    dones_chronic.shape,
                )

            observations.extend(obses_chronic[:-1])
            actions.extend(actions_chronic)
            rewards.extend(rewards_chronic.tolist())
            dones.extend(dones_chronic.tolist())

        return observations, actions, np.array(rewards), np.array(dones)
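
The layout of self.data is not shown in this excerpt; the following standalone sketch mirrors the flattening above under the assumption that each chronic entry holds lists of observations/actions plus NumPy arrays of rewards/dones (names and values are illustrative only).

import numpy as np

# Hypothetical two-chronic store with the structure assumed above.
data = {
    0: {"obses": ["o0", "o1", "o2"], "actions": ["a0", "a1"],
        "rewards": np.array([1.0, 0.5]), "dones": np.array([False, True])},
    1: {"obses": ["o3", "o4"], "actions": ["a2"],
        "rewards": np.array([0.2]), "dones": np.array([True])},
}

observations, actions, rewards, dones = [], [], [], []
for chronic_idx in data:
    chronic = data[chronic_idx]
    observations.extend(chronic["obses"][:-1])  # drop the trailing observation
    actions.extend(chronic["actions"])
    rewards.extend(chronic["rewards"].tolist())
    dones.extend(chronic["dones"].tolist())

print(len(observations), len(actions), np.array(rewards).shape, np.array(dones).shape)
# -> 3 3 (3,) (3,)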
Example 4
    def test_gns_dataset(self):
        # Only exercise the full dataset check on Windows; trivially pass elsewhere.
        if sys.platform != "win32":
            self.assertTrue(True)
            return

        experience_dir = os.path.join("../experience", "data")

        case_name = "rte_case5_example"
        agent_name = "agent-mip"

        env_dc = True

        case, collector = load_experience(case_name,
                                          agent_name,
                                          experience_dir,
                                          env_dc=env_dc)
        obses, actions, rewards, dones = collector.aggregate_data()

        n_batch = 16
        max_length = 10 * n_batch + 1
        n_window = 2

        graphs_dict_list = obses_to_lgraphs(obses,
                                            dones,
                                            case,
                                            max_length=max_length,
                                            n_window=n_window)
        cgraphs = lgraphs_to_cgraphs(graphs_dict_list)
        labels = is_do_nothing_action(actions, case.env)

        graph_dims = get_graph_feature_dimensions(cgraphs=cgraphs)
        graph_dataset = tf_batched_graph_dataset(cgraphs,
                                                 n_batch=n_batch,
                                                 **graph_dims)
        label_dataset = tf.data.Dataset.from_tensor_slices(labels).batch(
            n_batch)
        dataset = tf.data.Dataset.zip((graph_dataset, label_dataset))
        dataset = dataset.repeat(1)

        for batch_idx, (graph_batch, label_batch) in enumerate(dataset):
            graph_batch_from_list = utils_tf.data_dicts_to_graphs_tuple(
                graphs_dict_list[(n_batch * batch_idx):(n_batch *
                                                        (batch_idx + 1))])

            check = tf.squeeze(equal_graphs(graph_batch,
                                            graph_batch_from_list)).numpy()

            pprint("Batch:", batch_idx, check)

            if not check:
                for field in [
                        "globals",
                        "nodes",
                        "edges",
                ]:
                    print_matrix(getattr(graph_batch, field))
                    print_matrix(getattr(graph_batch_from_list, field))

            self.assertTrue(check)
Example 5
    def load_model(self):
        model = load_dnn(**self.model_kwargs)

        ckpt_dir = os.path.join(self.model_dir, "ckpts")
        ckpt = tf.train.Checkpoint(model=model, optimizer=tf.keras.optimizers.Adam())
        ckpt_manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=1)
        ckpt.restore(ckpt_manager.latest_checkpoint).expect_partial()

        pprint("Restoring checkpoint from:", ckpt_manager.latest_checkpoint)
        return model
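
A minimal sketch of the checkpoint-writing side that load_model would later restore; load_dnn and model_kwargs are not shown in this excerpt, so the stand-in Keras model and directory below are illustrative only.

import os
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(8, activation="relu"),
                             tf.keras.layers.Dense(1)])
model.build(input_shape=(None, 4))

model_dir = "./model"  # illustrative directory
ckpt_dir = os.path.join(model_dir, "ckpts")
ckpt = tf.train.Checkpoint(model=model, optimizer=tf.keras.optimizers.Adam())
ckpt_manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=1)

# After (or during) training, write the checkpoint that the restore code reads.
save_path = ckpt_manager.save()
print("Checkpoint written to:", save_path)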
Example 6
    def get_topology_action_set(self, save_dir=None, verbose=False):
        (
            actions_line_set,
            actions_line_set_info,
        ) = self.get_all_unitary_line_status_set()

        (
            actions_topology_set,
            actions_topology_set_info,
        ) = self.get_all_unitary_topologies_set(filter_one_line_disconnections=True)

        action_do_nothing = self.env.action_space({})

        actions = list(
            itertools.chain([action_do_nothing], actions_line_set, actions_topology_set)
        )
        actions_info = list(
            itertools.chain([{}], actions_line_set_info, actions_topology_set_info)
        )

        if verbose:
            pprint("Action set:", len(actions), "\n")

        if save_dir:
            actions_descriptions = []
            for action_id, info in enumerate(actions_info):
                line_id = np.nan
                sub_id = np.nan
                conf = ""
                if info:
                    if info["action_type"] == "line_status_set":
                        line_id = int(info["line_id"])
                        conf = info["configuration"]
                    elif info["action_type"] == "topology_set":
                        sub_id = int(info["sub_id"])
                        conf = "-".join([str(b) for b in info["topology"]])
                else:
                    conf = "Do-nothing"

                actions_descriptions.append(
                    {
                        "action_id": action_id,
                        "line_id": line_id,
                        "sub_id": sub_id,
                        "conf": conf,
                    }
                )

            actions_descriptions = pd.DataFrame(actions_descriptions)
            with open(os.path.join(save_dir, "action_space.csv"), "w") as f:
                f.write(actions_descriptions.to_string())

        return actions, actions_info
Example 7
    def _load_chronics(self, agent_name):
        pprint(
            "    - Loading chronics:",
            self.save_dir.replace("\\", "/") + f"/{agent_name}-chronic-****",
        )
        for chronic_file in os.listdir(self.save_dir):
            if ("chronic-" in chronic_file and agent_name in chronic_file
                    and ".npz" in chronic_file):
                chronic_idx = int(
                    os.path.splitext(chronic_file)[0].split("-")[-1])
                self.chronic_files.append(chronic_file)
                self.chronic_ids.append(chronic_idx)
Example 8
def describe_results(metrics, results, y, name=None):
    pprint("\n    - Dataset", name)

    for metric, value in zip(metrics, results):
        if metric in ["recall", "precision", "auc"]:
            continue

        if metric in ["tp", "fn", "tn", "fp"]:
            if metric in ["tp", "fn"]:
                c = 1
            else:
                c = 0

            n = np.sum(np.equal(y, c))
            rate = 100.0 * value / n

            ratio_str = "{}/{}".format(int(value), int(n))
            pprint(
                f"        - {metric.upper()}:",
                "{:<15}{:>8.2f} %".format(ratio_str, rate),
            )
        elif metric == "mcc":
            mcc_tf = float(value)
            pprint(f"        - {metric.capitalize()}:", "{:.4f}".format(mcc_tf))
        else:
            pprint(f"        - {metric.capitalize()}:", "{:.4f}".format(value))
Example 9
def print_dataset(x, y, name):
    if isinstance(x, np.ndarray):
        pprint(
            f"    - {name}:", "X, Y", "{:>20}, {}".format(str(x.shape), str(y.shape))
        )
    elif isinstance(x, list) and isinstance(x[0], dict):
        pprint(f"    - {name}:", "X, Y", "{:>20}, {}".format(len(x), str(y.shape)))
        for field in x[0]:
            if np.equal(x[0][field].shape, x[1][field].shape).all():
                pprint(f"        - X: {field}", x[0][field].shape)
            else:
                raise ValueError("Dimension mismatch.")
    else:
        raise ValueError("Unknown data structure.")

    pprint("        - Positive labels:", "{:.2f} %".format(100 * y.mean()))
    pprint("        - Negative labels:", "{:.2f} %\n".format(100 * (1 - y).mean()))
Example 10
    @staticmethod
    def print_collector(phase):
        print("\n" + "-" * 80)
        pprint("", f"{phase} Experience")
        print("-" * 80)
Example 11
    @staticmethod
    def print_experiment(exp_name):
        print("\n" + "-" * 80)
        pprint("Experiment:", exp_name)
        print("-" * 80)
Example 12
    (
        actions_topology_set,
        actions_topology_set_info,
    ) = action_generator.get_all_unitary_topologies_set(
        verbose=False, filter_one_line_disconnections=False)
    (
        actions_topology_set_filtered,
        actions_topology_set_filtered_info,
    ) = action_generator.filter_one_line_disconnections(
        actions_topology_set, actions_topology_set_info, verbose=verbose)

    (
        actions_line_set,
        actions_line_set_info,
    ) = action_generator.get_all_unitary_line_status_set(verbose=verbose)

    pprint("actions:", "1 do-nothing action")

    pprint("Topology set actions:", "")
    pprint("grid2op", len(grid2op_actions_topology_set))
    pprint("custom", len(actions_topology_set))
    pprint("custom filtered", len(actions_topology_set_filtered))

    pprint("Line set actions:", "")
    pprint("grid2op", len(grid2op_actions_line_set))
    pprint("custom", len(actions_line_set))

if __name__ == "__main__":
    # case_name = "l2rpn_2019"
    case_name = "rte_case5_example"
    verbose = True
Example 13
                chronic_X_forecasts, max_shift=n_window_forecasts)
            chronic_X = np.hstack((chronic_X, chronic_X_forecasts))

        mask_targets.extend(chronic_mask_targets)
        X_all.extend(chronic_X)
        Y_all.extend(chronic_labels)

    mask_targets = np.array(mask_targets)
    X_all = np.vstack(X_all)
    Y_all = np.array(Y_all)

    X_std = X_all.std(axis=0)
    mask_zero = np.equal(X_std, 0.0)
    X_all = X_all[:, ~mask_zero]

    pprint("    - Removed columns:", mask_zero.sum())

    if feature_scaling:
        X_all = X_all / X_std[~mask_zero]

    X = X_all[mask_targets, :]
    Y = Y_all[mask_targets]

    pprint(
        "    - Labels:",
        f"{Y_all.sum()}/{Y_all.size}",
        "{:.2f} %".format(100 * Y_all.mean()),
    )

    return X, Y, mask_targets, X_all, Y_all
Example 14

    def _runner(env, agent, n_steps=100, verbose=False):
        np.random.seed(0)
        env.seed(0)
        env.chronics_handler.tell_id(-1)

        measurements = []

        e = 0  # Episode counter
        done = False
        reward = 0.0
        obs = env.reset()
        pprint("    - Chronic:", env.chronics_handler.get_id())
        agent.reset(obs=obs)
        for t in range(n_steps):
            action = agent.act(obs, reward, done=done)
            obs_next, reward, done, info = env.step(action)

            if t % 50 == 0 or verbose:
                pprint("Step:", env.chronics_handler.real_data.data.current_index)

            reward_est = agent.get_reward()
            res_line, res_gen = agent.compare_with_observation(obs_next, verbose=False)

            dist, dist_status, dist_sub = agent.distance_to_ref_topology(
                obs_next.topo_vect, obs_next.line_status
            )

            measurement = dict()
            measurement["t"] = t
            measurement["e"] = e
            measurement["reward"] = reward
            measurement["reward-est"] = reward_est
            measurement["dist"] = dist
            measurement["dist_status"] = dist_status
            measurement["dist_sub"] = dist_sub
            measurement["rho"] = res_line["rho"].max()
            measurement["env-rho"] = res_line["env_rho"].max()
            measurement["env-gens-p"] = obs.prod_p.sum()
            measurement["env-loads-p"] = obs.load_p.sum()

            for gen_id in res_gen.index:
                measurement[f"gen-{gen_id}"] = res_gen["p_pu"][gen_id]
                measurement[f"env-gen-{gen_id}"] = res_gen["env_p_pu"][gen_id]
                measurement[f"env-gen-{gen_id}-q"] = res_gen["env_q_pu"][gen_id]
                measurement[f"env-q-p-{gen_id}"] = res_gen["env_gen_q_p"][gen_id]

            for line_id in res_line.index:
                measurement[f"line-{line_id}"] = res_line["p_pu"][line_id]
                measurement[f"env-line-{line_id}"] = res_line["env_p_pu"][line_id]

                measurement[f"rho-{line_id}"] = res_line["rho"][line_id]
                measurement[f"env-rho-{line_id}"] = res_line["env_rho"][line_id]

            measurements.append(measurement)

            obs = obs_next
            step = env.chronics_handler.real_data.data.current_index
            if done:
                obs = env.reset()
                pprint(
                    "        - Length:",
                    f"{step}/{env.chronics_handler.real_data.data.max_iter}",
                )
                pprint("    - Done! Next chronic:", env.chronics_handler.get_id())
                agent.reset(obs=obs)
                e = e + 1

        measurements = pd.DataFrame(measurements)
        return measurements
Example 15
            if save_dir:
                agent_name_ = agent_name.replace(" ", "-").lower()
                file_name = (f"{agent_name_}-chronic-" +
                             "{:05}".format(chronic_idx) + "-")
                fig_mu.savefig(os.path.join(save_dir, file_name + "mus"))
                fig_frac.savefig(
                    os.path.join(save_dir, file_name + "fraction-obj"))

            plt.close(fig_mu)
            plt.close(fig_frac)

    @staticmethod
    def _runner(case, env, agent, n_chronics, n_steps,
                done_chronic_indices=()):
        chronics_dir, chronics, chronics_sorted = get_sorted_chronics(env=env)
        pprint("Chronics:", chronics_dir)

        np.random.seed(0)
        env.seed(0)

        chronic_data = []
        for chronic_idx, chronic in enumerate(chronics_sorted):
            if len(chronic_data) >= n_chronics > 0:
                break

            if chronic_idx in done_chronic_indices:
                continue

            chronic_org_idx = chronics.index(chronic)
            env.chronics_handler.tell_id(chronic_org_idx - 1)  # Set chronic id
Example 16
def save_dataframe_to_bz2(data, save_dir, file_name, sep=";"):
    file_path = os.path.join(save_dir, file_name)
    pprint("        - Saving to:", "/".join(file_path.split("\\")[-4:]))
    data.to_csv(file_path, index=False, sep=sep, compression="bz2")
    return file_path
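
A round-trip sketch of the ";"-separated bz2 CSV convention used here (read_bz2_to_dataframe in a later example does the reverse; this standalone version relies on pandas' built-in bz2 support and uses an illustrative file name).

import os
import tempfile
import pandas as pd

data = pd.DataFrame({"p": [1.0, 2.0], "q": [0.1, 0.2]})
save_dir = tempfile.mkdtemp()

file_path = os.path.join(save_dir, "prod_p.csv.bz2")  # illustrative name
data.to_csv(file_path, index=False, sep=";", compression="bz2")

# pandas can decompress directly, mirroring what read_bz2_to_dataframe does.
loaded = pd.read_csv(file_path, sep=";", compression="bz2")
print(loaded.equals(data))  # True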
Example 17
    def _save_chronic(self, agent_name, chronic_idx, verbose=False):
        obses = np.array(self.obses)
        actions = np.array(self.actions)
        rewards = np.array(self.rewards)
        dones = np.array(self.dones)

        distances = np.array(self.distances)
        distances_line = np.array(self.distances_line)
        distances_sub = np.array(self.distances_sub)

        semi_actions = np.array(self.semi_actions)

        total_return = np.array(self.total_return)
        duration = np.array(self.duration)
        chronic_len = np.array(self.chronic_len)
        chronic_name = self.chronic_name

        agent_name = agent_name.replace(" ", "-").lower()
        chronic_file = "{}-chronic-{:05}.npz".format(agent_name, chronic_idx)
        pprint("        - Experience saved to:", chronic_file)

        if verbose:
            pprint("        - Observations:", obses.shape)
            pprint("        - Actions:", actions.shape)
            pprint("        - Rewards:", rewards.shape)
            pprint("        - Dones:", dones.shape)
            pprint("        - Distances:", distances.shape)
            pprint("            - Line:", distances_line.shape)
            pprint("            - Substation:", distances_sub.shape)
            pprint("        - Semi-Actions:", semi_actions.shape)
            pprint("        - Return:", total_return)
            pprint("        - Duration:", duration)
            pprint("        - Length:", chronic_len)
        else:
            pprint(
                "        - O A R D Dist TR Dur L:",
                obses.shape,
                actions.shape,
                rewards.shape,
                dones.shape,
                distances.shape,
                distances_line.shape,
                distances_sub.shape,
                semi_actions.shape,
                total_return,
                duration,
                chronic_len,
            )

        np.savez_compressed(
            os.path.join(self.save_dir, chronic_file),
            obses=obses,
            actions=actions,
            rewards=rewards,
            dones=dones,
            distances=distances,
            distances_line=distances_line,
            distances_sub=distances_sub,
            semi_actions=semi_actions,
            total_return=total_return,
            duration=duration,
            chronic_len=chronic_len,
            chronic_name=chronic_name,
        )

        self.chronic_files.append(chronic_file)
        self.chronic_ids.append(chronic_idx)
Example 18
def print_class_weights(class_weight):
    pprint("Class", "Weight")
    for c in class_weight:
        pprint(f"    - {c}", "{:.5f}".format(class_weight[c]))
Example 19
    def collect(self,
                env,
                agent,
                do_chronics=(),
                n_chronics=-1,
                n_steps=-1,
                verbose=False):
        self.print_collector("Collecting")
        agent.print_agent(default=verbose)

        agent_name = agent.name.replace(" ", "-").lower()
        self._load_chronics(agent_name=agent_name)

        chronics_dir, chronics, chronics_sorted = get_sorted_chronics(env=env)
        pprint("Chronics:", chronics_dir)

        if len(self.chronic_ids):
            pprint(
                "    - Done chronics:",
                ", ".join(map(lambda x: str(x), sorted(self.chronic_ids))),
            )

        if len(do_chronics):
            pprint(
                "    - To do chronics:",
                ", ".join(map(lambda x: str(x), sorted(do_chronics))),
            )

        done_chronic_ids = []
        for chronic_idx, chronic_name in enumerate(chronics_sorted):
            if len(done_chronic_ids) >= n_chronics > 0:
                break

            # If chronic already done
            if chronic_idx in self.chronic_ids:
                continue

            # Environment specific filtering
            if "rte_case5" in env.name:
                if chronic_idx not in do_chronics:
                    continue
            elif "l2rpn_2019" in env.name:
                if chronic_idx not in do_chronics:
                    continue
            elif "l2rpn_wcci_2020" in env.name:
                if chronic_idx not in do_chronics:
                    continue

            chronic_org_idx = chronics.index(chronic_name)
            env.chronics_handler.tell_id(chronic_org_idx - 1)  # Set chronic id

            obs = env.reset()
            agent.reset(obs=obs)

            chronic_len = env.chronics_handler.real_data.data.max_iter
            chronic_path_name = "/".join(
                os.path.normpath(env.chronics_handler.get_id()).split(
                    os.sep)[-3:])
            pprint("    - Chronic:", chronic_path_name)

            t = 0
            done = False
            reward = np.nan
            """
                Collect data.
            """
            while not done and not (t >= n_steps > 0):
                action = agent.act(obs, reward=reward, done=done)
                obs_next, reward, done, info = env.step(action)
                self._add(obs, action, reward, done)

                t = env.chronics_handler.real_data.data.current_index

                if t % 50 == 0:
                    pprint("        - Step:", t)

                if done:
                    pprint("        - Length:", f"{t}/{chronic_len}")

                obs = obs_next

            self.obses.append(obs.to_vect())
            done_chronic_ids.append(chronic_idx)

            self._save_chronic(agent_name, chronic_idx, verbose)
            self.reset()
Example 20
def read_bz2_to_dataframe(file_path, sep=";"):
    pprint("        - Loading from:", "/".join(file_path.split("\\")[-4:]))
    data_csv = bz2.BZ2File(file_path).read().decode()
    return pd.read_csv(StringIO(data_csv), sep=sep)
Example 21

    max_ids = np.argsort(criticals)

    for line_id in reversed(max_ids[-3:]):
        sub_or = env.line_or_to_subid[line_id]
        sub_ex = env.line_ex_to_subid[line_id]

        critical = np.greater(rhos[:, line_id], critical_rho)
        overloaded = np.greater(rhos[:, line_id], 1.0)

        n_critical = critical.sum()
        n_overloaded = overloaded.sum()

        if n_critical > 0:
            pprint(
                f"    - Line {line_id}",
                sub_or,
                sub_ex,
                "{:.3f} + {:.3f}".format(means[line_id], stds[line_id]),
            )
            pprint(
                "        - Critical:",
                n_critical,
                "{:.3f} % / {:.3f} %".format(
                    100 * critical.mean(), 100 * n_critical / n_critical_all
                ),
            )
            pprint(
                "        - Overloaded:",
                n_overloaded,
                "{:.3f} % / {:.3f} %".format(
                    100 * overloaded.mean(), 100 * n_overloaded / n_overloaded_all
                ),
            )
Example 22
def augment_chronic(
    prods,
    loads,
    config,
    augmentation="overload",
    min_p=0.05,
    max_p=0.15,
    targets=None,
    verbose=False,
):
    p = np.random.uniform(low=min_p, high=max_p)

    pprint("        - Augmenting:", f"p = {p}")
    pprint("        - Type:", augmentation)
    pprint("        - p:", [min_p, max_p])
    pprint("        - Targets:", str(targets))

    if verbose or True:  # "or True" keeps these prints on regardless of verbose
        pprint("        - Loads:",
               ["{:.2f}".format(l) for l in loads.mean(axis=0).values])
        pprint("        - Prods:",
               ["{:.2f}".format(g) for g in prods.mean(axis=0).values])

    if augmentation == "overload":
        prods = overload_injections(prods, p=p)
        loads = overload_injections(loads, p=p)
    else:
        if targets and config:
            prod_ids_to_names = ids_to_names(
                config["names_chronics_to_grid"]["prods"])
            load_ids_to_names = ids_to_names(
                config["names_chronics_to_grid"]["loads"])

            gen_names = [
                prod_ids_to_names[gen_id] for gen_id in targets["gen_ids"]
            ]
            load_names = [
                load_ids_to_names[load_id] for load_id in targets["load_ids"]
            ]

            prods[gen_names] = overload_injections(prods[gen_names], p=p)
            loads[load_names] = overload_injections(loads[load_names], p=p)

    if verbose or True:
        pprint("        - Loads:",
               ["{:.2f}".format(l) for l in loads.mean(axis=0).values])
        pprint("        - Prods:",
               ["{:.2f}".format(g) for g in prods.mean(axis=0).values])

    return prods, loads, p
Example 23
    def _runner(self,
                env,
                agent,
                do_chronics=(),
                n_chronics=-1,
                n_steps=-1,
                verbose=False):
        self.print_experiment("Performance")
        agent.print_agent(default=verbose)

        agent_name = agent.name.replace(" ", "-").lower()
        self.collector._load_chronics(agent_name=agent_name)

        chronics_dir, chronics, chronics_sorted = get_sorted_chronics(env=env)
        pprint("Chronics:", chronics_dir)

        if len(self.collector.chronic_ids):
            pprint(
                "    - Done chronics:",
                ", ".join(
                    map(lambda x: str(x), sorted(self.collector.chronic_ids))),
            )

        if len(do_chronics):
            pprint(
                "    - To do chronics:",
                ", ".join(map(lambda x: str(x), sorted(do_chronics))),
            )

        done_chronic_ids = []
        for chronic_idx, chronic_name in enumerate(chronics_sorted):
            if len(done_chronic_ids) >= n_chronics >= 0:
                break

            # If chronic already done
            if chronic_idx in self.collector.chronic_ids:
                continue

            # Environment specific filtering
            if "rte_case5" in env.name:
                if chronic_idx not in do_chronics:
                    continue
            elif "l2rpn_2019" in env.name:
                if chronic_idx not in do_chronics:
                    continue
            elif "l2rpn_wcci_2020" in env.name:
                if chronic_idx not in do_chronics:
                    continue

            chronic_org_idx = chronics.index(chronic_name)
            env.chronics_handler.tell_id(chronic_org_idx - 1)  # Set chronic id

            obs = env.reset()
            agent.reset(obs=obs)

            chronic_len = env.chronics_handler.real_data.data.max_iter
            chronic_path_name = "/".join(
                os.path.normpath(env.chronics_handler.get_id()).split(
                    os.sep)[-3:])

            augmentation_info = os.path.join(env.chronics_handler.get_id(),
                                             "augmentation.json")
            ps = None
            if os.path.isfile(augmentation_info):
                with open(augmentation_info, "r") as f:
                    ps = json.load(f)

            pprint("    - Chronic:", chronic_path_name)
            # if ps:
            #     p = ps["p"]
            #     min_p = ps["min_p"]
            #     max_p = ps["max_p"]
            #     targets = ps["targets"]
            #
            #     pprint("        - Augmentation:", ps["augmentation"])
            #     pprint(
            #         "            - Rate:",
            #         "p = {:.2f} ~ [{:.2f}, {:.2f}]".format(p, min_p, max_p),
            #     )
            #     if targets:
            #         pprint("            - Targets:", str(targets))

            t = 0
            done = False
            reward = np.nan
            """
                Collect data.
            """
            while not done and not (t >= n_steps > 0):
                action = agent.act(obs, reward=reward, done=done)
                obs_next, reward, done, info = env.step(action)
                self.collector._add(obs, action, reward, done)

                semi_action = False
                if agent.semi_agent is not None:
                    semi_action = agent.semi_agent.semi_action

                dist, dist_status, dist_sub = agent.distance_to_ref_topology(
                    obs_next.topo_vect, obs_next.line_status)
                self.collector._add_plus(dist, dist_status, dist_sub,
                                         semi_action)

                t = env.chronics_handler.real_data.data.current_index

                if t % 200 == 0:
                    semi_sum = np.sum(self.collector.semi_actions)
                    pprint("        - Step:", t, semi_sum)

                if done:
                    semi_sum = np.sum(self.collector.semi_actions)
                    pprint("        - Length:", f"{t}/{chronic_len}", semi_sum)

                obs = obs_next

            self.collector.obses.append(obs.to_vect())
            self.collector.total_return = compute_returns(
                self.collector.rewards)[0]
            self.collector.duration = t
            self.collector.chronic_len = chronic_len
            self.collector.chronic_name = chronic_name

            done_chronic_ids.append(chronic_idx)

            self.collector._save_chronic(agent_name, chronic_idx, verbose)
            self.collector.reset()
Example 24
        if chronic_idx <= 10:
            min_p, max_p = (0.05, 0.20)
            augmentation = "overload"
        elif 40 < chronic_idx <= 50:
            min_p, max_p = (0.25, 0.30)
            augmentation = "targeted"
            targets["gen_ids"] = [2, 4]
            targets["load_ids"] = [3, 4]
        else:
            min_p, max_p = (0.15, 0.20)
            augmentation = "overload"
    else:
        min_p, max_p = (0.05, 0.10)
        augmentation = "overload"

    pprint("    - Augmenting:", chronic)

    chronic_dir = os.path.join(case_chronics, chronic)
    art_chronic_dir = os.path.join(art_case_chronics, chronic)

    prods_file = [
        file for file in os.listdir(chronic_dir) if is_prods_file(file)
    ]
    loads_file = [
        file for file in os.listdir(chronic_dir) if is_loads_file(file)
    ]
    assert len(prods_file) == 1 and len(loads_file) == 1

    prods = read_bz2_to_dataframe(os.path.join(chronic_dir, prods_file[0]),
                                  sep=";")
    loads = read_bz2_to_dataframe(os.path.join(chronic_dir, loads_file[0]),
                                  sep=";")
Example 25
    def test_correction(self):
        np.random.seed(0)

        y_labels = np.random.binomial(1, p=0.02, size=10000)
        y_preds = y_labels.copy()

        # Make predictions
        mask = np.zeros_like(y_preds, dtype=bool)
        rand_ints = np.random.randint(0, len(y_preds), len(y_preds) // 5)
        mask[rand_ints] = True
        y_preds[mask] = 1 - y_preds[mask]

        # Correct predictions
        out_preds = correct_predictions(y_labels, y_preds, w_f=0, w_b=5)

        pprint("mask", color_mask(mask, mask) + "\n", shift=10)
        pprint("y", format_matrix(y_labels)[0], shift=10)
        pprint("y_preds", format_matrix(y_preds)[0], shift=10)

        pprint(
            "correction",
            color_mask(
                np.not_equal(y_preds, out_preds),
                np.not_equal(y_preds, out_preds),
                color=1,
            ),
            shift=10,
        )
        pprint("out_preds", format_matrix(out_preds)[0], shift=10)
        pprint("y", format_matrix(y_labels)[0], shift=10)

        pprint("\t- mcc", "{:.3f}".format(matthews_corrcoef(y_labels,
                                                            y_preds)))
        pprint(
            "\t- mcc_out",
            "{:.3f}".format(matthews_corrcoef(y_labels, out_preds)) + "\n",
        )

        c_matrix = confusion_matrix(y_labels, out_preds)
        tn = c_matrix[0][0]
        fn = c_matrix[1][0]
        tp = c_matrix[1][1]
        fp = c_matrix[0][1]

        pprint("\t- TP:", "{:.3f}".format(tp / (tp + fn)))
        pprint("\t- FN:", "{:.3f}".format(fn / (tp + fn)))
        pprint("\t- FP:", "{:.3f}".format(fp / (tn + fp)))
        pprint("\t- TN:", "{:.3f}".format(tn / (tn + fp)))

        time.sleep(0.1)
        self.assertTrue(True)
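
The manual indexing of c_matrix above follows scikit-learn's rows-are-true / columns-are-predicted layout; for binary labels the same counts can be unpacked directly, as in this short sketch.

import numpy as np
from sklearn.metrics import confusion_matrix

y_true = np.array([0, 0, 1, 1, 1, 0])
y_pred = np.array([0, 1, 1, 0, 1, 0])

# ravel() yields the counts in the order tn, fp, fn, tp for binary labels.
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
print("TPR {:.3f}  FNR {:.3f}  FPR {:.3f}  TNR {:.3f}".format(
    tp / (tp + fn), fn / (tp + fn), fp / (tn + fp), tn / (tn + fp)))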