Example #1
 def test_case5_runs(self):
     dataset_path = os.path.join(PATH_CHRONICS, "rte_case5_example")
     with make2(dataset_path) as env:
         # N.B. "availble" (sic) is the attribute's actual spelling in Grid2Op
         assert env.redispatching_unit_commitment_availble
         obs = env.reset()
         sim_obs, reward, done, info = obs.simulate(env.action_space())
         assert sim_obs != obs
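
`obs.simulate` scores an action against the forecasted next step without advancing the environment, which makes it a natural building block for one-step lookahead. A minimal sketch continuing from the test above; `candidates` is an illustrative name, not part of the original code:

    # Try each candidate action in simulation and play the best one.
    candidates = [env.action_space()]  # do-nothing as the only illustrative candidate
    best_action, best_reward = candidates[0], float("-inf")
    for action in candidates:
        sim_obs, sim_reward, sim_done, sim_info = obs.simulate(action)
        if not sim_done and sim_reward > best_reward:
            best_action, best_reward = action, sim_reward
    obs, reward, done, info = env.step(best_action)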
Example #2
 def test_case14_redisp_runs(self):
     dataset_path = os.path.join(PATH_CHRONICS, "rte_case14_redisp")
     with make2(dataset_path) as env:
         assert env.redispatching_unit_commitment_availble
         obs = env.reset()
         sim_obs, reward, done, info = obs.simulate(env.action_space())
         assert sim_obs != obs
         assert np.all(env._thermal_limit_a == case14_redisp_TH_LIM)
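
The test pins `env._thermal_limit_a`, the per-line thermal limits in amperes, against a known constant. A minimal sketch of overriding those limits at runtime, assuming Grid2Op's `Environment.set_thermal_limit` method; the 10% relaxation is purely illustrative:

    import numpy as np

    # Relax every powerline's thermal limit by 10% (illustrative values).
    new_limits = np.asarray(env._thermal_limit_a, dtype=float) * 1.1
    env.set_thermal_limit(new_limits)
    assert np.allclose(env._thermal_limit_a, new_limits)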
Example #3
 def test_l2rpn19_override_feed_kwargs(self):
     dataset_path = os.path.join(PATH_CHRONICS, "rte_L2RPN_2019")
     chronics_path = os.path.join(dataset_path, "chronics", "0000")
     dfk = {
         "chronicsClass": ChangeNothing,
         "path": chronics_path,
         "gridvalueClass": GridStateFromFile
     }
     with make2(dataset_path, data_feeding_kwargs=dfk) as env:
         assert isinstance(env.chronics_handler.real_data, ChangeNothing)
Example #4
 def test_case14_redisp_config(self):
     dataset_path = os.path.join(PATH_CHRONICS, "rte_case14_redisp")
     with make2(dataset_path) as env:
         # Check config is loaded from config.py
         assert env.rewardClass == RedispReward
         assert env.actionClass == TopoAndRedispAction
         assert env.observationClass == CompleteObservation
         assert isinstance(env.backend, PandaPowerBackend)
         assert env.legalActClass == DefaultRules
         assert isinstance(env.voltage_controler, ControlVoltageFromFile)
         assert isinstance(env.chronics_handler.real_data, Multifolder)
Example #5
 def test_l2rpn19_test_config(self):
     dataset_path = os.path.join(PATH_CHRONICS, "rte_L2RPN_2019")
     with make2(dataset_path) as env:
         # Check config is loaded from config.py
         assert env.rewardClass == L2RPNReward
         assert env.actionClass == TopologyAction
         assert env.observationClass == CompleteObservation
         assert isinstance(env.backend, PandaPowerBackend)
         assert env.legalActClass == DefaultRules
         assert isinstance(env.voltage_controler, ControlVoltageFromFile)
         assert isinstance(env.chronics_handler.real_data, Multifolder)
         assert env.action_space.grid_layout is not None
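
Examples #4 and #5 both rely on `make2` reading these classes from the dataset's `config.py`. The exact key names and import paths vary across Grid2Op versions, so the following is only a plausible sketch of such a file, mirroring the classes this test asserts:

    # config.py (sketch; key names and import paths are assumptions, not verified API)
    from grid2op.Action import TopologyAction
    from grid2op.Observation import CompleteObservation
    from grid2op.Reward import L2RPNReward
    from grid2op.Rules import DefaultRules
    from grid2op.Chronics import Multifolder, GridStateFromFileWithForecasts
    from grid2op.Backend import PandaPowerBackend

    config = {
        "backend": PandaPowerBackend,
        "action_class": TopologyAction,
        "observation_class": CompleteObservation,
        "reward_class": L2RPNReward,
        "gamerules_class": DefaultRules,
        "chronics_class": Multifolder,
        "grid_value_class": GridStateFromFileWithForecasts,
    }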
Example #6
                        default=1000,
                        type=int,
                        help="Maximum number of steps per scenario")
    return parser.parse_args()


if __name__ == "__main__":
    args = cli()

    # Limit gpu usage
    physical_devices = tf.config.list_physical_devices('GPU')
    if physical_devices:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    # Create dataset env
    env = make2(args.path_data,
                reward_class=RedispReward,
                action_class=CustomAction)

    # Create agent
    agent = RDQNAgent(env, env.action_space, is_training=False)
    # Load weights from file
    agent.load_network(args.path_model)

    # Build runner
    runner_params = env.get_params_for_runner()
    runner = Runner(**runner_params, agentClass=None, agentInstance=agent)

    # Run
    res = runner.run(path_save=args.path_logs,
                     nb_episode=args.nb_episode,
                     nb_process=args.nb_process,
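
The `runner.run` call is truncated above. Assuming the standard Grid2Op `Runner.run` signature, it plausibly ends with a `max_iter` bound fed by the "Maximum number of steps per scenario" argument declared earlier; `args.max_steps` is a guess, since the flag's name is cut off:

    # Sketch of how the truncated call likely ends (not the verified original):
    res = runner.run(path_save=args.path_logs,
                     nb_episode=args.nb_episode,
                     nb_process=args.nb_process,
                     max_iter=args.max_steps)

    # Runner.run returns one summary tuple per episode.
    for _, chronics_name, cum_reward, nb_time_step, max_ts in res:
        print(f"{chronics_name}: reward={cum_reward:.2f}, steps={nb_time_step}/{max_ts}")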
Example #7
        impact = action.impact_on_objects()
        pruned_impact = {
            "injection": prune_impact_bool(impact["injection"], "changed"),
            "line_reconnect": prune_impact_count(impact["force_line"]["reconnections"], "count"),
            "line_disconnect": prune_impact_count(impact["force_line"]["disconnections"], "count"),
            "connect_bus": prune_impact_array(impact["topology"], "assigned_bus"),
            "disconnect_bus": prune_impact_array(impact["topology"], "disconnect_bus"),
            "switch_line": prune_impact_array(impact["switch_line"], "powerlines"),
            "switch_bus": prune_impact_array(impact["topology"], "bus_switch"),
            "redispatch": prune_impact_array(impact["redispatch"], "generators")
        }
        compact_impact = {k: v for k, v in pruned_impact.items() if v is not None}
        actions_dict[index_str] = compact_impact

    actions_json = json.dumps(actions_dict,
                              indent=2,
                              cls=NpEncoder)
    print(actions_json)

if __name__ == "__main__":
    args = cli()
    env = make2(args.path_data, action_class=CustomAction)
    # Limit gpu usage
    physical_devices = tf.config.list_physical_devices('GPU')
    if physical_devices:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    agent = DoubleDuelingDQNAgent(env, env.action_space,
                                  is_training=False)
    print_actions(agent)
            "connect_bus":
            prune_impact_array(impact["topology"], "assigned_bus"),
            "disconnect_bus":
            prune_impact_array(impact["topology"], "disconnect_bus"),
            "switch_line":
            prune_impact_array(impact["switch_line"], "powerlines"),
            "switch_bus":
            prune_impact_array(impact["topology"], "bus_switch"),
            "redispatch":
            prune_impact_array(impact["redispatch"], "generators")
        }
        compact_impact = {
            k: v
            for k, v in pruned_impact.items() if v is not None
        }
        actions_dict[index_str] = compact_impact

    actions_json = json.dumps(actions_dict, indent=2, cls=NpEncoder)
    print(actions_json)


if __name__ == "__main__":
    args = cli()
    env = make2(args.path_data, action_class=PowerlineChangeAndDispatchAction)
    # Limit gpu usage
    physical_devices = tf.config.list_physical_devices('GPU')
    if physical_devices:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    agent = DoubleDuelingDQN(env, env.action_space, is_training=False)
    print_actions(agent)
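
Examples #7 and #8 both lean on `prune_impact_bool`, `prune_impact_count`, and `prune_impact_array`, which are not shown. Judging from their call sites and the `is not None` filter that follows, they plausibly look like this; a sketch, not the original helpers:

    def prune_impact_bool(impact, key):
        """Keep the impact entry only if its boolean flag is set."""
        return impact if impact.get(key) else None

    def prune_impact_count(impact, key):
        """Keep the impact entry only if its counter is positive."""
        return impact if impact.get(key, 0) > 0 else None

    def prune_impact_array(impact, key):
        """Keep the value under `key` only if it is a non-empty collection."""
        values = impact.get(key)
        return values if values is not None and len(values) > 0 else None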
Example #9
 def test_l2rpn19_override_chronics(self):
     dataset_path = os.path.join(PATH_CHRONICS, "rte_L2RPN_2019")
     with make2(dataset_path, chronics_class=ChangeNothing) as env:
         assert isinstance(env.chronics_handler.real_data, ChangeNothing)
Example #10
 def test_l2rpn19_override_action(self):
     dataset_path = os.path.join(PATH_CHRONICS, "rte_L2RPN_2019")
     with make2(dataset_path, action_class=VoltageOnlyAction) as env:
         assert env.actionClass == VoltageOnlyAction
Example #11
 def test_case14_test_override_action(self):
     dataset_path = os.path.join(PATH_CHRONICS, "rte_case14_test")
     with make2(dataset_path, action_class=VoltageOnlyAction) as env:
         assert env.actionClass == VoltageOnlyAction
Example #12
 def test_l2rpn19_override_reward(self):
     dataset_path = os.path.join(PATH_CHRONICS, "rte_L2RPN_2019")
     with make2(dataset_path, reward_class=FlatReward) as env:
         assert env.rewardClass == FlatReward
Example #13
 def test_case14_test_override_reward(self):
     dataset_path = os.path.join(PATH_CHRONICS, "rte_case14_test")
     with make2(dataset_path, reward_class=FlatReward) as env:
         assert env.rewardClass == FlatReward
Example #14
                        required=False,
                        default=1e-5,
                        type=float,
                        help="Learning rate for the Adam optimizer")
    parser.add_argument("--resume",
                        required=False,
                        help="Path to model.h5 to resume training with")
    return parser.parse_args()


if __name__ == "__main__":
    args = cli()
    env = make2(args.path_data,
                action_class=CustomAction,
                reward_class=RedispReward,
                other_rewards={
                    "bridge": BridgeReward,
                    "close_to_of": CloseToOverflowReward,
                    "distance": DistanceReward
                })

    # Limit gpu usage
    physical_devices = tf.config.list_physical_devices('GPU')
    if physical_devices:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    agent = DDDQNAgent(env,
                       env.action_space,
                       name=args.name,
                       is_training=True,
                       batch_size=args.batch_size,
                       num_frames=args.num_frames,
                       lr=args.learning_rate)
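
When `other_rewards` is passed to `make2`, Grid2Op evaluates each extra reward alongside the main one and exposes the per-step values through the `info` dict. A minimal sketch of reading them back during a rollout; the keys match the names registered above:

    # Inspect the extra rewards for one do-nothing episode.
    obs = env.reset()
    done = False
    while not done:
        obs, reward, done, info = env.step(env.action_space())
        extra = info.get("rewards", {})
        print(reward, extra.get("bridge"), extra.get("close_to_of"), extra.get("distance"))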