Code example #1
def test_step_new_grid_import_peak_end_of_month():
    """
    Test where we get a new grid import peak, and since it is the
    "end of the month" = episode length,
    we also get reward for grid tariff
    """
    data = pd.DataFrame(
        data={
            "consumption": [1, 2, 2, 3],
            "pv_production": [2, 12, 5, 4],
            "wind_production": [3, 2, 1, 4],
            "spot_market_price": [0.0, 0.4, 0.2, 0.1],
        },
        index=pd.date_range("2020-1-1T12:00", periods=4, freq="H"),
    )
    env = RyeFlexEnv(data=data,
                     episode_length=timedelta(hours=2),
                     charge_loss_hydrogen=0.5)
    input_grid_import = 1.0

    env.reset(
        start_time=datetime(2020, 1, 1, 13),
        battery_storage=8,
        grid_import=input_grid_import,
    )

    # Check that the correct grid import peak was set at reset
    assert env._state.grid_import_peak == input_grid_import

    input_action = np.array([-2, 10])

    _, _, res_done, res_info = env.step(input_action)

    # Check the grid import and its peak after the first step
    assert res_info["state"].grid_import == 4
    assert res_info["state"].grid_import_peak == 4
    assert not res_done

    res_new_state_vector, res_reward, res_done, res_info = env.step(
        input_action)

    res_new_state = res_info["state"]
    print(res_new_state)

    # Check grid import and peak
    assert res_done
    assert res_new_state.grid_import_peak == 4
    assert res_new_state.grid_import == 3

    assert res_reward == (
        (res_new_state.spot_market_price + env._grid_tariff) *
        res_new_state.grid_import +
        env._peak_grid_tariff * res_new_state.grid_import_peak)
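
The final assertion combines the hourly energy cost with the peak tariff that is charged once the episode (the "end of the month") is reached. A minimal sketch of that computation, assuming placeholder tariff values rather than the environment's actual _grid_tariff and _peak_grid_tariff:

def end_of_month_reward(spot_price: float, grid_import: float,
                        grid_import_peak: float,
                        grid_tariff: float = 0.05,
                        peak_grid_tariff: float = 49.0) -> float:
    # Hourly energy cost plus the peak tariff applied at the end of the episode.
    # The default tariff values are illustrative placeholders.
    return (spot_price + grid_tariff) * grid_import + peak_grid_tariff * grid_import_peak


# With the values asserted above (spot price 0.1, grid import 3, peak 4):
print(end_of_month_reward(0.1, 3, 4))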
Code example #2
def main() -> None:
    root_dir = dirname(abspath(join(__file__, "../")))
    data = pd.read_csv(join(root_dir, "data/train.csv"), index_col=0, parse_dates=True)

    env = RyeFlexEnv(data)

    print(env.observation_space, env.action_space)
    agent = Policy(env.observation_space.shape[0], env.action_space.shape[0])

    # learning_rate is assumed to be defined as a module-level hyperparameter
    optimizer = optim.Adam(agent.parameters(), lr=learning_rate)

    for i in range(100):
        # plotter = RyeFlexEnvEpisodePlotter()
        # Start a fresh episode; without a reset, later iterations cannot step
        env.reset()
        info = None
        done = False
        # Initial state
        state = env._state.vector
        while not done:
            # state = torch.from_numpy(state).type(torch.FloatTensor)
            action = select_action(agent, state)
            state, reward, done, info = env.step(action.detach().numpy())
            # The environment returns a cost, so negate it to form a reward
            agent.reward_episode.append(-reward)
            # plotter.update(info)
        update_policy(agent, optimizer)
        print(f"Your score is: {info['cumulative_reward']} NOK")
Code example #3
def test_step_random_state():
    """
    Test that:
        - The state changes (also when not using when not using reset)
        - The state are in the desired state-space/observation-space
    """
    data = pd.DataFrame(
        data={
            "consumption": [1, 2, 1, 3],
            "pv_production": [2, 2, 5, 4],
            "wind_production": [3, 2, 1, 4],
            "spot_market_price": [0.0, 0.4, 0.2, 0.1],
        },
        index=pd.date_range("2020-1-1T12:00", periods=4, freq="H"),
    )
    env = RyeFlexEnv(data=data,
                     episode_length=timedelta(hours=2),
                     charge_loss_hydrogen=0.5)

    input_action = Action(charge_battery=-1.1, charge_hydrogen=-1.2).vector

    old_state_vector = env._state.vector

    res_state_vector, res_reward, res_done, res_info = env.step(input_action)

    res_state = env._state

    assert (res_state_vector != old_state_vector).any()

    assert (res_state_vector == res_state.vector).all()
    assert (res_state_vector <= env.observation_space.high).all()
    assert (res_state_vector >= env.observation_space.low).all()
    assert (res_info["state"].vector == res_state.vector).all()
Code example #4
def main() -> None:
    root_dir = dirname(abspath(join(__file__, "../")))
    data = pd.read_csv(join(root_dir, "data/train.csv"),
                       index_col=0,
                       parse_dates=True)

    env = RyeFlexEnv(data=data)
    env.reset(start_time=datetime(2021, 2, 1, 0, 0))
    plotter = RyeFlexEnvEpisodePlotter()

    # INSERT YOUR OWN ALGORITHM HERE
    agent = RandomActionAgent(env.action_space)

    # Example with random initial state
    info = {}
    done = False
    # Initial state
    state = env._state

    while not done:

        # INSERT YOUR OWN ALGORITHM HERE
        action = agent.get_action(state)

        state, reward, done, info = env.step(action)

        plotter.update(info)

    print(f"Your test score is: {info['cumulative_reward']} NOK")

    plotter.plot_episode()
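
RandomActionAgent is provided by the repository and not shown in this excerpt. A minimal sketch consistent with how it is used here, assuming it simply samples uniformly from the Gym action space (some examples call get_action() with a state argument and some without, hence the optional parameter):

import numpy as np


class RandomActionAgent:
    """Agent that ignores the state and returns a random action."""

    def __init__(self, action_space) -> None:
        self._action_space = action_space

    def get_action(self, state=None) -> np.ndarray:
        # gym.spaces.Box.sample() draws uniformly within the space bounds
        return self._action_space.sample()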
Code example #5
def test_attributes_time():
    """
    Test that all time attributes are handled correctly
    """

    data = pd.read_csv("data/train.csv", index_col=0, parse_dates=True)

    input_episode_length = timedelta(days=30)
    env = RyeFlexEnv(episode_length=input_episode_length, data=data)

    res_start_time = env._time
    res_end_time = env._episode_end_time

    assert input_episode_length == env._episode_length
    assert res_start_time + input_episode_length == res_end_time
    assert env._start_time_data <= res_start_time
    assert res_start_time <= env._end_time_data

    input_start_time = datetime(2020, 10, 1)
    env.reset(start_time=input_start_time)
    res_end_time = input_episode_length + env._time
    ans_end_time = datetime(2020, 10, 31)

    assert ans_end_time == res_end_time

    _, _, _, res_info = env.step(action=np.array([1, 1]))
    res_time = env._time

    assert res_time == env._time_resolution + input_start_time
    assert res_info["time"] == res_time
Code example #6
def test_episodes():
    """
    Test to check that length of episode,
    cumulative reward and done signal are sent correctly
    """

    data = pd.read_csv("data/train.csv", index_col=0, parse_dates=True)

    env = RyeFlexEnv(data=data)
    plotter = RyeFlexEnvEpisodePlotter()
    length = int(env._episode_length.days * 24)

    # Example with random initial state
    done = False
    cumulative_reward = env._cumulative_reward

    while not done:
        action = env.action_space.sample()
        state, reward, done, info = env.step(action)
        new_cumulative_reward = info["cumulative_reward"]

        assert round(new_cumulative_reward - cumulative_reward, 5) == round(reward, 5)

        cumulative_reward = new_cumulative_reward
        plotter.update(info)

    assert len(plotter._states) == length

    plotter.plot_episode(show=True)

    # Example where the environment is reset to a partially known state
    env.reset(start_time=datetime(2020, 2, 3), battery_storage=1)

    done = False
    while not done:
        action = env.action_space.sample()
        state, reward, done, info = env.step(action)
        plotter.update(info)

    assert len(plotter._states) == length
    plotter.plot_episode(show=False)
Code example #7
def main() -> None:
    root_dir = dirname(abspath(join(__file__, "../")))
    data = pd.read_csv(join(root_dir, "data/test.csv"),
                       index_col=0,
                       parse_dates=True)
    env = RyeFlexEnv(data=data)
    env.reset(start_time=datetime(2021, 2, 1, 0, 0))
    data2 = pd.read_csv(join(root_dir, "data/train.csv"),
                        index_col=0,
                        parse_dates=True)
    data = pd.concat([data2, data])
    plotter = RyeFlexEnvEpisodePlotter()

    # INSERT YOUR OWN ALGORITHM HERE
    #agent = KalmanAgent(env.action_space)

    # Example with random initial state
    info = {}
    done = False
    # Initial state
    state = env._state.vector
    N = 28
    while not done:
        #PV = data.loc[env._time:env._time + N*env._time_resolution, "pv_production"]
        #W = data.loc[env._time:env._time + N*env._time_resolution, "wind_production"]
        #C = data.loc[env._time:env._time + N*env._time_resolution, "consumption"]
        spot = data.loc[env._time:env._time + N * env._time_resolution,
                        "spot_market_price"]
        #print("State t: ", state[0] - state[1] - state[2] + action[0] + action[1])

        C = data.loc[env._time - 47 * env._time_resolution:env._time,
                     "consumption"]
        PV = data.loc[env._time - 47 * env._time_resolution:env._time,
                      "pv_production"]
        Wind = data.loc[env._time:env._time + N * env._time_resolution,
                        "wind_speed_50m:ms"]
        Wind_prod_last = data.loc[env._time, "wind_production"]
    C_estim = [np.array(C[-1])]
    PV_estim = [np.array(PV[-1])]
    # Roll one-step-ahead forecasts forward over the N-hour horizon,
    # feeding each prediction back into the history window
    for i in range(N):
        c = get_predicted_consumption(C[-48:])
        C_estim.append(c)
        C = np.concatenate([C, c])
        pv = get_predicted_solar_power(PV[-48:])
        PV_estim.append(pv)
        PV = np.concatenate([PV, pv])
        # W = []
        # for x in Wind:
        #     W.append(get_predicted_wind_power(x))
        # W = np.array(W)
        W = get_predicted_wind_power_stupid(Wind, Wind_prod_last, N)
        C = np.hstack(C_estim)
        action = MPC_step(N, state[3:6], PV[1:], W[1:], C[1:], spot[1:])
        state, reward, done, info = env.step(action)
        print(env._time)
        plotter.update(info)

    print(f"Your test score is: {info['cumulative_reward']} NOK")
    plotter.plot_episode()
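
get_predicted_consumption, get_predicted_solar_power, get_predicted_wind_power_stupid and MPC_step are defined elsewhere in the project. As a purely hypothetical illustration of the interface this loop assumes for the one-step predictors (a 48-hour history in, a one-element array out), a naive persistence forecaster could look like this; the real predictors are presumably fitted models:

import numpy as np


def persistence_forecast(history) -> np.ndarray:
    """Hypothetical stand-in for the one-step-ahead predictors:
    predict the next hour as the value observed 24 hours earlier."""
    history = np.asarray(history, dtype=float)
    lag = 24 if len(history) >= 24 else 1
    return np.array([history[-lag]])


# With 48 hours of history, the forecast repeats yesterday's value
print(persistence_forecast(np.arange(48)))  # -> [24.]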
Code example #8
def main() -> None:
    root_dir = dirname(abspath(join(__file__, "../")))
    data = pd.read_csv(join(root_dir, "data/train.csv"),
                       index_col=0,
                       parse_dates=True)

    env = RyeFlexEnv(data)

    print(env.observation_space, env.action_space)

    base_actions = np.array([[1, 1], [1, 0], [0, 1], [1, 0.1], [0.1, 1]])
    actions = base_actions.copy()
    for i in [-2, -1.5, -1.2, -1, -0.1, -0.01, 0.01, 0.1]:
        actions = np.append(actions, base_actions * i, 0)
    #actions = np.array([[0.1,0], [-0.1,0], [0, 0.1], [0,-0.1]])

    agent = Policy(env.observation_space.shape[0] + 1, actions)
    print(list(agent.parameters()))

    lossFunc = nn.MSELoss()
    optimizer = optim.Adam(agent.parameters(), lr=learning_rate)
    scheduler = optim.lr_scheduler.StepLR(optimizer, 30, 0.1)
    for i in range(30):
        if i == 29:
            plotter = RyeFlexEnvEpisodePlotter()
        env.reset()
        info = None
        done = False
        # Initial state
        state = env._state.vector
        state = np.append([state], [0])
        j = 0
        while not done:
            j += 1
            action, Q1 = agent(state)
            print(action)
            state, reward, done, info = env.step(action)
            state = np.append([state], [j])
            if not done:
                _, Q2 = agent(state)
                loss = lossFunc(Q1, reward + agent.gamma * Q2)
            else:
                reward = torch.FloatTensor(np.array(reward))
                reward.requires_grad = True
                loss = lossFunc(Q1, reward)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if i == 29:
                plotter.update(info)
        scheduler.step()
        if i % 1 == 0:
            print(f"Your score is: {info['cumulative_reward']} NOK")
    plotter.plot_episode()
    plt.plot(np.arange(len(agent.loss_history)), agent.loss_history)
    plt.show()
    print(list(agent.parameters()))
Code example #9
def main() -> None:
    root_dir = dirname(abspath(join(__file__, "../")))
    data = pd.read_csv(join(root_dir, "data/train.csv"),
                       index_col=0,
                       parse_dates=True)

    env = RyeFlexEnv(data=data)
    plotter = RyeFlexEnvEpisodePlotter()
    agent = ConstantActionAgent()

    # Example with random initial state
    info = {}
    done = False
    # Initial state
    state = env._state

    while not done:

        action = agent.get_action()

        state, reward, done, info = env.step(action)

        plotter.update(info)

    print(f"Your score is: {info['cumulative_reward']} NOK")
    plotter.plot_episode()

    # Example where the environment is reset
    env.reset(start_time=datetime(2020, 2, 3), battery_storage=1)

    done = False
    while not done:
        action = agent.get_action()

        state, reward, done, info = env.step(action)

        plotter.update(info)

    print(f"Your score is: {info['cumulative_reward']} NOK")
    plotter.plot_episode()
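
ConstantActionAgent comes from the repository's example agents. A minimal sketch matching the calls above, assuming it always returns the same fixed action; the default values here are placeholders:

import numpy as np


class ConstantActionAgent:
    """Agent that always returns the same action, regardless of state."""

    def __init__(self, action=None) -> None:
        # Placeholder default: charge battery and hydrogen by 1 kWh/h each
        self._action = np.array([1.0, 1.0]) if action is None else np.asarray(action)

    def get_action(self) -> np.ndarray:
        return self._action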
Code example #10
def main() -> None:
    root_dir = dirname(abspath(join(__file__, "../")))
    data = pd.read_csv(join(root_dir, "data/train.csv"),
                       index_col=0,
                       parse_dates=True)
    env = RyeFlexEnv(data=data)
    env.reset(start_time=datetime(2020, 2, 1, 0, 0))
    plotter = RyeFlexEnvEpisodePlotter()

    # INSERT YOUR OWN ALGORITHM HERE
    #agent = KalmanAgent(env.action_space)

    # Example with random initial state
    info = {}
    done = False
    # Initial state
    state = env._state.vector
    agent = RandomActionAgent(env.action_space)
    #agent = KalmanAgent(env.action_space, state)
    i = 0
    while not done:

        # INSERT YOUR OWN ALGORITHM HERE
        # Overwrite the measured values with the next hour's data from the
        # dataset, i.e. a perfect one-step-ahead forecast of consumption,
        # PV and wind production.
        # print(state[0], data.at[env._time + env._time_resolution, "consumption"])
        state[0] = data.at[env._time + env._time_resolution, "consumption"]
        state[1] = data.at[env._time + env._time_resolution, "pv_production"]
        state[2] = data.at[env._time + env._time_resolution, "wind_production"]
        action = agent.get_action(state)

        #action = np.array([0,0])
        #print("State t: ", state[0] - state[1] - state[2] + action[0] + action[1])

        state, reward, done, info = env.step(action)

        #print("State t+1: ", state[0] - state[1] - state[2] + action[0] + action[1])

        plotter.update(info)
        i += 1
        # if i == 24*10:
        #     break

    print(f"Your test score is: {info['cumulative_reward']} NOK")
    plotter.plot_episode(True)
Code example #11
def main() -> None:
    root_dir = dirname(abspath(join(__file__, "../")))
    data = pd.read_csv(join(root_dir, "data/train.csv"), index_col=0, parse_dates=True)

    env = RyeFlexEnv(data)

    agent = RandomActionAgent(action_space=env.action_space)
    plotter = RyeFlexEnvEpisodePlotter()
    info = None
    done = False
    # Initial state
    state = env._state

    while not done:
        action = agent.get_action()
        state, reward, done, info = env.step(action)
        plotter.update(info)

    print(f"Your score is: {info['cumulative_reward']} NOK")
    plotter.plot_episode()
Code example #12
def main() -> None:
    root_dir = dirname(abspath(join(__file__, "../")))
    data = pd.read_csv(join(root_dir, "data/train.csv"),
                       index_col=0,
                       parse_dates=True)

    env = RyeFlexEnv(data)

    print(env.observation_space, env.action_space)

    base_actions = np.array([[1, 1], [1, 0], [0, 1], [1, 0.1], [0.1, 1]])
    actions = base_actions.copy()
    for i in [-10, -5, -1, 5, 10]:
        actions = np.append(actions, base_actions * i, 0)

    agent = Policy(env.observation_space.shape[0], actions.shape[0])
    print(list(agent.parameters()))

    optimizer = optim.Adam(agent.parameters(), lr=learning_rate)
    scheduler = optim.lr_scheduler.StepLR(optimizer, 30, 0.1)
    for i in range(100):
        #plotter = RyeFlexEnvEpisodePlotter()
        env.reset()
        info = None
        done = False
        # Initial state
        state = env._state.vector
        while not done:
            action = select_action(agent, state, actions)
            state, reward, done, info = env.step(action)
            agent.reward_episode.append(-reward)
        #   plotter.update(info)
        update_policy(agent, optimizer)
        scheduler.step()
        if i % 1 == 0:
            print(f"Your score is: {info['cumulative_reward']} NOK")
        #plotter.plot_episode()
    plt.plot(np.arange(len(agent.loss_history)), agent.loss_history)
    plt.show()
    print(list(agent.parameters()))
Code example #13
def test_step_saturation():
    """
    Test where we only look at the saturation of the actions
    """
    data = pd.read_csv("data/train.csv", index_col=0, parse_dates=True)
    env = RyeFlexEnv(data, charge_loss_hydrogen=0.5)

    env.reset(start_time=datetime(2020, 1, 3), battery_storage=400)

    input_action = Action(charge_hydrogen=1000000000,
                          charge_battery=-20000000000000000).vector

    res_new_state_vector, res_reward, res_done, res_info = env.step(
        input_action)

    ans_action = Action(charge_battery=-400, charge_hydrogen=55)
    res_action = res_info["action"]
    print(res_action)

    assert (res_action.vector == ans_action.vector).all()

    assert (res_action.vector >= env.action_space.low).all()
    assert (res_action.vector <= env.action_space.high).all()
Code example #14
def main() -> None:
    root_dir = dirname(abspath(join(__file__, "../")))
    data = pd.read_csv(join(root_dir, "data/test.csv"),
                       index_col=0,
                       parse_dates=True)

    env = RyeFlexEnv(data=data)
    plotter = RyeFlexEnvEpisodePlotter()
    data2 = pd.read_csv(join(root_dir, "data/train.csv"),
                        index_col=0,
                        parse_dates=True)
    data3 = pd.concat([data2, data])

    # Reset episode to feb 2021, and get initial state
    state = env.reset(start_time=datetime(2021, 2, 1, 0, 0))

    # INSERT YOUR OWN ALGORITHM HERE
    #agent = SimpleStateBasedAgent()

    info = {}
    done = False
    start = 15
    end = 19
    Val = 25
    ratio = (end - start) / (24 - start + end)
    while not done:
        # Discharge the battery during the late-afternoon peak (hours 16-18)
        # and recharge it at a lower, constant rate for the rest of the day
        if env._time.hour < end and env._time.hour > start:
            action = np.array([-Val, 0])
        else:
            action = np.array([ratio * Val, 0])
        state, reward, done, info = env.step(action)

        plotter.update(info)

    print(f"Your test score is: {info['cumulative_reward']} NOK")

    plotter.plot_episode()
Code example #15
def main() -> None:
    root_dir = dirname(abspath(join(__file__, "../")))
    data = pd.read_csv(join(root_dir, "data/train.csv"),
                       index_col=0,
                       parse_dates=True)

    env = RyeFlexEnv(data=data)
    plotter = RyeFlexEnvEpisodePlotter()
    agent = SimpleStateBasedAgent()

    # Get initial state
    state = env.get_state_vector()
    info = {}
    done = False

    while not done:
        action = agent.get_action(state)

        state, reward, done, info = env.step(action)

        plotter.update(info)

    print(f"Your score is: {info['cumulative_reward']} NOK")
    plotter.plot_episode()
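
SimpleStateBasedAgent is another repository-provided agent. A minimal sketch with the same call signature; the rule itself (charge the battery with any production surplus) and the assumption that the first three state entries are consumption, PV production and wind production are illustrative guesses, not the original logic:

import numpy as np


class SimpleStateBasedAgent:
    """Toy rule: charge the battery on production surplus, discharge on deficit."""

    def get_action(self, state) -> np.ndarray:
        consumption, pv, wind = state[0], state[1], state[2]
        surplus = pv + wind - consumption
        # First entry: battery charge (+) / discharge (-); hydrogen left untouched
        return np.array([surplus, 0.0])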
Code example #16
def test_step_not_import_from_grid():
    """
    Test where we charge hydrogen and battery, but do not import from grid
    due to producing enough power.
    """
    data = pd.DataFrame(
        data={
            "consumption": [1, 2, 1, 3],
            "pv_production": [2, 20, 5, 4],
            "wind_production": [3, 2, 1, 4],
            "spot_market_price": [0.0, 0.4, 0.2, 0.1],
        },
        index=pd.date_range("2020-1-1T12:00", periods=4, freq="H"),
    )
    env = RyeFlexEnv(data=data,
                     episode_length=timedelta(hours=2),
                     charge_loss_hydrogen=0.5)

    env.reset(start_time=datetime(2020, 1, 1, 12), hydrogen_storage=1668)

    input_action = Action(charge_hydrogen=10, charge_battery=10).vector

    old_state = env._state
    print(old_state)

    # Check that we set correct states from data
    assert old_state.consumption == 1
    assert old_state.pv_production == 2
    assert old_state.wind_production == 3
    assert old_state.spot_market_price == 0

    res_new_state_vector, res_reward, res_done, res_info = env.step(
        input_action)

    res_new_state = env._state
    res_action = res_info["action"]

    # Check that the state-vectors have correct value
    # and are within state space
    assert (res_new_state_vector != old_state.vector).any()
    assert (res_new_state_vector == res_new_state.vector).all()
    assert (res_new_state_vector <= env.observation_space.high).all()
    assert (res_new_state_vector >= env.observation_space.low).all()
    assert (res_info["state"].vector == res_new_state.vector).all()
    """
    Explanation of states:
    - Battery storage is set to 8.5, since we charged by 10 with a charging
        efficiency of 85% (a 15% loss), starting from an initial state of 0.
    - Hydrogen storage is set to 1670, since we charged by 10 with a 50% loss
        (adding 5) to an initial state of 1668, which saturates at the
        maximum of 1670.
    - Grid import is set to 0, since we have:
        load = 2 (consumption) + 10 (hydrogen) + 10 (battery) = 22,
        production = 20 (solar) + 2 (wind) = 22,
        grid_import = load - production = 0,
        meaning we do not need to import from the grid.
    """

    ans_new_state = State(
        consumption=2,
        wind_production=2,
        pv_production=20,
        spot_market_price=0.4,
        battery_storage=8.5,
        hydrogen_storage=1670,
        grid_import=0,
        grid_import_peak=0,
    )
    print(res_new_state)
    assert (ans_new_state.vector == res_new_state.vector).all()

    # Check that the actions are calculated correctly.
    # Since both actions were charging, the saturated actions equal the inputs
    ans_action_vector = Action(charge_battery=10, charge_hydrogen=10).vector
    print(res_action)
    assert (res_action.vector == ans_action_vector).all()

    # Check that the reward has the correct value
    assert (
        res_reward == (ans_new_state.spot_market_price + env._grid_tariff) *
        ans_new_state.grid_import)
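
The storage arithmetic in the explanation above can be reproduced in a few lines. A minimal sketch, using the 85% battery charging efficiency, the 50% hydrogen charge loss and the 1670 hydrogen capacity stated in the test (the battery capacity of 500 is a placeholder):

def charge_storage(storage: float, charge: float, efficiency: float,
                   capacity: float) -> float:
    # Apply the charging efficiency and cap the result at the storage capacity
    return min(storage + charge * efficiency, capacity)


battery = charge_storage(0, 10, 0.85, 500)       # -> 8.5
hydrogen = charge_storage(1668, 10, 0.5, 1670)   # -> 1670 (saturated at the maximum)
print(battery, hydrogen)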
Code example #17
def test_step_import_from_grid():
    """
    Test where we charge and discharge, but
     it does not meet the consumption demand, and need to import from grid.
    """
    data = pd.DataFrame(
        data={
            "consumption": [1, 2, 1, 3],
            "pv_production": [2, 12, 5, 4],
            "wind_production": [3, 2, 1, 4],
            "spot_market_price": [0.0, 0.4, 0.2, 0.1],
        },
        index=pd.date_range("2020-1-1T12:00", periods=4, freq="H"),
    )
    env = RyeFlexEnv(data=data,
                     episode_length=timedelta(hours=2),
                     charge_loss_hydrogen=0.5)

    env.reset(start_time=datetime(2020, 1, 1, 13), battery_storage=8)

    input_action = Action(charge_hydrogen=-2, charge_battery=10).vector

    old_state = env._state

    res_new_state_vector, res_reward, res_done, res_info = env.step(
        input_action)

    res_new_state = env._state
    res_action = res_info["action"]

    # Check that state-vectors have correct value and are within state space
    assert (res_new_state_vector != old_state.vector).any()
    assert (res_new_state_vector == res_new_state.vector).all()
    assert (res_new_state_vector <= env.observation_space.high).all()
    assert (res_new_state_vector >= env.observation_space.low).all()
    assert (res_info["state"].vector == res_new_state.vector).all()
    """
    Explanation of states:
    - We tried to discharge hydrogen, but since hydrogen_storage was 0,
        we could not discharge.
    - The battery storage  is increased by 8.5, compared to initial state of 8.
    - We need to import 3.5 from the grid since we have a load of
        1 (consumption) + 10 (battery) = 11  and production of 1 (wind) + 5 (pv) = 6
        , leading to grid_import = 11 - 6 = 5.
    - Grid_import_peak = 5,since initial peak = 0.
    """
    ans_new_state = State(
        consumption=1,
        wind_production=1,
        pv_production=5,
        spot_market_price=0.2,
        battery_storage=16.5,
        hydrogen_storage=0.0,
        grid_import=5,
        grid_import_peak=5,
    )
    print(res_new_state)
    assert (ans_new_state.vector == res_new_state.vector).all()

    # Check that the actions are calculated correctly.
    # charge_hydrogen = 0, since the hydrogen storage was empty and could not be discharged
    ans_action = Action(charge_hydrogen=0, charge_battery=10)
    print(res_action)
    assert (res_action.vector == ans_action.vector).all()

    assert (
        res_reward == (ans_new_state.spot_market_price + env._grid_tariff) *
        ans_new_state.grid_import)