Example #1
def main() -> None:
    root_dir = dirname(abspath(join(__file__, "../")))
    data = pd.read_csv(join(root_dir, "data/train.csv"),
                       index_col=0,
                       parse_dates=True)

    env = RyeFlexEnv(data=data)
    env.reset(start_time=datetime(2021, 2, 1, 0, 0))
    plotter = RyeFlexEnvEpisodePlotter()

    # INSERT YOUR OWN ALGORITHM HERE
    agent = RandomActionAgent(env.action_space)

    # Example with random initial state
    info = {}
    done = False
    # Initial state
    state = env._state

    while not done:

        # INSERT YOUR OWN ALGORITHM HERE
        action = agent.get_action(state)

        state, reward, done, info = env.step(action)

        plotter.update(info)

    print(f"Your test score is: {info['cumulative_reward']} NOK")

    plotter.plot_episode()
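
For context, a minimal sketch of what a RandomActionAgent compatible with this loop could look like. This is an assumption, not the class bundled with the environment; the optional state argument is added so that the later examples that call get_action() without arguments would also work.

import numpy as np

class RandomActionAgent:
    """Sketch of a random agent: samples uniformly from the action space
    and ignores the state."""

    def __init__(self, action_space) -> None:
        self._action_space = action_space

    def get_action(self, state: np.ndarray = None) -> np.ndarray:
        # gym-style spaces expose .sample() for uniform random draws
        return self._action_space.sample()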
Example #2
def main() -> None:
    root_dir = dirname(abspath(join(__file__, "../")))
    data = pd.read_csv(join(root_dir, "data/train.csv"), index_col=0, parse_dates=True)

    env = RyeFlexEnv(data)
    
    print(env.observation_space, env.action_space)
    agent = Policy(env.observation_space.shape[0], env.action_space.shape[0])
    
    optimizer = optim.Adam(agent.parameters(), lr=learning_rate)

    for i in range(100):
        #plotter = RyeFlexEnvEpisodePlotter()
        info = None
        done = False
        # Initial state
        state = env._state.vector
        while not done:
            #state = torch.from_numpy(state).type(torch.FloatTensor)
            action = select_action(agent, state)
            state, reward, done, info = env.step(action.detach().numpy())
            agent.reward_episode.append(-reward)
         #   plotter.update(info)
        update_policy(agent, optimizer)
        print(f"Your score is: {info['cumulative_reward']} NOK")
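
The select_action and update_policy helpers are not shown in this listing. A plausible select_action for this continuous-action loop, assuming the Policy network outputs the mean of a Gaussian over the two actions and that log-probabilities are collected on a hypothetical agent.policy_history list, might look like the following sketch.

import torch
from torch.distributions import Normal

def select_action(agent, state):
    """Hypothetical continuous select_action: sample an action from a Gaussian
    centred on the policy output and store its log-probability for the update."""
    state_t = torch.as_tensor(state, dtype=torch.float32)
    mean = agent(state_t)                       # forward pass of the Policy net
    dist = Normal(mean, torch.ones_like(mean))  # fixed unit std for simplicity
    action = dist.sample()
    agent.policy_history.append(dist.log_prob(action).sum())
    return action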
Example #3
def test_step_random_state():
    """
    Test that:
        - The state changes (also when reset is not used)
        - The state is within the desired state space / observation space
    """
    data = pd.DataFrame(
        data={
            "consumption": [1, 2, 1, 3],
            "pv_production": [2, 2, 5, 4],
            "wind_production": [3, 2, 1, 4],
            "spot_market_price": [0.0, 0.4, 0.2, 0.1],
        },
        index=pd.date_range("2020-1-1T12:00", periods=4, freq="H"),
    )
    env = RyeFlexEnv(data=data,
                     episode_length=timedelta(hours=2),
                     charge_loss_hydrogen=0.5)

    input_action = Action(charge_battery=-1.1, charge_hydrogen=-1.2).vector

    old_state_vector = env._state.vector

    res_state_vector, res_reward, res_done, res_info = env.step(input_action)

    res_state = env._state

    assert (res_state_vector != old_state_vector).any()

    assert (res_state_vector == res_state.vector).all()
    assert (res_state_vector <= env.observation_space.high).all()
    assert (res_state_vector >= env.observation_space.low).all()
    assert (res_info["state"].vector == res_state.vector).all()
Example #4
def test_attributes_time():
    """
    Test that all time attributes are handled correctly
    """

    data = pd.read_csv("data/train.csv", index_col=0, parse_dates=True)

    input_episode_length = timedelta(days=30)
    env = RyeFlexEnv(episode_length=input_episode_length, data=data)

    res_start_time = env._time
    res_end_time = env._episode_end_time

    assert input_episode_length == env._episode_length
    assert res_start_time + input_episode_length == res_end_time
    assert env._start_time_data <= res_start_time
    assert res_start_time <= env._end_time_data

    input_start_time = datetime(2020, 10, 1)
    env.reset(start_time=input_start_time)
    res_end_time = input_episode_length + env._time
    ans_end_time = datetime(2020, 10, 31)

    assert ans_end_time == res_end_time

    _, _, _, res_info = env.step(action=np.array([1, 1]))
    res_time = env._time

    assert res_time == env._time_resolution + input_start_time
    assert res_info["time"] == res_time
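
The end-time assertion above is plain calendar arithmetic; a standalone check using only the values from this test:

from datetime import datetime, timedelta

# A 30-day episode starting on 2020-10-01 ends on 2020-10-31.
assert datetime(2020, 10, 1) + timedelta(days=30) == datetime(2020, 10, 31)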
Example #5
def main() -> None:
    root_dir = dirname(abspath(join(__file__, "../")))
    data = pd.read_csv(join(root_dir, "data/test.csv"),
                       index_col=0,
                       parse_dates=True)
    env = RyeFlexEnv(data=data)
    env.reset(start_time=datetime(2021, 2, 1, 0, 0))
    data2 = pd.read_csv(join(root_dir, "data/train.csv"),
                        index_col=0,
                        parse_dates=True)
    data = pd.concat([data2, data])
    plotter = RyeFlexEnvEpisodePlotter()

    # INSERT YOUR OWN ALGORITHM HERE
    #agent = KalmanAgent(env.action_space)

    # Example with random initial state
    info = {}
    done = False
    # Initial state
    state = env._state.vector
    N = 28
    while not done:
        #PV = data.loc[env._time:env._time + N*env._time_resolution, "pv_production"]
        #W = data.loc[env._time:env._time + N*env._time_resolution, "wind_production"]
        #C = data.loc[env._time:env._time + N*env._time_resolution, "consumption"]
        spot = data.loc[env._time:env._time + N * env._time_resolution,
                        "spot_market_price"]
        #print("State t: ", state[0] - state[1] - state[2] + action[0] + action[1])

        C = data.loc[env._time - 47 * env._time_resolution:env._time,
                     "consumption"]
        PV = data.loc[env._time - 47 * env._time_resolution:env._time,
                      "pv_production"]
        Wind = data.loc[env._time:env._time + N * env._time_resolution,
                        "wind_speed_50m:ms"]
        Wind_prod_last = data.loc[env._time, "wind_production"]
        C_estim = [np.array(C[-1])]
        PV_estim = [np.array(PV[-1])]
        for i in range(N):
            c = get_predicted_consumption(C[-48:])
            C_estim.append(c)
            C = np.concatenate([C, c])
            pv = get_predicted_solar_power(PV[-48:])
            PV_estim.append(pv)
            PV = np.concatenate([PV, pv])
        # W = []
        # for x in Wind:
        #     W.append(get_predicted_wind_power(x))
        # W = np.array(W)
        W = get_predicted_wind_power_stupid(Wind, Wind_prod_last, N)
        C = np.hstack(C_estim)
        action = MPC_step(N, state[3:6], PV[1:], W[1:], C[1:], spot[1:])
        state, reward, done, info = env.step(action)
        print(env._time)
        plotter.update(info)

    print(f"Your test score is: {info['cumulative_reward']} NOK")
    plotter.plot_episode()
Example #6
def main() -> None:
    root_dir = dirname(abspath(join(__file__, "../")))
    data = pd.read_csv(join(root_dir, "data/train.csv"),
                       index_col=0,
                       parse_dates=True)

    env = RyeFlexEnv(data)

    print(env.observation_space, env.action_space)

    base_actions = np.array([[1, 1], [1, 0], [0, 1], [1, 0.1], [0.1, 1]])
    actions = base_actions.copy()
    for i in [-2, -1.5, -1.2, -1, -0.1, -0.01, 0.01, 0.1]:
        actions = np.append(actions, base_actions * i, 0)
    #actions = np.array([[0.1,0], [-0.1,0], [0, 0.1], [0,-0.1]])

    agent = Policy(env.observation_space.shape[0] + 1, actions)
    print(list(agent.parameters()))

    lossFunc = nn.MSELoss()
    optimizer = optim.Adam(agent.parameters(), lr=learning_rate)
    scheduler = optim.lr_scheduler.StepLR(optimizer, 30, 0.1)
    for i in range(30):
        if i == 29:
            plotter = RyeFlexEnvEpisodePlotter()
        env.reset()
        info = None
        done = False
        # Initial state
        state = env._state.vector
        state = np.append([state], [0])
        j = 0
        while not done:
            j += 1
            action, Q1 = agent(state)
            print(action)
            state, reward, done, info = env.step(action)
            state = np.append([state], [j])
            if not done:
                _, Q2 = agent(state)
                loss = lossFunc(Q1, reward + agent.gamma * Q2)
            else:
                reward = torch.FloatTensor(np.array(reward))
                reward.requires_grad = True
                loss = lossFunc(Q1, reward)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if i == 29:
                plotter.update(info)
        scheduler.step()
        if i % 1 == 0:
            print(f"Your score is: {info['cumulative_reward']} NOK")
    plotter.plot_episode()
    plt.plot(np.arange(len(agent.loss_history)), agent.loss_history)
    plt.show()
    print(list(agent.parameters()))
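
The Policy class used in this Q-learning-style loop is not shown in the listing. One plausible sketch that fits the calls above (constructed with the state dimension and the discrete action table, returning the greedy action together with its Q-value, and exposing the gamma and loss_history attributes the loop refers to) is given below; the network size and gamma value are illustrative assumptions.

import numpy as np
import torch
import torch.nn as nn

class QPolicy(nn.Module):
    """Hypothetical stand-in for Policy: a small Q-network over a fixed
    table of discrete actions."""

    def __init__(self, state_dim: int, actions: np.ndarray, gamma: float = 0.99) -> None:
        super().__init__()
        self.actions = torch.as_tensor(actions, dtype=torch.float32)
        self.gamma = gamma
        self.loss_history: list = []
        self.net = nn.Sequential(
            nn.Linear(state_dim, 64), nn.ReLU(), nn.Linear(64, len(actions))
        )

    def forward(self, state: np.ndarray):
        q_values = self.net(torch.as_tensor(state, dtype=torch.float32))
        best = torch.argmax(q_values)
        # Return the chosen action as a NumPy vector (accepted by env.step)
        # together with its Q-value, so the TD loss above can be formed.
        return self.actions[best].numpy(), q_values[best]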
Example #7
def test_loss_and_reward_attributes():
    """
    Test that all loss and reward constants are set correctly
    """
    input_charge_loss_battery = 0.8
    input_charge_loss_hydrogen = 0.6
    input_energy_grid_tariff = 0.051
    input_peak_grid_tariff = 49.0

    data = pd.DataFrame(
        data={
            "consumption": [1, 2, 1, 3],
            "pv_production": [1, 2, 3, 4],
            "wind_production": [1, 2, 3, 4],
            "spot_market_price": [1, 2, 3, 4],
        },
        index=pd.date_range("2020-1-1T12:00", periods=4, freq="H"),
    )

    env = RyeFlexEnv(
        charge_loss_battery=input_charge_loss_battery,
        charge_loss_hydrogen=input_charge_loss_hydrogen,
        grid_tariff=input_energy_grid_tariff,
        peak_grid_tariff=input_peak_grid_tariff,
        data=data,
        episode_length=timedelta(hours=2),
    )

    assert env._charge_loss_battery_storage == input_charge_loss_battery
    assert env._charge_loss_hydrogen_storage == input_charge_loss_hydrogen
    assert env._grid_tariff == input_energy_grid_tariff
    assert env._peak_grid_tariff == input_peak_grid_tariff
Example #8
def test_predicted_wind_power():
    #root_dir = dirname(abspath(join(__file__, "./")))
    data = pd.read_csv("data/test.csv", index_col=0, parse_dates=True)
    env = RyeFlexEnv(data=data)
    time = data.index.min()
    
    time_delta = timedelta(days=1)
    timeMax = time + time_delta * 365
    w1 = np.array(data.loc[time:timeMax, "wind_speed_2m:ms"])
    w2 = data.loc[time:timeMax, "wind_speed_10m:ms"]
    w3 = data.loc[time:timeMax, "wind_speed_50m:ms"]
    w4 = data.loc[time:timeMax, "wind_speed_100m:ms"]
    P = np.array(data.loc[time:timeMax, "wind_production"])

    # estim = []
    # for data in w4:
    #     estim.append(get_predicted_wind_power(data))
    Wind_prod_last = data.loc[env._time, "wind_production"]
    estim = get_predicted_wind_power_stupid(w1, Wind_prod_last, 28)


    plt.plot(np.arange(w3.shape[0]), w4, label='Wind')
    plt.plot(np.arange(w3.shape[0]), P, label='Power')
    plt.plot(np.arange(w3.shape[0]), estim, label='Estimated power')
    plt.legend()
    plt.show()
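
get_predicted_wind_power_stupid is exercised here but not shown. A naive stand-in consistent with how it is called (wind-speed series, last observed production, horizon) could be the sketch below; the scaling rule is an assumption, and the horizon argument is accepted only for signature compatibility.

import numpy as np

def naive_wind_power_forecast(wind_speed, last_production, horizon):
    """Hypothetical naive forecaster: scale the wind-speed forecast by the
    ratio of the last observed production to the first wind-speed sample."""
    wind_speed = np.asarray(wind_speed, dtype=float)
    ratio = last_production / max(float(wind_speed[0]), 1e-6)
    return ratio * wind_speed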
Example #9
def main() -> None:
    root_dir = dirname(abspath(join(__file__, "../")))
    data = pd.read_csv(join(root_dir, "data/train.csv"),
                       index_col=0,
                       parse_dates=True)
    env = RyeFlexEnv(data=data)
    env.reset(start_time=datetime(2020, 2, 1, 0, 0))
    plotter = RyeFlexEnvEpisodePlotter()

    # INSERT YOUR OWN ALGORITHM HERE
    #agent = KalmanAgent(env.action_space)

    # Example with random initial state
    info = {}
    done = False
    # Initial state
    state = env._state.vector
    agent = RandomActionAgent(env.action_space)
    #agent = KalmanAgent(env.action_space, state)
    i = 0
    while not done:

        # INSERT YOUR OWN ALGORITHM HERE
        #print(state[0], data.at[env._time + env._time_resolution, "consumption"])
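        # Note: the three lines below overwrite the observed values with the
        # values for the *next* hour taken directly from the training data,
        # i.e. the agent is handed a perfect one-step-ahead forecast.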
        state[0] = data.at[env._time + env._time_resolution, "consumption"]
        state[1] = data.at[env._time + env._time_resolution, "pv_production"]
        state[2] = data.at[env._time + env._time_resolution, "wind_production"]
        action = agent.get_action(state)

        #action = np.array([0,0])
        #print("State t: ", state[0] - state[1] - state[2] + action[0] + action[1])

        state, reward, done, info = env.step(action)

        #print("State t+1: ", state[0] - state[1] - state[2] + action[0] + action[1])

        plotter.update(info)
        i += 1
        # if i == 24*10:
        #     break

    print(f"Your test score is: {info['cumulative_reward']} NOK")
    plotter.plot_episode(True)
Example #10
def main() -> None:
    root_dir = dirname(abspath(join(__file__, "../")))
    data = pd.read_csv(join(root_dir, "data/train.csv"), index_col=0, parse_dates=True)

    env = RyeFlexEnv(data)

    agent = RandomActionAgent(action_space=env.action_space)
    plotter = RyeFlexEnvEpisodePlotter()
    info = None
    done = False
    # Initial state
    state = env._state

    while not done:
        action = agent.get_action()
        state, reward, done, info = env.step(action)
        plotter.update(info)

    print(f"Your score is: {info['cumulative_reward']} NOK")
    plotter.plot_episode()
Example #11
def test_step_new_grid_import_peak():
    """
    Test where we get a new grid import peak
    """
    data = pd.DataFrame(
        data={
            "consumption": [1, 2, 1, 3],
            "pv_production": [2, 12, 5, 4],
            "wind_production": [3, 2, 1, 4],
            "spot_market_price": [0.0, 0.4, 0.2, 0.1],
        },
        index=pd.date_range("2020-1-1T12:00", periods=4, freq="H"),
    )
    env = RyeFlexEnv(data=data,
                     episode_length=timedelta(hours=2),
                     charge_loss_hydrogen=0.5)
    input_grid_import = 1.0

    env.reset(
        start_time=datetime(2020, 1, 1, 13),
        battery_storage=8,
        grid_import=input_grid_import,
    )

    input_action = np.array([-2, 10])

    # Check that the correct grid peak was set
    assert env._state.grid_import_peak == input_grid_import

    res_new_state_vector, res_reward, res_done, res_info = env.step(
        input_action)

    res_new_state = res_info["state"]
    print(res_new_state)

    # Check that the new peak was set
    assert res_new_state.grid_import_peak == 3
    assert res_new_state.grid_import == 3

    assert (
        res_reward == (res_new_state.spot_market_price + env._grid_tariff) *
        res_new_state.grid_import)
Example #12
def test_get_possible_start_times():
    data = pd.DataFrame(
        data={
            "consumption": [1, 2, 2, 3],
            "pv_production": [2, 12, 5, 4],
            "wind_production": [3, 2, 1, 4],
            "spot_market_price": [0.0, 0.4, 0.2, 0.1],
        },
        index=pd.date_range("2020-1-1T12:00", periods=4, freq="H"),
    )
    env = RyeFlexEnv(data=data,
                     episode_length=timedelta(hours=2),
                     charge_loss_hydrogen=0.5)

    ans = [datetime(2020, 1, 1, 12), datetime(2020, 1, 1, 13)]

    res = env.get_possible_start_times()

    print(res)

    assert ans == res
Example #13
def main() -> None:
    root_dir = dirname(abspath(join(__file__, "../")))
    data = pd.read_csv(join(root_dir, "data/train.csv"),
                       index_col=0,
                       parse_dates=True)

    env = RyeFlexEnv(data)

    print(env.observation_space, env.action_space)

    base_actions = np.array([[1, 1], [1, 0], [0, 1], [1, 0.1], [0.1, 1]])
    actions = base_actions.copy()
    for i in [-10, -5, -1, 5, 10]:
        actions = np.append(actions, base_actions * i, 0)

    agent = Policy(env.observation_space.shape[0], actions.shape[0])
    print(list(agent.parameters()))

    optimizer = optim.Adam(agent.parameters(), lr=learning_rate)
    scheduler = optim.lr_scheduler.StepLR(optimizer, 30, 0.1)
    for i in range(100):
        #plotter = RyeFlexEnvEpisodePlotter()
        env.reset()
        info = None
        done = False
        # Initial state
        state = env._state.vector
        while not done:
            action = select_action(agent, state, actions)
            state, reward, done, info = env.step(action)
            agent.reward_episode.append(-reward)
        #   plotter.update(info)
        update_policy(agent, optimizer)
        scheduler.step()
        if i % 1 == 0:
            print(f"Your score is: {info['cumulative_reward']} NOK")
        #plotter.plot_episode()
    plt.plot(np.arange(len(agent.loss_history)), agent.loss_history)
    plt.show()
    print(list(agent.parameters()))
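
update_policy is not shown either. A REINFORCE-style sketch that matches how the loops in Examples #2 and #13 collect rewards (note that they append -reward, so these returns are negated costs) is given below; the agent.policy_history list of stored log-probabilities and agent.gamma are assumed attributes, not confirmed by the listing.

import torch
import torch.nn as nn

def update_policy(agent: nn.Module, optimizer: torch.optim.Optimizer) -> None:
    """Hypothetical policy-gradient update over one collected episode."""
    # Discounted returns over the episode, computed backwards.
    returns, running = [], 0.0
    for r in reversed(agent.reward_episode):
        running = r + agent.gamma * running
        returns.insert(0, running)
    returns = torch.tensor(returns, dtype=torch.float32)
    returns = (returns - returns.mean()) / (returns.std() + 1e-8)
    # Policy-gradient loss: stored log-probabilities weighted by the returns.
    loss = (-torch.stack(agent.policy_history) * returns).sum()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    agent.loss_history.append(loss.item())
    agent.policy_history = []
    agent.reward_episode = []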
Example #14
def test_reset():
    """
    Test that the reset function works (time resetting is tested in another test above)
    """

    data = pd.DataFrame(
        data={
            "consumption": [1, 2, 1, 3],
            "pv_production": [1, 2, 3, 4],
            "wind_production": [1, 2, 3, 4],
            "spot_market_price": [1, 2, 3, 4],
        },
        index=pd.date_range("2020-1-1T12:00", periods=4, freq="H"),
    )

    env = RyeFlexEnv(data=data, episode_length=timedelta(hours=2))

    env._cumulative_reward = 1000

    battery_storage = 0.1
    hydrogen_storage = 0.2
    grid_import = 0.3

    res_new_state_vector = env.reset(
        battery_storage=battery_storage,
        hydrogen_storage=hydrogen_storage,
        grid_import=grid_import,
    )
    res_new_state = env._state

    assert env._cumulative_reward == 0

    assert (res_new_state_vector == res_new_state.vector).all()
    assert (res_new_state_vector <= env.observation_space.high).all()
    assert (res_new_state_vector >= env.observation_space.low).all()

    assert res_new_state.battery_storage == battery_storage
    assert res_new_state.hydrogen_storage == hydrogen_storage
    assert res_new_state.grid_import == grid_import
Example #15
def test_step_saturation():
    """
    Test where we only look at the saturation of the actions
    """
    data = pd.read_csv("data/train.csv", index_col=0, parse_dates=True)
    env = RyeFlexEnv(data, charge_loss_hydrogen=0.5)

    env.reset(start_time=datetime(2020, 1, 3), battery_storage=400)

    input_action = Action(charge_hydrogen=1000000000,
                          charge_battery=-20000000000000000).vector

    res_new_state_vector, res_reward, res_done, res_info = env.step(
        input_action)

    ans_action = Action(charge_battery=-400, charge_hydrogen=55)
    res_action = res_info["action"]
    print(res_action)

    assert (res_action.vector == ans_action.vector).all()

    assert (res_action.vector >= env.action_space.low).all()
    assert (res_action.vector <= env.action_space.high).all()
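
The saturation asserted above is plain clipping. In the sketch below, the bounds (discharge limited by the 400 kWh currently in the battery, hydrogen charging capped at 55) are inferred from the assertion itself, not queried from the library.

import numpy as np

battery_storage = 400
requested = np.array([-20000000000000000, 1000000000])  # [charge_battery, charge_hydrogen]
saturated = np.array([
    max(requested[0], -battery_storage),  # cannot discharge more than is stored
    min(requested[1], 55),                # assumed maximum hydrogen charge rate
])
print(saturated)  # expected: -400 and 55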
Example #16
def main() -> None:
    root_dir = dirname(abspath(join(__file__, "../")))
    data = pd.read_csv(join(root_dir, "data/test.csv"),
                       index_col=0,
                       parse_dates=True)

    env = RyeFlexEnv(data=data)
    plotter = RyeFlexEnvEpisodePlotter()
    data2 = pd.read_csv(join(root_dir, "data/train.csv"),
                        index_col=0,
                        parse_dates=True)
    data3 = pd.concat([data2, data])

    # Reset episode to feb 2021, and get initial state
    state = env.reset(start_time=datetime(2021, 2, 1, 0, 0))

    # INSERT YOUR OWN ALGORITHM HERE
    #agent = SimpleStateBasedAgent()

    info = {}
    done = False
    start = 15
    end = 19
    Val = 25
    ratio = (end - start) / (24 - start + end)
    while not done:
        if env._time.hour < end and env._time.hour > start:
            action = np.array([-Val, 0])
        else:
            action = np.array([ratio * Val, 0])
        state, reward, done, info = env.step(action)

        plotter.update(info)

    print(f"Your test score is: {info['cumulative_reward']} NOK")

    plotter.plot_episode()
Example #17
def main() -> None:
    root_dir = dirname(abspath(join(__file__, "../")))
    data = pd.read_csv(join(root_dir, "data/train.csv"),
                       index_col=0,
                       parse_dates=True)

    env = RyeFlexEnv(data=data)
    plotter = RyeFlexEnvEpisodePlotter()
    agent = SimpleStateBasedAgent()

    # Get initial state
    state = env.get_state_vector()
    info = {}
    done = False

    while not done:
        action = agent.get_action(state)

        state, reward, done, info = env.step(action)

        plotter.update(info)

    print(f"Your score is: {info['cumulative_reward']} NOK")
    plotter.plot_episode()
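
SimpleStateBasedAgent is not defined anywhere in this listing. One plausible rule-based sketch is shown below; the bundled agent's actual rule may differ, the state layout (consumption, pv, wind in indices 0 to 2) is assumed from Example #9, and the +/-10 clip is an arbitrary illustrative limit.

import numpy as np

class ThresholdAgent:
    """Hypothetical rule-based agent: charge the battery on a production
    surplus, discharge it on a deficit, leave hydrogen untouched."""

    def get_action(self, state: np.ndarray) -> np.ndarray:
        consumption, pv, wind = state[0], state[1], state[2]
        surplus = pv + wind - consumption
        # [charge_battery, charge_hydrogen]
        return np.array([np.clip(surplus, -10.0, 10.0), 0.0])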
Example #18
def test_ARmodel(model_of):
    data = pd.read_csv("data/test.csv", index_col=0, parse_dates=True)

    time_min = data.index.min()
    time_max = data.index.max()
    env = RyeFlexEnv(data=data)

    c = data.loc[time_min:time_max, model_of]
    estim = []
    y = []
    days = 10
    for i in range(400, 400 + 24 * days):
        if model_of == "consumption":
            estim.append(get_predicted_consumption(c[i:i + 48]))
        if model_of == "pv_production":
            estim.append(get_predicted_solar_power(c[i:i + 48]))
        y.append(c[i + 48])
    plt.plot(np.arange(0, 24 * days), estim, 'r', label="estimated")
    plt.plot(np.arange(0, 24 * days), y, 'g', label="real")
    plt.legend()
    plt.show()
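
get_predicted_consumption and get_predicted_solar_power are exercised here but not shown. A simple autoregressive-flavoured stand-in, mixing a persistence term with the daily (24 h) lag, could look like the following; the real models' coefficients are unknown, so these weights are illustrative.

import numpy as np

def predict_next_hour(history) -> np.ndarray:
    """Hypothetical one-step forecaster over a window of hourly values."""
    history = np.asarray(history, dtype=float)
    # Blend persistence (t-1) with the daily-seasonal lag (t-24).
    return np.array([0.5 * history[-1] + 0.5 * history[-24]])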
Example #19
def test_episodes():
    """
    Test that the episode length, cumulative reward and done signal
    are handled correctly.
    """

    data = pd.read_csv("data/train.csv", index_col=0, parse_dates=True)

    env = RyeFlexEnv(data=data)
    plotter = RyeFlexEnvEpisodePlotter()
    length = int(env._episode_length.days * 24)

    # Example with random initial state
    done = False
    cumulative_reward = env._cumulative_reward

    while not done:
        action = env.action_space.sample()
        state, reward, done, info = env.step(action)
        new_cumulative_reward = info["cumulative_reward"]

        assert round(new_cumulative_reward - cumulative_reward, 5) == round(reward, 5)

        cumulative_reward = new_cumulative_reward
        plotter.update(info)

    assert len(plotter._states) == length

    plotter.plot_episode(show=True)
    
    # Leftover snippet: 'wind' is not defined in this test, so the MATLAB
    # export below is kept commented out.
    # mydata = np.array(wind)
    # scipy.io.savemat('wind.mat', mydata)

    # Example where the environment is reset to a partially known state
    env.reset(start_time=datetime(2020, 2, 3), battery_storage=1)

    done = False
    while not done:
        action = env.action_space.sample()
        state, reward, done, info = env.step(action)
        plotter.update(info)

    assert len(plotter._states) == length
    plotter.plot_episode(show=False)
Example #20
def test_state_space_and_action_space():
    """
    Test that the state space and action space are set correctly
    """

    data = pd.DataFrame(
        data={
            "consumption": [1, 2, 1, 3],
            "pv_production": [1, 2, 3, 4],
            "wind_production": [1, 2, 3, 4],
            "spot_market_price": [1, 2, 3, 4],
        },
        index=pd.date_range("2020-1-1T12:00", periods=4, freq="H"),
    )

    env = RyeFlexEnv(data=data, episode_length=timedelta(hours=2))

    assert (env.observation_space.low == env._state_space_min.vector).all()
    assert (env.observation_space.high == env._state_space_max.vector).all()

    assert (env.action_space.low == env._action_space_min.vector).all()
    assert (env.action_space.high == env._action_space_max.vector).all()
Example #21
def main() -> None:
    root_dir = dirname(abspath(join(__file__, "../")))
    data = pd.read_csv(join(root_dir, "data/train.csv"),
                       index_col=0,
                       parse_dates=True)

    env = RyeFlexEnv(data=data)
    plotter = RyeFlexEnvEpisodePlotter()
    agent = ConstantActionAgent()

    # Example with random initial state
    info = {}
    done = False
    # Initial state
    state = env._state

    while not done:

        action = agent.get_action()

        state, reward, done, info = env.step(action)

        plotter.update(info)

    print(f"Your score is: {info['cumulative_reward']} NOK")
    plotter.plot_episode()

    # Example where the environment is reset
    env.reset(start_time=datetime(2020, 2, 3), battery_storage=1)

    done = False
    while not done:
        action = agent.get_action()

        state, reward, done, info = env.step(action)

        plotter.update(info)

    print(f"Your score is: {info['cumulative_reward']} NOK")
    plotter.plot_episode()
Example #22
def test_step_not_import_from_grid():
    """
    Test where we charge hydrogen and battery, but do not import from grid
    due to producing enough power.
    """
    data = pd.DataFrame(
        data={
            "consumption": [1, 2, 1, 3],
            "pv_production": [2, 20, 5, 4],
            "wind_production": [3, 2, 1, 4],
            "spot_market_price": [0.0, 0.4, 0.2, 0.1],
        },
        index=pd.date_range("2020-1-1T12:00", periods=4, freq="H"),
    )
    env = RyeFlexEnv(data=data,
                     episode_length=timedelta(hours=2),
                     charge_loss_hydrogen=0.5)

    env.reset(start_time=datetime(2020, 1, 1, 12), hydrogen_storage=1668)

    input_action = Action(charge_hydrogen=10, charge_battery=10).vector

    old_state = env._state
    print(old_state)

    # Check that we set correct states from data
    assert old_state.consumption == 1
    assert old_state.pv_production == 2
    assert old_state.wind_production == 3
    assert old_state.spot_market_price == 0

    res_new_state_vector, res_reward, res_done, res_info = env.step(
        input_action)

    res_new_state = env._state
    res_action = res_info["action"]

    # Check that the state-vectors have correct value
    # and are within state space
    assert (res_new_state_vector != old_state.vector).any()
    assert (res_new_state_vector == res_new_state.vector).all()
    assert (res_new_state_vector <= env.observation_space.high).all()
    assert (res_new_state_vector >= env.observation_space.low).all()
    assert (res_new_state.vector == res_new_state.vector).all()
    assert (res_info["state"].vector == res_new_state.vector).all()
    """
    Explanation of states:
    - Battery storage is set to 8.5, since we charged by 10 with a battery
        charge loss factor of 0.85 and started from an initial state of 0.
    - Hydrogen storage is set to 1670, since charging 10 with a hydrogen
        charge loss factor of 0.5 adds 5, which is capped at the maximum of
        1670 from an initial state of 1668.
    - Grid import is set to 0, since we have:
        load = 2 (consumption) + 10 (hydrogen) + 10 (battery) = 22
        production = 20 (solar) + 2 (wind) = 22,
        grid_import = load - production = 0,
        meaning we do not need to import from the grid.
    """

    ans_new_state = State(
        consumption=2,
        wind_production=2,
        pv_production=20,
        spot_market_price=0.4,
        battery_storage=8.5,
        hydrogen_storage=1670,
        grid_import=0,
        grid_import_peak=0,
    )
    print(res_new_state)
    assert (ans_new_state.vector == res_new_state.vector).all()

    # Check that actions are calculated correctly.
    # Since all actions were charging, the actions are unchanged
    ans_action_vector = Action(charge_battery=10, charge_hydrogen=10).vector
    print(res_action)
    assert (res_action.vector == ans_action_vector).all()

    # Check that the reward has the correct value
    assert (
        res_reward == (ans_new_state.spot_market_price + env._grid_tariff) *
        ans_new_state.grid_import)
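
A standalone recap of the arithmetic in the explanation above, using only the numbers stated there (the 0.85 battery charge loss factor and the 1670 kWh hydrogen capacity come from that docstring, not from the library):

# Battery: initial 0, charging 10 with a charge loss factor of 0.85
battery = 0 + 10 * 0.85                            # 8.5
# Hydrogen: initial 1668, charging 10 with a loss factor of 0.5, capped at 1670
hydrogen = min(1668 + 10 * 0.5, 1670)              # 1670
# Grid import: load (consumption + charging) minus local production, floored at 0
grid_import = max((2 + 10 + 10) - (20 + 2), 0)     # 0
print(battery, hydrogen, grid_import)              # 8.5 1670 0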
Example #23
def get_data():
    data = pd.read_csv("data/train.csv", index_col=0, parse_dates=True)

    env = RyeFlexEnv(data=data)

    print(env._measured_wind_production_data.copy())
Example #24
def test_step_import_from_grid():
    """
    Test where we charge and discharge, but production does not meet the
    consumption demand, so we need to import from the grid.
    """
    data = pd.DataFrame(
        data={
            "consumption": [1, 2, 1, 3],
            "pv_production": [2, 12, 5, 4],
            "wind_production": [3, 2, 1, 4],
            "spot_market_price": [0.0, 0.4, 0.2, 0.1],
        },
        index=pd.date_range("2020-1-1T12:00", periods=4, freq="H"),
    )
    env = RyeFlexEnv(data=data,
                     episode_length=timedelta(hours=2),
                     charge_loss_hydrogen=0.5)

    env.reset(start_time=datetime(2020, 1, 1, 13), battery_storage=8)

    input_action = Action(charge_hydrogen=-2, charge_battery=10).vector

    old_state = env._state

    res_new_state_vector, res_reward, res_done, res_info = env.step(
        input_action)

    res_new_state = env._state
    res_action = res_info["action"]

    # Check that state-vectors have correct value and are within state space
    assert (res_new_state_vector != old_state.vector).any()
    assert (res_new_state_vector == res_new_state.vector).all()
    assert (res_new_state_vector <= env.observation_space.high).all()
    assert (res_new_state_vector >= env.observation_space.low).all()
    assert (res_info["state"].vector == res_new_state.vector).all()
    """
    Explanation of states:
    - We tried to discharge hydrogen, but since hydrogen_storage was 0,
        we could not discharge.
    - The battery storage increases by 8.5 (10 charged with a charge loss
        factor of 0.85), from the initial state of 8 to 16.5.
    - We need to import 5 from the grid, since we have a load of
        1 (consumption) + 10 (battery) = 11 and production of
        1 (wind) + 5 (pv) = 6, leading to grid_import = 11 - 6 = 5.
    - Grid_import_peak = 5, since the initial peak was 0.
    """
    ans_new_state = State(
        consumption=1,
        wind_production=1,
        pv_production=5,
        spot_market_price=0.2,
        battery_storage=16.5,
        hydrogen_storage=0.0,
        grid_import=5,
        grid_import_peak=5,
    )
    print(res_new_state)
    assert (ans_new_state.vector == res_new_state.vector).all()

    # Check that actions are calculated correctly
    # charge_hydrogen = 0, since the hydrogen storage was empty and could not be discharged
    ans_action = Action(charge_hydrogen=0, charge_battery=10)
    print(res_action)
    assert (res_action.vector == ans_action.vector).all()

    assert (
        res_reward == (ans_new_state.spot_market_price + env._grid_tariff) *
        ans_new_state.grid_import)
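
The same kind of recap for this test; the 0.85 battery charge loss factor is inferred from the 8 to 16.5 change asserted above, not read from the library.

# Battery: initial 8, charging 10 with a charge loss factor of 0.85
battery = 8 + 10 * 0.85                        # 16.5
# Hydrogen: storage was 0, so the requested discharge of 2 is clipped to 0
hydrogen_action = max(-2, 0)                   # realised charge_hydrogen = 0
# Grid import: load = consumption 1 + battery charge 10; production = wind 1 + pv 5
grid_import = max((1 + 10) - (1 + 5), 0)       # 5
print(battery, hydrogen_action, grid_import)   # 16.5 0 5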