import numpy as np
import pandas as pd
from pytest import approx

from openmodelica_microgrid_gym import Agent, Runner


def test_main(agent, env):
    env, _, out_params = env
    runner = Runner(agent[1], env)
    runner.run(1)
    # env.history.df.to_hdf('tests/test_main.hd5', 'hist')
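    # Compare the first 100 rows of the simulated history against the stored reference trajectory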
    df = env.history.df.head(100)
    df = df.reindex(sorted(df.columns), axis=1)
    df2 = pd.read_hdf('tests/test_main.hd5', 'hist').head(100)  # noqa
    df2 = df2.reindex(sorted(df2.columns), axis=1)
    assert df[out_params].to_numpy() == approx(df2[out_params].to_numpy(), 5e-2)


def test_main_paramchange(agent, env):
    params, agent = agent
    env, _, out_params = env
    runner = Runner(agent, env)
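    # Modify the 'voltP' controller parameter before running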
    params['voltP'].val = 4
    runner.run(1)
    # env.history.df.to_hdf('tests/test_main2.hd5', 'hist')
    df = env.history.df.head(50)
    df = df.reindex(sorted(df.columns), axis=1)
    df2 = pd.read_hdf('tests/test_main.hd5', 'hist').head(50)  # noqa
    df2 = df2.reindex(sorted(df2.columns), axis=1)
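    # With the changed parameter the trajectory must no longer match the unmodified reference ...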
    assert df[out_params].to_numpy() != approx(df2[out_params].to_numpy(), 5e-3)

    df2 = pd.read_hdf('tests/test_main2.hd5', 'hist').head(50)  # noqa
    df2 = df2.reindex(sorted(df2.columns), axis=1)
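    # ... but it must match the reference recorded with the same parameter change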
    assert df[out_params].to_numpy() == approx(df2[out_params].to_numpy(), 5e-2)


def test_simpleagent(env):
    np.random.seed(1)
    env, inputs, out_params = env

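    # Agent that returns a uniformly random action for every observation; the fixed seed keeps the run reproducible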
    class RndAgent(Agent):
        def act(self, obs: pd.Series) -> np.ndarray:
            return np.random.random(len(inputs))

    agent = RndAgent()
    runner = Runner(agent, env)
    runner.run(1)

    # env.history.df.to_hdf('tests/test_main3.hd5', 'hist')
    df = env.history.df.head(50)
    df = df.reindex(sorted(df.columns), axis=1)
    df2 = pd.read_hdf('tests/test_main3.hd5', 'hist').head(50)  # noqa
    df2 = df2.reindex(sorted(df2.columns), axis=1)
    assert df[out_params].to_numpy() == approx(df2[out_params].to_numpy(), 5e-3)
                  for i in '123'], [f'slave.SPI{i}' for i in 'dq0']],
                callback=update_legend,
                color=[['b', 'r', 'g'], ['b', 'r', 'g']],
                style=[[None], ['--']],
                title='Example of using a time-variant external current reference'),
        ],
        log_level=logging.INFO,
        max_episode_steps=max_episode_steps,
        model_params={
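            # Load resistances follow a load_step profile (gain 20); inductances stay fixed at 1 mH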
            'rl1.resistor1.R': partial(load_step, gain=20),
            'rl1.resistor2.R': partial(load_step, gain=20),
            'rl1.resistor3.R': partial(load_step, gain=20),
            'rl1.inductor1.L': 0.001,
            'rl1.inductor2.L': 0.001,
            'rl1.inductor3.L': 0.001
        },
        model_path='../omg_grid/grid.network.fmu',
        net=net)

    # Use the runner to execute num_episodes episodes of the env controlled by the agent
    runner = Runner(agent, env)

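    # Post-calculation hook: step the external current reference i_ref from 5 A to 30 A once t exceeds 0.1 s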
    def timeshift(component, t):
        if t > .1:
            return dict(i_ref=np.array([30, 0, 0]))
        return dict(i_ref=np.array([5, 0, 0]))

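    # Attach the hook to the second inverter so the current reference changes during the run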
    net['inverter2'].post_calculate_hook = timeshift
    runner.run(num_episodes, visualise=True)
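
The same post_calculate_hook mechanism also works for a reference that varies continuously rather than in a single step. The sketch below is an illustration, not part of the example above: it reuses the (component, t) hook signature and the i_ref key from timeshift, and the 0.5 Hz sweep frequency is an arbitrary choice.

import numpy as np

def i_ref_sweep(component, t):
    # Sweep the three-phase current reference magnitude smoothly between 5 A and 30 A (sketch)
    magnitude = 5 + 25 * np.sin(2 * np.pi * 0.5 * t) ** 2
    return dict(i_ref=np.array([magnitude, 0, 0]))

# Attached exactly like timeshift above:
# net['inverter2'].post_calculate_hook = i_ref_sweep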
Example 5
import gym
import numpy as np
import pandas as pd
from openmodelica_microgrid_gym import Agent, Runner


class RndAgent(Agent):
    def act(self, obs: pd.Series) -> np.ndarray:
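        # Sample a random action from the environment's action space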
        return self.env.action_space.sample()


if __name__ == '__main__':
    env = gym.make(
        'openmodelica_microgrid_gym:ModelicaEnv-v1',
        model_input=['i1p1', 'i1p2', 'i1p3'],
        model_output=dict(lc1=['inductor1.i', 'inductor2.i', 'inductor3.i']),
        model_path=
        '../OpenModelica_Microgrids/OpenModelica_Microgrids.Grids.Network.fmu')

    agent = RndAgent()
    runner = Runner(agent, env)

    runner.run(1)