Example #1
import os

from client import Client  # assumed import path for the Client helper used below

remote_base = os.getenv("RANGL_ENVIRONMENT_URL", "http://localhost:5000/")

client = Client(remote_base)

env_id = "reference-environment-v0"
seed = int(os.getenv("RANGL_SEED", 123456))
instance_id = client.env_create(env_id, seed)

# Start monitoring this instance; results are written to monitor/<instance_id> and video capture is disabled.
client.env_monitor_start(
    instance_id,
    directory=f"monitor/{instance_id}",
    force=True,
    resume=False,
    video_callable=False,
)

client.env_reset(instance_id)
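# Run one episode: sample a random action each step until the environment reports done.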
while True:
    action = client.env_action_space_sample(instance_id)
    observation, reward, done, info = client.env_step(instance_id, action)
    print(instance_id, reward)
    if done:
        print(instance_id)
        break

client.env_monitor_close(instance_id)
print("done", done)

# make sure you print the instance_id as the last line in the script
print(instance_id)
Example #2
import os

import numpy as np
from stable_baselines3 import DDPG  # assumed: the library used to train the saved DDPG model

from client import Client  # assumed import path for the Client helper, as in Example #1

# Connect to the remote environment in the same way as Example #1.
remote_base = os.getenv("RANGL_ENVIRONMENT_URL", "http://localhost:5000/")
client = Client(remote_base)

env_id = "reference-environment-v0"
seed = int(os.getenv("RANGL_SEED", 123456))

instance_id = client.env_create(env_id, seed)

client.env_monitor_start(
    instance_id,
    directory=f"monitor/{instance_id}",
    force=True,
    resume=False,
    video_callable=False,
)

# Load the pre-trained DDPG policy from its saved checkpoint.
model = DDPG.load("MODEL_ALPHA_GENERATION.zip")

observation = client.env_reset(instance_id)

print(observation)



def ObservationTransform(obs, H, transform, steps_per_episode=96):
    # First three entries of the observation: current step index and the
    # output levels of the two generators.
    step_count, generator_1_level, generator_2_level = obs[:3]
    # Remaining entries: the prediction series supplied to the agent.
    agent_prediction = np.array(obs[3:])

    # Build a full-episode horizon: fill it with the last predicted value, then
    # overwrite the leading (steps_per_episode - step_count) entries with the
    # predictions from the current step onwards.
    agent_horizon_prediction = agent_prediction[-1] * np.ones(steps_per_episode)
    agent_horizon_prediction[:int(steps_per_episode - step_count)] = \
        agent_prediction[int(step_count):]  # inclusive index