Example #1
0
def debug_confirm_arena_config(env_path, arena_config):
    """Open the AnimalAI environment in interactive play mode so that
    *arena_config* can be visually inspected.

    Parameters
    ----------
    env_path : str
        Path to the Unity binary; any platform executable suffix
        (.app/.exe/.x86_64/.x86) is stripped before launch.
    arena_config
        Arena configuration object passed to ``env.reset``.

    Blocks until the user interrupts with Ctrl-C, then closes the
    environment.
    """
    import time

    from animalai.envs.environment import UnityEnvironment

    # Drop the platform-specific executable suffix from the path.
    # NOTE(review): str.replace removes the suffix anywhere in the string,
    # not only at the end — kept as-is for backward compatibility.
    env_path = env_path.strip()
    for suffix in ('.app', '.exe', '.x86_64', '.x86'):
        env_path = env_path.replace(suffix, '')

    env = UnityEnvironment(n_arenas=16,
                           file_name=env_path,
                           worker_id=1,
                           seed=0,
                           docker_training=False,
                           play=True)
    env.reset(arenas_configurations=arena_config)

    # Wait for Ctrl-C without pegging a CPU core (the previous version
    # busy-spun on `while True: continue`).
    try:
        while True:
            time.sleep(0.5)
    except KeyboardInterrupt:
        env.close()
Example #2
0
def main(args):
    """Benchmark the AnimalAI Unity environment: run ``args.frames``
    random steps across ``args.n_arenas`` arenas and print the achieved
    frames-per-second.

    NOTE(review): ``docker_target_name``, ``env_path``, ``worker_id``,
    ``seed`` and ``resolution`` are read from the enclosing module scope;
    confirm they are defined there (they look like they may belong on
    ``args`` instead).
    """
    docker_training = docker_target_name is not None

    env = UnityEnvironment(
        n_arenas=args.n_arenas,
        file_name=env_path,
        worker_id=worker_id,
        seed=seed,
        docker_training=docker_training,
        play=False,
        resolution=resolution
    )

    # Apply a fixed obstacle configuration before timing starts.
    arena_config_in = ArenaConfig('configs/3-Obstacles.yaml')
    env.reset(arenas_configurations=arena_config_in)

    start_time = time.time()
    for _ in range(args.frames):
        # One random action (2 values per arena); the returned
        # observation is deliberately discarded — only speed matters.
        env.step(np.random.randint(0, 3, size=2 * args.n_arenas))

    elapsed_time = time.time() - start_time
    fps = float(args.frames) / elapsed_time
    print("n_arenas={0}, fps={1:.3f}".format(args.n_arenas, fps))
    env.close()
Example #3
0
            # Let the agent choose its next action from the latest
            # observation, reward, done flag and info.
            action = agent.step(obs, reward, done, info)

            # Visualization: refresh the camera image, the planned-path
            # scatter, and the heading line in the matplotlib figure.
            image.set_data(obs[0])
            if agent.chaser.newest_path is not None:
                sca.set_offsets(np.array(agent.chaser.newest_path))
            else:
                # No path available: collapse the scatter onto the agent's
                # standpoint ([::-1] swaps (row, col) into (x, y) order
                # for matplotlib).
                sca.set_offsets(AgentConstants.standpoint[::-1])
            if agent.chaser.newest_end is not None:
                # Draw a line from the standpoint to the chaser's newest
                # end point (note the axis swap between the two objects).
                line.set_xdata(
                    [AgentConstants.standpoint[1], agent.chaser.newest_end[0]])
                line.set_ydata(
                    [AgentConstants.standpoint[0], agent.chaser.newest_end[1]])
            else:
                # No target end point: hide the heading line.
                line.set_xdata([])
                line.set_ydata([])
            fig.canvas.draw()
            fig.canvas.flush_events()

            # Go to the next test if the current one is finished;
            # otherwise advance the environment with the chosen action.
            if all(brainInfo['Learner'].local_done):
                break
            else:
                brainInfo = env.step(action)

# Cleanup: close the visualization window and the Unity environment.
plt.close(fig)
env.close()