def demo_small_map_wall_penalty(level_script):
    """Interactively run *level_script* with wall-penalty reward shaping.

    Builds the env with a discrete action mapper and a wall penalty that
    ramps up to 0.2 within 30 distance units of a wall.
    """
    print("Loading deepmind_lab_gym from %s" % dlg.__file__)
    video_config = dict(width=320, height=320, fps=30)
    env = dlg.register_and_make(
        level_script,
        video_config,
        dlg.ActionMapper("discrete"),
        wall_penalty_max_dist=30,
        wall_penalty_max=0.2,
    )
    run_env_interactively(env)
def demo_small_map_test_mode(level_script):
    """Interactively run *level_script* with extra debug observations.

    Requests GOAL.LOC, POSE and GOAL.FOUND in addition to the default
    observation set, using the discrete action mapper.
    """
    print("Loading deepmind_lab_gym from %s" % dlg.__file__)
    extra_obs = ["GOAL.LOC", "POSE", "GOAL.FOUND"]
    env = dlg.register_and_make(
        level_script,
        dict(width=320, height=320, fps=30),
        dlg.ActionMapper("discrete"),
        additional_observation_types=extra_obs,
    )
    run_env_interactively(env)
def demo_discrete_big_steps(level_script):
    """Interactively run *level_script* with the Manhattan-world mapper.

    Uses a fixed game seed (0) for reproducibility and requests the POSE
    observation; driven through run_env_interactively2.
    """
    print("Loading deepmind_lab_gym from %s" % dlg.__file__)
    env = dlg.register_and_make(
        level_script,
        dict(width=320, height=320, fps=30),
        dlg.ManhattanWorldActionMapper_v0,
        additional_observation_types=['POSE'],
        init_game_seed=0,
    )
    run_env_interactively2(env)
def demo_small_star_map_continuous_spawn():
    """Interactively run the small star map with continuous spawn points.

    The level script is fixed; noclip is enabled in the game config and the
    GOAL.LOC / POSE / GOAL.FOUND observations are requested.
    """
    print("Loading deepmind_lab_gym from %s" % dlg.__file__)
    level_script = "small_star_map_continuous_spawn_01"
    game_config = dict(width=320, height=320, fps=30, noclip="true")
    env = dlg.register_and_make(
        level_script,
        game_config,
        dlg.ActionMapper("discrete"),
        additional_observation_types=["GOAL.LOC", "POSE", "GOAL.FOUND"],
    )
    run_env_interactively(env)
def demo_random_mazes_multiproc(level_script="tests/demo_map",
                                multiproc=True,
                                multiproc_use_threads=False):
    """Benchmark stepping through *level_script* with random actions.

    Steps the environment for 600 * 30 frames with sampled actions and
    prints the wall-clock time between episode resets.

    NOTE(review): this function was previously also named
    ``demo_random_mazes``, which collided with the later definition of the
    same name in this file and made it unreachable (the second ``def``
    rebinds the module attribute at import time). Renamed to restore
    access; no caller could have been using the old, shadowed binding.

    Args:
        level_script: map script passed to dlg.register_and_make.
        multiproc: if True, wrap the env in MultiProcGym with 3 workers.
        multiproc_use_threads: forwarded to MultiProcGym's use_threads flag.
    """
    env = dlg.register_and_make(level_script,
                                dict(width=84, height=84, fps=30),
                                dlg.ActionMapper("discrete"),
                                additional_observation_types=[])
    if multiproc:
        env = MultiProcGym(env, 3, use_threads=multiproc_use_threads)

    start_time = time.time()
    for i in range(600 * 30):
        obs, reward, terminal, info = env.step(env.action_space.sample())
        print("\r{}".format(i), end='')
        if terminal:
            # Report time since the previous reset, then start a new episode.
            print("env.reset() time_take = {}".format(time.time() -
                                                      start_time))
            start_time = time.time()
            env.reset()
def demo_random_mazes(level_script='random_mazes'
                      , rows=9
                      , cols=9
                      , mode='training'
                      , num_maps = 100):
    """Interactively run procedurally generated random mazes.

    Configures a pool of *num_maps* randomly generated rows x cols mazes
    with random spawn/goal placement, apples, and 20-second episodes,
    backed by the RandomMazesDMLab entry point.
    """
    maze_config = dict(width=320, height=320, fps=30)
    maze_config.update(
        rows=rows,
        cols=cols,
        mode=mode,
        num_maps=num_maps,
        random_spawn_random_goal="True",
        apple_prob=0.1,
        episode_length_seconds=20,
    )
    debug_obs = ["GOAL.LOC", "POSE", "GOAL.FOUND"]
    env = dlg.register_and_make(
        level_script,
        maze_config,
        dlg.ActionMapper("discrete"),
        additional_observation_types=debug_obs,
        entry_point_object=mpdmlab.RandomMazesDMLab,
    )
    run_env_interactively2(env)