Example #1
def _partial_regen(self, n_new_sequences=1):
    """Generate n_new_sequences fresh sequences and overwrite part of the
    stored dataset in place, advancing a circular write head."""
    if self.regen == "navreptrain":
        env = NavRepTrainEnv(silent=True, scenario='train', adaptive=False)
        env.soadrl_sim.human_num = 20
        data = generate_vae_dataset(
            env,
            n_sequences=n_new_sequences,
            policy=ORCAPolicy(suicide_if_stuck=True),
            render=False,
            archive_dir=None)
        if self.pre_convert_obs:
            data["obs"] = scans_to_lidar_obs(
                data["scans"],
                self.lidar_mode,
                self.rings_def,
                self.channel_first)
    else:
        print("Regen {} failed".format(self.regen))
        return
    for k in self.data.keys():
        N = len(data[k])  # should be the same for each key
        # check end inside loop to avoid having to pick an arbitrary key
        if self.regen_head_index + N > len(self.data[k]):
            self.regen_head_index = 0
        # replace data
        i = self.regen_head_index
        self.data[k][i:i + N] = data[k]
    self.regen_head_index += N
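The tail of _partial_regen treats the stored dataset as a ring buffer: the freshly generated block overwrites the slice at the write head, and the head wraps back to 0 whenever the block would run past the end of the buffer. Below is a minimal standalone sketch of that replacement logic with a made-up toy dataset; the key names and array shapes are illustrative only and not from the original class.

import numpy as np

# toy stored dataset: 10 samples per key (shapes made up for illustration)
data = {"scans": np.zeros((10, 4)), "robotstates": np.zeros((10, 2))}
regen_head_index = 8

# toy freshly generated block: 5 samples per key
new_data = {"scans": np.ones((5, 4)), "robotstates": np.ones((5, 2))}

for k in data.keys():
    N = len(new_data[k])  # same for every key
    if regen_head_index + N > len(data[k]):
        regen_head_index = 0  # wrap the write head back to the start
    i = regen_head_index
    data[k][i:i + N] = new_data[k]
regen_head_index += N  # advance the head for the next regeneration

print(regen_head_index)  # 5: the head wrapped to 0, then advanced past the new block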
Example #2
from navrep.envs.navreptrainenv import NavRepTrainEnv
from navrep.tools.commonargs import parse_common_args
from navrep.scripts.test_navrep import run_test_episodes


class LuciaPolicy(object):
    """ legacy SOADRL policy from lucia's paper, takes in agents state, local map """
    def __init__(self, env):
        self.env = env

    def act(self, obs):
        state, local_map = obs
        return self.env.soadrl_sim.robot.act(state, local_map)


if __name__ == '__main__':
    args, _ = parse_common_args()

    env = NavRepTrainEnv(silent=True, scenario='test', legacy_mode=True)
    policy = LuciaPolicy(env)

    run_test_episodes(env, policy, render=args.render)
Example #3
import os

from navrep.tools.envplayer import EnvPlayer
from navrep.envs.navreptrainenv import NavRepTrainEnv

if __name__ == "__main__":
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"  # disable GPU

    env = NavRepTrainEnv()
    player = EnvPlayer(env)
Example #4
import numpy as np
from timeit import default_timer as timer
from tqdm import tqdm

from navrep.envs.navreptrainenv import NavRepTrainEnv
from navrep.tools.commonargs import parse_common_args

if __name__ == "__main__":
    args, _ = parse_common_args()

    n = args.n
    if n is None:
        n = 1000000

    env = NavRepTrainEnv(scenario='train',
                         silent=True,
                         adaptive=False,
                         collect_statistics=False)
    env.reset()

    action = np.array([0., 0., 0.])

    tic = timer()
    for i in tqdm(range(n)):
        env.step(action)
    toc = timer()
    elapsed = toc - tic

    print("Executed {} simulation steps in {:.1f} seconds.".format(n, elapsed))
Example #5
        archive_dir = os.path.expanduser("~/navrep/datasets/V/marktwo")
        if args.dry_run:
            archive_dir = "/tmp/navrep/datasets/V/marktwo"
        env = MarkEnv(silent=True, maps=SECOND_TRAIN_MAPS)
        generate_vae_dataset(env,
                             n_sequences=n_sequences,
                             subset_index=args.subproc_id,
                             n_subsets=args.n_subprocs,
                             render=args.render,
                             archive_dir=archive_dir)
    if args.environment == "navreptrain":
        archive_dir = os.path.expanduser("~/navrep/datasets/V/navreptrain")
        if args.dry_run:
            archive_dir = "/tmp/navrep/datasets/V/navreptrain"
        env = NavRepTrainEnv(silent=True,
                             scenario='train',
                             adaptive=False,
                             collect_statistics=False)
        env.soadrl_sim.human_num = 20
        generate_vae_dataset(env,
                             n_sequences=n_sequences,
                             subset_index=args.subproc_id,
                             n_subsets=args.n_subprocs,
                             policy=ORCAPolicy(suicide_if_stuck=True),
                             render=args.render,
                             archive_dir=archive_dir)
    if args.environment == "irl":
        folder_to_archive(
            directory="~/rosbags/iros_rosbags",
            archive_dir=os.path.expanduser("~/navrep/datasets/V/irl"),
        )
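Example #5 is a fragment from the middle of a dataset-generation script: it references args, n_sequences and several helpers (MarkEnv, SECOND_TRAIN_MAPS, generate_vae_dataset, ORCAPolicy, folder_to_archive) that are imported and defined earlier, and its first block appears to sit inside an environment-selection branch for a "marktwo" environment, judging by the archive paths. Below is a hedged sketch of that assumed preamble, modeled on the argument handling in Example #4; the helper import paths are not shown in these examples and are therefore left as a comment, the branch name is inferred, and the default sequence count is a made-up placeholder.

import os

from navrep.envs.navreptrainenv import NavRepTrainEnv
from navrep.tools.commonargs import parse_common_args
# MarkEnv, SECOND_TRAIN_MAPS, generate_vae_dataset, ORCAPolicy and
# folder_to_archive are also imported in the full script; their module
# paths are not shown in these examples.

if __name__ == "__main__":
    args, _ = parse_common_args()

    n_sequences = args.n
    if n_sequences is None:
        n_sequences = 100  # placeholder default, not from the original script

    if args.environment == "marktwo":  # assumed branch name, inferred from the archive paths
        pass  # the fragment above (archive_dir = ..., MarkEnv, generate_vae_dataset) goes here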