Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input-type",
        default="blind",
        choices=["blind", "rgb", "depth", "rgbd"],
    )
    parser.add_argument("--model-path", default="", type=str)
    parser.add_argument("--task-config",
                        type=str,
                        default="configs/tasks/pointnav.yaml")
    args = parser.parse_args()

    config = get_config(args.task_config)

    agent_config = get_default_config()
    agent_config.INPUT_TYPE = args.input_type
    agent_config.MODEL_PATH = args.model_path
    agent_config.GOAL_SENSOR_UUID = config.TASK.GOAL_SENSOR_UUID

    agent = PPOAgent(agent_config)
    benchmark = habitat.Benchmark(config_paths=args.task_config)
    metrics = benchmark.evaluate(agent)

    for k, v in metrics.items():
        habitat.logger.info("{}: {:.3f}".format(k, v))
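
The snippet above omits its imports. A plausible header for it, assuming PPOAgent and get_default_config come from habitat_baselines (exact module paths vary between releases, so treat this as a sketch rather than the canonical import list):

import argparse

import habitat
from habitat import get_config
from habitat_baselines.agents.ppo_agents import PPOAgent, get_default_config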
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--agent-type",
        default="orbslam2-rgbd",
        choices=["blind", "orbslam2-rgbd", "orbslam2-rgb-monod"],
    )
    parser.add_argument("--task-config",
                        type=str,
                        default="tasks/pointnav_rgbd.yaml")
    args = parser.parse_args()

    config = get_config()
    agent_config = cfg_baseline()
    config.defrost()
    config.BASELINE = agent_config.BASELINE
    make_good_config_for_orbslam2(config)

    if args.agent_type == "blind":
        agent = BlindAgent(config.TRAINER.ORBSLAM2)
    elif args.agent_type == "orbslam2-rgbd":
        agent = ORBSLAM2Agent(config.TRAINER.ORBSLAM2)
    elif args.agent_type == "orbslam2-rgb-monod":
        agent = ORBSLAM2MonodepthAgent(config.TRAINER.ORBSLAM2)
    else:
        raise ValueError(f"{args.agent_type} is an unknown agent type")
    benchmark = habitat.Benchmark(args.task_config)
    metrics = benchmark.evaluate(agent)
    for k, v in metrics.items():
        habitat.logger.info("{}: {:.3f}".format(k, v))
Example #3
def test_ppo_agents():

    agent_config = ppo_agents.get_default_config()
    agent_config.MODEL_PATH = ""
    agent_config.defrost()
    config_env = habitat.get_config(config_paths=CFG_TEST)
    if not os.path.exists(config_env.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")

    benchmark = habitat.Benchmark(config_paths=CFG_TEST)

    for input_type in ["blind", "rgb", "depth", "rgbd"]:
        for resolution in [256, 384]:
            config_env.defrost()
            config_env.SIMULATOR.AGENT_0.SENSORS = []
            if input_type in ["rgb", "rgbd"]:
                config_env.SIMULATOR.AGENT_0.SENSORS += ["RGB_SENSOR"]
                agent_config.RESOLUTION = resolution
                config_env.SIMULATOR.RGB_SENSOR.WIDTH = resolution
                config_env.SIMULATOR.RGB_SENSOR.HEIGHT = resolution
            if input_type in ["depth", "rgbd"]:
                config_env.SIMULATOR.AGENT_0.SENSORS += ["DEPTH_SENSOR"]
                agent_config.RESOLUTION = resolution
                config_env.SIMULATOR.DEPTH_SENSOR.WIDTH = resolution
                config_env.SIMULATOR.DEPTH_SENSOR.HEIGHT = resolution

            config_env.freeze()

            del benchmark._env
            benchmark._env = habitat.Env(config=config_env)
            agent_config.INPUT_TYPE = input_type

            agent = ppo_agents.PPOAgent(agent_config)
            habitat.logger.info(benchmark.evaluate(agent, num_episodes=10))
Example #4
def run_cfg(cfg, uuid):
    if cfg['eval_kwargs']['exp_path'] is not None:
        # Process exp path
        exp_paths = [cfg['eval_kwargs']['exp_path']]

        # Set up config with the first exp only
        metadata_dir = get_subdir(exp_paths[0], 'metadata')
        config_path = os.path.join(metadata_dir, 'config.json')

        # Load config
        with open(config_path) as config:
            config_data = json.load(config)

        # Update configs
        config_data['uuid'] += '_benchmark' + uuid
        config_data['cfg']['saving']['log_dir'] += '/benchmark'
        config_data['cfg']['saving']['visdom_log_file'] = os.path.join(
            config_data['cfg']['saving']['log_dir'], 'visdom_logs.json'
        )
        config_data['cfg']['learner']['test'] = True

        if cfg['eval_kwargs']['overwrite_configs']:
            config_data['cfg'] = update_dict_deepcopy(config_data['cfg'], cfg)

        set_seed(config_data['cfg']['training']['seed'])

        # Get checkpoints
        ckpt_paths = []
        for exp_path in exp_paths:
            ckpts_dir = get_subdir(exp_path, 'checkpoints')
            ckpt_path = os.path.join(ckpts_dir, 'ckpt-latest.dat')
            ckpt_paths.append(ckpt_path)
    else:
        config_data = { 'cfg': cfg, 'uuid': uuid }
        ckpt_paths = [None]
        exp_paths = [LOG_DIR]

    if 'eval_kwargs' in cfg and 'debug' in cfg['eval_kwargs']:
        if cfg['eval_kwargs']['debug']:
            config_data['cfg']['saving']['logging_type'] = 'visdom'
            config_data['cfg']['saving']['save_eval_videos'] = True
        else:
            config_data['cfg']['saving']['save_eval_videos'] = False

    print(pprint.pformat(config_data))
    print('Loaded:', config_data['uuid'])
    agent = HabitatAgent(ckpt_path=ckpt_paths[0], config_data=config_data)

    if cfg['eval_kwargs']['challenge']:
        challenge = habitat.Challenge()
        challenge.submit(agent)
    else:
        benchmark = habitat.Benchmark(
            config_file=cfg['eval_kwargs']['benchmark_config'], config_dir='/'
        )
        metrics = benchmark.evaluate(agent, cfg['eval_kwargs']['benchmark_episodes'])
        agent.finish_benchmark(metrics)
        benchmark._env.close()

        everything = update_dict_deepcopy(metrics, config_data)
        patience = config_data['cfg']['learner']['backout']['patience']
        unstuck_dist = config_data['cfg']['learner']['backout']['unstuck_dist']
        write_location = os.path.join(exp_paths[0], f'benchmark_data_p{patience}_d{unstuck_dist}.json')
        with open(write_location, 'w') as outfile:
            json.dump(everything, outfile)
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task-config", type=str, default="configs/tasks/pointnav.yaml"
    )
    args = parser.parse_args()

    agent = ForwardOnlyAgent()
    benchmark = habitat.Benchmark(args.task_config)
    metrics = benchmark.evaluate(agent, num_episodes=10)

    for k, v in metrics.items():
        print("{}: {:.3f}".format(k, v))
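
ForwardOnlyAgent itself is not shown above. As a rough sketch of what Benchmark.evaluate drives, here is a minimal agent assuming the habitat.Agent interface (reset/act); "MOVE_FORWARD" is an assumed action name and the exact action format depends on the habitat version and task config:

import habitat


class ForwardOnlyAgent(habitat.Agent):
    def reset(self):
        # Called at the start of every episode; nothing to reset for this agent.
        pass

    def act(self, observations):
        # Called once per step with the current observations.
        # Always move forward ("MOVE_FORWARD" is an assumed action name).
        return {"action": "MOVE_FORWARD"}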
Example #6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--task-config",
                        type=str,
                        default="tasks/pointnav.yaml")
    parser.add_argument("--agent_class", type=str, default="GoalFollower")
    args = parser.parse_args()

    agent = get_agent_cls(args.agent_class)(
        habitat.get_config(args.task_config)
    )
    benchmark = habitat.Benchmark(args.task_config)
    metrics = benchmark.evaluate(agent)

    for k, v in metrics.items():
        habitat.logger.info("{}: {:.3f}".format(k, v))
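
get_agent_cls is not defined in this snippet. A hypothetical implementation that resolves the class by name from the simple_agents baselines module could look like:

import habitat_baselines.agents.simple_agents as simple_agents


def get_agent_cls(agent_class_name):
    # Look the class up by name, e.g. "GoalFollower" -> simple_agents.GoalFollower.
    return getattr(simple_agents, agent_class_name)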
Example #7
def test_simple_agents():
    config_env = habitat.get_config(config_paths=CFG_TEST)

    if not os.path.exists(config_env.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")

    benchmark = habitat.Benchmark(config_paths=CFG_TEST)

    for agent_class in [
            simple_agents.ForwardOnlyAgent,
            simple_agents.GoalFollower,
            simple_agents.RandomAgent,
            simple_agents.RandomForwardAgent,
    ]:
        agent = agent_class(config_env.TASK.SUCCESS_DISTANCE)
        habitat.logger.info(agent_class.__name__)
        habitat.logger.info(benchmark.evaluate(agent, num_episodes=100))
Example #8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input-type",
        default="blind",
        choices=["blind", "rgb", "depth", "rgbd"],
    )
    parser.add_argument("--model-path", default="", type=str)
    parser.add_argument(
        "--task-config", type=str, default="configs/tasks/pointnav.yaml"
    )
    parser.add_argument(
        "--num-episodes", type=int, default=50
    )
    # frame rate defines the number of frames per action in generated videos
    parser.add_argument(
        "--frame-rate", type=int, default=1
    )
    # control period defines the amount of time, in seconds, that an agent
    # should take to complete an action
    parser.add_argument(
        "--control-period", type=float, default=1.0
    )

    args = parser.parse_args()

    config = get_config(args.task_config)

    agent_config = get_default_config()
    agent_config.INPUT_TYPE = args.input_type
    agent_config.MODEL_PATH = args.model_path
    num_episodes = args.num_episodes
    frame_rate = args.frame_rate
    control_period = args.control_period

    agent = PPOAgent(agent_config)
    print("Establishing benchmark:")
    benchmark = habitat.Benchmark(config_paths=args.task_config, enable_physics=True)
    print("Evaluating:")
    metrics = benchmark.evaluate(
        agent,
        num_episodes=num_episodes,
        frame_rate=frame_rate,
        control_period=control_period,
    )

    for k, v in metrics.items():
        habitat.logger.info("{}: {:.3f}".format(k, v))
Example #9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--success-distance", type=float, default=0.2)
    parser.add_argument(
        "--task-config", type=str, default="configs/tasks/pointnav.yaml"
    )
    parser.add_argument("--agent-class", type=str, default="GoalFollower")
    args = parser.parse_args()

    config = get_config(args.task_config)

    agent = get_agent_cls(args.agent_class)(
        success_distance=args.success_distance,
        goal_sensor_uuid=config.TASK.GOAL_SENSOR_UUID,
    )
    benchmark = habitat.Benchmark(config_paths=args.task_config)
    metrics = benchmark.evaluate(agent)

    for k, v in metrics.items():
        habitat.logger.info("{}: {:.3f}".format(k, v))
Example #10
def test_ppo_agents():
    config = ppo_agents.get_default_config()
    config.MODEL_PATH = ""
    config_env = habitat.get_config(config_file=CFG_TEST)
    config_env.defrost()
    if not os.path.exists(config_env.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")

    benchmark = habitat.Benchmark(config_file=CFG_TEST, config_dir="configs")

    for input_type in ["blind", "rgb", "depth", "rgbd"]:
        config_env.defrost()
        config_env.SIMULATOR.AGENT_0.SENSORS = []
        if input_type in ["rgb", "rgbd"]:
            config_env.SIMULATOR.AGENT_0.SENSORS += ["RGB_SENSOR"]
        if input_type in ["depth", "rgbd"]:
            config_env.SIMULATOR.AGENT_0.SENSORS += ["DEPTH_SENSOR"]
        config_env.freeze()
        del benchmark._env
        benchmark._env = habitat.Env(config=config_env)
        config.INPUT_TYPE = input_type

        agent = ppo_agents.PPOAgent(config)
        habitat.logger.info(benchmark.evaluate(agent, num_episodes=10))
Example #11
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input-type",
        default="blind",
        choices=["blind", "rgb", "depth", "rgbd"],
    )
    parser.add_argument("--model-path", default="", type=str)
    parser.add_argument(
        "--task-config", type=str, default="configs/tasks/pointnav.yaml"
    )
    parser.add_argument(
        "--num-episodes", type=int, default=50
    )
    # frame rate defines the number of frames per action in generated videos
    parser.add_argument(
        "--frame-rate", type=int, default=1
    )
    args = parser.parse_args()

    config = get_config(args.task_config)

    agent_config = get_default_config()
    agent_config.INPUT_TYPE = args.input_type
    agent_config.MODEL_PATH = args.model_path
    num_episodes = args.num_episodes
    frame_rate = args.frame_rate

    agent = PPOAgent(agent_config)
    print("Establishing benchmark:")
    benchmark = habitat.Benchmark(config_paths=args.task_config)
    print("Evaluating:")
    metrics = benchmark.evaluate(
        agent, num_episodes=num_episodes, frame_rate=frame_rate
    )

    for k, v in metrics.items():
        habitat.logger.info("{}: {:.3f}".format(k, v))