Example #1
def test_rl_env():
    config = get_config(CFG_TEST)
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")

    env = DummyRLEnv(config=config, dataset=None)
    env.episodes = [
        NavigationEpisode(
            episode_id="0",
            scene_id=config.SIMULATOR.SCENE,
            start_position=[-3.0133917, 0.04623024, 7.3064547],
            start_rotation=[0, 0.163276, 0, 0.98658],
            goals=[
                NavigationGoal(position=[-3.0133917, 0.04623024, 7.3064547])
            ],
            info={"geodesic_distance": 0.001},
        )
    ]

    done = False
    observation = env.reset()

    non_stop_actions = [
        v for v in range(len(SimulatorActions))
        if v != SimulatorActions.STOP.value
    ]
    for _ in range(config.ENVIRONMENT.MAX_EPISODE_STEPS):
        observation, reward, done, info = env.step(
            np.random.choice(non_stop_actions))

    # check for steps limit on environment
    assert done is True, "episodes should be over after max_episode_steps"

    env.reset()
    observation, reward, done, info = env.step(SimulatorActions.STOP.value)
    assert done is True, "done should be true after STOP action"

    env.close()
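DummyRLEnv is not defined in this example; it is presumably a minimal subclass of habitat.RLEnv used for testing. A sketch of what such a subclass could look like, assuming habitat.RLEnv's four abstract methods (get_reward_range, get_reward, get_done, get_info) and that the wrapped environment is exposed as self._env:

class DummyRLEnv(habitat.RLEnv):
    def get_reward_range(self):
        return -1.0, 1.0

    def get_reward(self, observations):
        return 0.0

    def get_done(self, observations):
        # end the RL episode whenever the underlying task episode ends
        return self._env.episode_over

    def get_info(self, observations):
        return {}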
Example #2
def test_collisions():
    config = get_config()
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")
    config.defrost()
    config.TASK.MEASUREMENTS = ["COLLISIONS"]
    config.freeze()
    env = habitat.Env(config=config, dataset=None)
    env.reset()

    for _ in range(20):
        _random_episode(env, config)

        env.reset()
        assert env.get_metrics()["collisions"] is None

        prev_collisions = 0
        prev_loc = env.sim.get_agent_state().position
        for _ in range(50):
            action = sample_non_stop_action(env.action_space)
            env.step(action)
            collisions = env.get_metrics()["collisions"]["count"]
            loc = env.sim.get_agent_state().position
            if (np.linalg.norm(loc - prev_loc) <
                    0.9 * config.SIMULATOR.FORWARD_STEP_SIZE
                    and action["action"] == MoveForwardAction.name):
                # Check to see if the new method of doing collisions catches
                # all the same collisions as the old method
                assert collisions == prev_collisions + 1

            # We can _never_ collide with standard turn actions
            if action["action"] != MoveForwardAction.name:
                assert collisions == prev_collisions

            prev_loc = loc
            prev_collisions = collisions

    env.close()
Example #3
def test_pointgoal_sensor():
    config = get_config()
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")
    config.defrost()
    config.TASK.SENSORS = ["POINTGOAL_SENSOR"]
    config.TASK.POINTGOAL_SENSOR.DIMENSIONALITY = 3
    config.TASK.POINTGOAL_SENSOR.GOAL_FORMAT = "CARTESIAN"
    config.freeze()
    env = habitat.Env(config=config, dataset=None)

    # start position is checked for validity for the specific test scene
    valid_start_position = [-1.3731, 0.08431, 8.60692]
    expected_pointgoal = [0.1, 0.2, 0.3]
    goal_position = np.add(valid_start_position, expected_pointgoal)

    # the starting quaternion is rotated 180 degrees about the z-axis, which
    # corresponds to the simulator using negative z as the forward direction
    start_rotation = [0, 0, 0, 1]

    env.episode_iterator = iter([
        NavigationEpisode(
            episode_id="0",
            scene_id=config.SIMULATOR.SCENE,
            start_position=valid_start_position,
            start_rotation=start_rotation,
            goals=[NavigationGoal(position=goal_position)],
        )
    ])

    env.reset()
    for _ in range(100):
        obs = env.step(sample_non_stop_action(env.action_space))
        pointgoal = obs["pointgoal"]
        # taking non-stop actions should not change the episodic pointgoal
        assert np.allclose(pointgoal, expected_pointgoal)

    env.close()
Example #4
    def __init__(self,
                 config_paths: Optional[str] = None,
                 eval_remote=False,
                 enable_physics=False) -> None:
        r"""..

        :param config_paths: file to be used for creating the environment
        :param eval_remote: boolean indicating whether evaluation should be run remotely or locally
        """
        config_env = get_config(config_paths)
        # embed top-down map and heading sensor in config
        config_env.defrost()
        config_env.TASK.MEASUREMENTS.append("TOP_DOWN_MAP")
        config_env.TASK.SENSORS.append("HEADING_SENSOR")
        config_env.freeze()
        self._eval_remote = eval_remote

        self._enable_physics = enable_physics

        if self._eval_remote is True:
            self._env = None
        else:
            self._env = BenchmarkingRLEnv(config=config_env)
Example #5
def test_tactile():
    config = get_config()
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")
    config.defrost()
    config.TASK.SENSORS = ["PROXIMITY_SENSOR"]
    config.freeze()
    env = habitat.Env(config=config, dataset=None)
    env.reset()
    random.seed(1234)

    for _ in range(20):
        _random_episode(env, config)
        env.reset()

        action = env._sim.index_forward_action
        for _ in range(10):
            obs = env.step(action)
            proximity = obs["proximity"]
            assert 0.0 <= proximity <= 2.0

    env.close()
Example #6
def test_env():
    config = get_config(CFG_TEST)
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")
    env = habitat.Env(config=config, dataset=None)
    env.episodes = [
        NavigationEpisode(
            episode_id="0",
            scene_id=config.SIMULATOR.SCENE,
            start_position=[-3.0133917, 0.04623024, 7.3064547],
            start_rotation=[0, 0.163276, 0, 0.98658],
            goals=[NavigationGoal([-3.0133917, 0.04623024, 7.3064547])],
            info={"geodesic_distance": 0.001},
        )
    ]
    env.reset()

    non_stop_actions = [
        k for k, v in SIM_ACTION_TO_NAME.items()
        if v != SimulatorActions.STOP.value
    ]
    for _ in range(config.ENVIRONMENT.MAX_EPISODE_STEPS):
        act = np.random.choice(non_stop_actions)
        env.step(act)

    # check for steps limit on environment
    assert env.episode_over is True, ("episode should be over after "
                                      "max_episode_steps")

    env.reset()

    env.step(SIM_NAME_TO_ACTION[SimulatorActions.STOP.value])
    # check for STOP action
    assert env.episode_over is True, ("episode should be over after STOP "
                                      "action")

    env.close()
Example #7
def test_mp3d_eqa_sim():
    eqa_config = get_config(CFG_TEST)

    if not mp3d_dataset.Matterport3dDatasetV1.check_config_paths_exist(
        eqa_config.DATASET
    ):
        pytest.skip(
            "Please download Matterport3D EQA dataset to data folder."
        )

    dataset = make_dataset(
        id_dataset=eqa_config.DATASET.TYPE, config=eqa_config.DATASET
    )
    env = habitat.Env(config=eqa_config, dataset=dataset)
    env.episodes = dataset.episodes[:EPISODES_LIMIT]

    assert env
    env.reset()
    while not env.episode_over:
        action = env.action_space.sample()
        assert env.action_space.contains(action)
        obs = env.step(action)
        if not env.episode_over:
            assert "rgb" in obs, "RGB image is missing in observation."
            assert obs["rgb"].shape[:2] == (
                eqa_config.SIMULATOR.RGB_SENSOR.HEIGHT,
                eqa_config.SIMULATOR.RGB_SENSOR.WIDTH,
            ), (
                "Observation resolution {} doesn't correspond to config "
                "({}, {}).".format(
                    obs["rgb"].shape[:2],
                    eqa_config.SIMULATOR.RGB_SENSOR.HEIGHT,
                    eqa_config.SIMULATOR.RGB_SENSOR.WIDTH,
                )
            )

    env.close()
Example #8
def test_state_sensors():
    config = get_config()
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")
    config.defrost()
    config.TASK.SENSORS = ["HEADING_SENSOR", "COMPASS_SENSOR", "GPS_SENSOR"]
    config.freeze()
    env = habitat.Env(config=config, dataset=None)
    env.reset()
    random.seed(123)
    np.random.seed(123)

    for _ in range(100):
        random_heading = np.random.uniform(-np.pi, np.pi)
        random_rotation = [
            0,
            np.sin(random_heading / 2),
            0,
            np.cos(random_heading / 2),
        ]
        env.episode_iterator = iter([
            NavigationEpisode(
                episode_id="0",
                scene_id=config.SIMULATOR.SCENE,
                start_position=[03.00611, 0.072447, -2.67867],
                start_rotation=random_rotation,
                goals=[],
            )
        ])

        obs = env.reset()
        heading = obs["heading"]
        assert np.allclose(heading, random_heading)
        assert np.allclose(obs["compass"], [0.0], atol=1e-5)
        assert np.allclose(obs["gps"], [0.0, 0.0], atol=1e-5)

    env.close()
Example #9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input-type",
        default="blind",
        choices=["blind", "rgb", "depth", "rgbd"],
    )
    parser.add_argument("--model-path", default="", type=str)
    parser.add_argument(
        "--task-config", type=str, default="configs/tasks/pointnav.yaml"
    )
    parser.add_argument(
        "--num-episodes", type=int, default=50
    )
    # frame rate defines the number of frames per action in generated videos
    parser.add_argument(
        "--frame-rate", type=int, default=1
    )
    args = parser.parse_args()

    config = get_config(args.task_config)

    agent_config = get_default_config()
    agent_config.INPUT_TYPE = args.input_type
    agent_config.MODEL_PATH = args.model_path
    num_episodes = args.num_episodes
    frame_rate = args.frame_rate

    agent = PPOAgent(agent_config)
    print("Establishing benchmark:")
    benchmark = habitat.Benchmark(config_paths=args.task_config)
    print("Evaluating:")
    metrics = benchmark.evaluate(agent, num_episodes=num_episodes, frame_rate=frame_rate)

    for k, v in metrics.items():
        habitat.logger.info("{}: {:.3f}".format(k, v))
Example #10
def test_static_pointgoal_sensor():
    config = get_config()
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")
    config.defrost()
    config.TASK.SENSORS = ["STATIC_POINTGOAL_SENSOR"]
    config.freeze()
    env = habitat.Env(config=config, dataset=None)

    # start position is checked for validity for the specific test scene
    valid_start_position = [-1.3731, 0.08431, 8.60692]
    expected_static_pointgoal = [0.1, 0.2, 0.3]
    goal_position = np.add(valid_start_position, expected_static_pointgoal)

    # the starting quaternion is rotated 180 degrees about the z-axis, which
    # corresponds to the simulator using negative z as the forward direction
    start_rotation = [0, 0, 0, 1]

    env.episodes = [
        NavigationEpisode(
            episode_id="0",
            scene_id=config.SIMULATOR.SCENE,
            start_position=valid_start_position,
            start_rotation=start_rotation,
            goals=[NavigationGoal(goal_position)],
        )
    ]

    env.reset()
    for _ in range(5):
        obs = env.step(np.random.choice(NON_STOP_ACTIONS))
    static_pointgoal = obs["static_pointgoal"]

    # taking non-stop actions should not change the static pointgoal
    assert np.allclose(static_pointgoal, expected_static_pointgoal)
    env.close()
Example #11
def test_rl_env():
    config = get_config(CFG_TEST)
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")

    env = DummyRLEnv(config=config, dataset=None)
    env.episodes = [
        NavigationEpisode(
            episode_id="0",
            scene_id=config.SIMULATOR.SCENE,
            start_position=[03.00611, 0.072447, -2.67867],
            start_rotation=[0, 0.163276, 0, 0.98658],
            goals=[],
        )
    ]

    done = False
    observation = env.reset()

    non_stop_actions = [
        k for k, v in SIM_ACTION_TO_NAME.items()
        if v != SimulatorActions.STOP.value
    ]
    for _ in range(config.ENVIRONMENT.MAX_EPISODE_STEPS):
        observation, reward, done, info = env.step(
            np.random.choice(non_stop_actions))

    # check for steps limit on environment
    assert done is True, "episodes should be over after max_episode_steps"

    env.reset()
    observation, reward, done, info = env.step(
        SIM_NAME_TO_ACTION[SimulatorActions.STOP.value])
    assert done is True, "done should be true after STOP action"

    env.close()
Example #12
def test_pointnav_episode_generator():
    config = get_config(CFG_TEST)
    config.defrost()
    config.DATASET.SPLIT = "val"
    config.ENVIRONMENT.MAX_EPISODE_STEPS = 500
    config.freeze()
    env = habitat.Env(config)
    env.seed(config.SEED)
    random.seed(config.SEED)
    generator = pointnav_generator.generate_pointnav_episode(
        sim=env.sim,
        shortest_path_success_distance=config.TASK.SUCCESS_DISTANCE,
        shortest_path_max_steps=config.ENVIRONMENT.MAX_EPISODE_STEPS,
    )
    episodes = []
    for _ in range(NUM_EPISODES):
        episode = next(generator)
        episodes.append(episode)

    for episode in pointnav_generator.generate_pointnav_episode(
            sim=env.sim,
            num_episodes=NUM_EPISODES,
            shortest_path_success_distance=config.TASK.SUCCESS_DISTANCE,
            shortest_path_max_steps=config.ENVIRONMENT.MAX_EPISODE_STEPS,
            geodesic_to_euclid_min_ratio=0,
    ):
        episodes.append(episode)
    assert len(episodes) == 2 * NUM_EPISODES
    env.episodes = episodes

    for episode in episodes:
        check_shortest_path(env, episode)

    dataset = habitat.Dataset()
    dataset.episodes = episodes
    assert dataset.to_json(), "Generated episodes aren't json serializable."
Example #13
def test_dataset_splitting(split):

    dataset_config = get_config(CFG_TEST).DATASET
    dataset_config.defrost()
    dataset_config.SPLIT = split
    if not mp3d_dataset.Matterport3dDatasetV1.check_config_paths_exist(
            dataset_config):
        pytest.skip("Please download Matterport3D EQA dataset to data folder.")

    scenes = mp3d_dataset.Matterport3dDatasetV1.get_scenes_to_load(
        config=dataset_config)
    assert (
        len(scenes) > 0
    ), "Expected dataset to contain separate episode files per scene."

    dataset_config.CONTENT_SCENES = scenes
    full_dataset = make_dataset(id_dataset=dataset_config.TYPE,
                                config=dataset_config)
    full_episodes = {(ep.scene_id, ep.episode_id)
                     for ep in full_dataset.episodes}

    dataset_config.CONTENT_SCENES = scenes[0:len(scenes) // 2]
    split1_dataset = make_dataset(id_dataset=dataset_config.TYPE,
                                  config=dataset_config)
    split1_episodes = {(ep.scene_id, ep.episode_id)
                       for ep in split1_dataset.episodes}

    dataset_config.CONTENT_SCENES = scenes[len(scenes) // 2:]
    split2_dataset = make_dataset(id_dataset=dataset_config.TYPE,
                                  config=dataset_config)
    split2_episodes = {(ep.scene_id, ep.episode_id)
                       for ep in split2_dataset.episodes}

    assert full_episodes == split1_episodes.union(
        split2_episodes), "Split dataset is not equal to full dataset"
    assert (len(split1_episodes.intersection(split2_episodes)) == 0
            ), "Intersection of split datasets is not the empty set"
Example #14
def init_sim():
    config = get_config()
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")
    return make_sim(config.SIMULATOR.TYPE, config=config.SIMULATOR)
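A simulator created with init_sim can be driven directly, without a habitat.Env wrapper. A minimal usage sketch, assuming the standard Simulator interface (reset, step by action index, get_agent_state, close):

sim = init_sim()
obs = sim.reset()              # observations from the configured sensors
obs = sim.step(0)              # apply one action by index
state = sim.get_agent_state()  # query the agent's pose
sim.close()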
Example #15
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model-path", type=str, required=True)
    parser.add_argument("--sim-gpu-id", type=int, required=True)
    parser.add_argument("--pth-gpu-id", type=int, required=True)
    parser.add_argument("--num-processes", type=int, required=True)
    parser.add_argument("--hidden-size", type=int, default=512)
    parser.add_argument("--count-test-episodes", type=int, default=100)
    parser.add_argument(
        "--sensors",
        type=str,
        default="RGB_SENSOR,DEPTH_SENSOR",
        help="comma separated string containing different"
        "sensors to use, currently 'RGB_SENSOR' and"
        "'DEPTH_SENSOR' are supported",
    )
    parser.add_argument(
        "--task-config",
        type=str,
        default="configs/tasks/pointnav.yaml",
        help="path to config yaml containing information about task",
    )
    args = parser.parse_args()

    device = torch.device("cuda:{}".format(args.pth_gpu_id))

    env_configs = []
    baseline_configs = []

    for _ in range(args.num_processes):
        config_env = get_config(config_paths=args.task_config)
        config_env.defrost()
        config_env.DATASET.SPLIT = "val"

        agent_sensors = args.sensors.strip().split(",")
        for sensor in agent_sensors:
            assert sensor in ["RGB_SENSOR", "DEPTH_SENSOR"]
        config_env.SIMULATOR.AGENT_0.SENSORS = agent_sensors
        config_env.freeze()
        env_configs.append(config_env)

        config_baseline = cfg_baseline()
        baseline_configs.append(config_baseline)

    assert len(baseline_configs) > 0, "empty list of datasets"

    envs = habitat.VectorEnv(
        make_env_fn=make_env_fn,
        env_fn_args=tuple(
            zip(env_configs, baseline_configs,
                range(args.num_processes))),
    )

    ckpt = torch.load(args.model_path, map_location=device)

    actor_critic = Policy(
        observation_space=envs.observation_spaces[0],
        action_space=envs.action_spaces[0],
        hidden_size=args.hidden_size,
        goal_sensor_uuid=env_configs[0].TASK.GOAL_SENSOR_UUID,
    )
    actor_critic.to(device)

    ppo = PPO(
        actor_critic=actor_critic,
        clip_param=0.1,
        ppo_epoch=4,
        num_mini_batch=32,
        value_loss_coef=0.5,
        entropy_coef=0.01,
        lr=2.5e-4,
        eps=1e-5,
        max_grad_norm=0.5,
    )

    ppo.load_state_dict(ckpt["state_dict"])

    actor_critic = ppo.actor_critic

    observations = envs.reset()
    batch = batch_obs(observations)
    for sensor in batch:
        batch[sensor] = batch[sensor].to(device)

    episode_rewards = torch.zeros(envs.num_envs, 1, device=device)
    episode_spls = torch.zeros(envs.num_envs, 1, device=device)
    episode_success = torch.zeros(envs.num_envs, 1, device=device)
    episode_counts = torch.zeros(envs.num_envs, 1, device=device)
    current_episode_reward = torch.zeros(envs.num_envs, 1, device=device)

    test_recurrent_hidden_states = torch.zeros(args.num_processes,
                                               args.hidden_size,
                                               device=device)
    not_done_masks = torch.zeros(args.num_processes, 1, device=device)

    while episode_counts.sum() < args.count_test_episodes:
        with torch.no_grad():
            _, actions, _, test_recurrent_hidden_states = actor_critic.act(
                batch,
                test_recurrent_hidden_states,
                not_done_masks,
                deterministic=False,
            )

        outputs = envs.step([a[0].item() for a in actions])

        observations, rewards, dones, infos = [list(x) for x in zip(*outputs)]
        batch = batch_obs(observations)
        for sensor in batch:
            batch[sensor] = batch[sensor].to(device)

        not_done_masks = torch.tensor(
            [[0.0] if done else [1.0] for done in dones],
            dtype=torch.float,
            device=device,
        )

        for i in range(not_done_masks.shape[0]):
            if not_done_masks[i].item() == 0:
                episode_spls[i] += infos[i]["roomnavmetric"]
                if infos[i]["roomnavmetric"] > 0:
                    episode_success[i] += 1

        rewards = torch.tensor(rewards, dtype=torch.float,
                               device=device).unsqueeze(1)
        current_episode_reward += rewards
        episode_rewards += (1 - not_done_masks) * current_episode_reward
        episode_counts += 1 - not_done_masks
        current_episode_reward *= not_done_masks

    episode_reward_mean = (episode_rewards / episode_counts).mean().item()
    episode_spl_mean = (episode_spls / episode_counts).mean().item()
    episode_success_mean = (episode_success / episode_counts).mean().item()

    print("Average episode reward: {:.6f}".format(episode_reward_mean))
    print("Average episode success: {:.6f}".format(episode_success_mean))
    print("Average episode spl: {:.6f}".format(episode_spl_mean))
Example #16
def eval_checkpoint(checkpoint_path, args, writer, cur_ckpt_idx=0):
    env_configs = []
    baseline_configs = []
    device = torch.device("cuda", args.pth_gpu_id)

    for _ in range(args.num_processes):
        config_env = get_config(config_paths=args.task_config)
        config_env.defrost()
        config_env.DATASET.SPLIT = "val"

        agent_sensors = args.sensors.strip().split(",")
        for sensor in agent_sensors:
            assert sensor in ["RGB_SENSOR", "DEPTH_SENSOR"]
        config_env.SIMULATOR.AGENT_0.SENSORS = agent_sensors
        if args.video_option:
            config_env.TASK.MEASUREMENTS.append("TOP_DOWN_MAP")
            config_env.TASK.MEASUREMENTS.append("COLLISIONS")
        config_env.freeze()
        env_configs.append(config_env)

        config_baseline = cfg_baseline()
        baseline_configs.append(config_baseline)

    assert len(baseline_configs) > 0, "empty list of datasets"

    envs = habitat.VectorEnv(
        make_env_fn=make_env_fn,
        env_fn_args=tuple(
            zip(env_configs, baseline_configs, range(args.num_processes))
        ),
    )

    ckpt = torch.load(checkpoint_path, map_location=device)

    actor_critic = Policy(
        observation_space=envs.observation_spaces[0],
        action_space=envs.action_spaces[0],
        hidden_size=args.hidden_size,
        goal_sensor_uuid=env_configs[0].TASK.GOAL_SENSOR_UUID,
    )
    actor_critic.to(device)

    ppo = PPO(
        actor_critic=actor_critic,
        clip_param=0.1,
        ppo_epoch=4,
        num_mini_batch=32,
        value_loss_coef=0.5,
        entropy_coef=0.01,
        lr=2.5e-4,
        eps=1e-5,
        max_grad_norm=0.5,
    )

    ppo.load_state_dict(ckpt["state_dict"])

    actor_critic = ppo.actor_critic

    observations = envs.reset()
    batch = batch_obs(observations)
    for sensor in batch:
        batch[sensor] = batch[sensor].to(device)

    episode_rewards = torch.zeros(envs.num_envs, 1, device=device)
    episode_spls = torch.zeros(envs.num_envs, 1, device=device)
    episode_success = torch.zeros(envs.num_envs, 1, device=device)
    episode_counts = torch.zeros(envs.num_envs, 1, device=device)
    current_episode_reward = torch.zeros(envs.num_envs, 1, device=device)

    test_recurrent_hidden_states = torch.zeros(
        args.num_processes, args.hidden_size, device=device
    )
    not_done_masks = torch.zeros(args.num_processes, 1, device=device)
    stats_episodes = set()

    rgb_frames = None
    if args.video_option:
        rgb_frames = [[] for _ in range(args.num_processes)]
        os.makedirs(args.video_dir, exist_ok=True)

    while episode_counts.sum() < args.count_test_episodes:
        current_episodes = envs.current_episodes()

        with torch.no_grad():
            _, actions, _, test_recurrent_hidden_states = actor_critic.act(
                batch,
                test_recurrent_hidden_states,
                not_done_masks,
                deterministic=False,
            )

        outputs = envs.step([a[0].item() for a in actions])

        observations, rewards, dones, infos = [list(x) for x in zip(*outputs)]
        batch = batch_obs(observations)
        for sensor in batch:
            batch[sensor] = batch[sensor].to(device)

        not_done_masks = torch.tensor(
            [[0.0] if done else [1.0] for done in dones],
            dtype=torch.float,
            device=device,
        )

        for i in range(not_done_masks.shape[0]):
            if not_done_masks[i].item() == 0:
                episode_spls[i] += infos[i]["spl"]
                if infos[i]["spl"] > 0:
                    episode_success[i] += 1

        rewards = torch.tensor(
            rewards, dtype=torch.float, device=device
        ).unsqueeze(1)
        current_episode_reward += rewards
        episode_rewards += (1 - not_done_masks) * current_episode_reward
        episode_counts += 1 - not_done_masks
        current_episode_reward *= not_done_masks

        next_episodes = envs.current_episodes()
        envs_to_pause = []
        n_envs = envs.num_envs
        for i in range(n_envs):
            if next_episodes[i].episode_id in stats_episodes:
                envs_to_pause.append(i)

            # episode ended
            if not_done_masks[i].item() == 0:
                stats_episodes.add(current_episodes[i].episode_id)
                if args.video_option:
                    generate_video(
                        args,
                        rgb_frames[i],
                        current_episodes[i].episode_id,
                        cur_ckpt_idx,
                        infos[i]["spl"],
                        writer,
                    )
                    rgb_frames[i] = []

            # episode continues
            elif args.video_option:
                frame = observations_to_image(observations[i], infos[i])
                rgb_frames[i].append(frame)

        # stop tracking ended episodes if they exist
        if len(envs_to_pause) > 0:
            state_index = list(range(envs.num_envs))
            for idx in reversed(envs_to_pause):
                state_index.pop(idx)
                envs.pause_at(idx)

            # indexing along the batch dimensions
            test_recurrent_hidden_states = test_recurrent_hidden_states[
                state_index
            ]
            not_done_masks = not_done_masks[state_index]
            current_episode_reward = current_episode_reward[state_index]

            for k, v in batch.items():
                batch[k] = v[state_index]

            if args.video_option:
                rgb_frames = [rgb_frames[i] for i in state_index]

    episode_reward_mean = (episode_rewards / episode_counts).mean().item()
    episode_spl_mean = (episode_spls / episode_counts).mean().item()
    episode_success_mean = (episode_success / episode_counts).mean().item()

    logger.info("Average episode reward: {:.6f}".format(episode_reward_mean))
    logger.info("Average episode success: {:.6f}".format(episode_success_mean))
    logger.info("Average episode SPL: {:.6f}".format(episode_spl_mean))

    writer.add_scalars(
        "eval_reward", {"average reward": episode_reward_mean}, cur_ckpt_idx
    )
    writer.add_scalars(
        "eval_SPL", {"average SPL": episode_spl_mean}, cur_ckpt_idx
    )
    writer.add_scalars(
        "eval_success", {"average success": episode_success_mean}, cur_ckpt_idx
    )
Example #17
def habitat_objectnav_make_env_fn(cfg,
                                  rank,
                                  log_dir,
                                  visdom_name='main',
                                  visdom_log_file=None,
                                  vis_interval=200,
                                  visdom_server='localhost',
                                  visdom_port='8097'):

    habitat_cfg = get_config(cfg.task.habitat.config_file)

    training_scenes = [
        'PX4nDJXEHrG', '5q7pvUzZiYa', 'S9hNv5qa7GM', 'ac26ZMwG7aT',
        '29hnd4uzFmX', '82sE5b5pLXE', 'p5wJjkQkbXX', 'B6ByNegPMKs',
        '17DRP5sb8fy', 'pRbA3pwrgk9', 'gZ6f7yhEvPG', 'HxpKQynjfin',
        'ZMojNkEp431', '5LpN3gDmAk7', 'dhjEzFoUFzH', 'vyrNrziPKCB',
        'sKLMLpTHeUy', '759xd9YjKW5', 'sT4fr6TAbpF', '1pXnuDYAj8r',
        'E9uDoFAP3SH', 'GdvgFV5R1Z5', 'rPc6DW4iMge', 'D7N2EKCX4Sj',
        'uNb9QFRL6hY', 'VVfe2KiqLaN', 'Vvot9Ly1tCj', 's8pcmisQ38h',
        'EDJbREhghzL', 'YmJkqBEsHnH', 'XcA2TqTSSAj', '7y3sRwLe3Va',
        'e9zR4mvMWw7', 'JeFG25nYj2p', 'VLzqgDo317F', 'kEZ7cmS4wCh',
        'r1Q1Z4BcV1o', 'qoiz87JEwZ2', '1LXtFkjw3qL', 'VFuaQ6m2Qom',
        'b8cTxDM8gDG', 'ur6pFq6Qu1A', 'V2XKFyX4ASd', 'Uxmj2M2itWa',
        'Pm6F8kyY3z2', 'PuKPg4mmafe', '8WUmhLawc2A', 'ULsKaCPVFJR',
        'r47D5H71a5s', 'jh4fc5c5qoQ', 'JF19kD82Mey', 'D7G3Y4RVNrH',
        'cV4RVeZvu5T', 'mJXqzFtmKg4', 'i5noydFURQK', 'aayBHfsNo7d'
    ]

    length = len(training_scenes) // cfg.training.num_envs

    habitat_cfg.defrost()
    if rank < cfg.training.num_envs:
        habitat_cfg.DATASET.SPLIT = 'train'
    else:
        habitat_cfg.DATASET.SPLIT = 'val'

    cfg.defrost()
    habitat_cfg.TASK.CUSTOM_OBJECT_GOAL_SENSOR = habitat.Config()
    habitat_cfg.TASK.CUSTOM_OBJECT_GOAL_SENSOR.TYPE = 'CustomObjectSensor'
    habitat_cfg.TASK.CUSTOM_OBJECT_GOAL_SENSOR.GOAL_SPEC = "OBJECT_IMG"
    habitat_cfg.DATASET.CONTENT_SCENES = training_scenes[
        rank * length:(rank + 1) * length]
    habitat_cfg.freeze()
    cfg.task.habitat.TASK_CONFIG = habitat_cfg
    cfg.freeze()

    def filter_fn(episode):
        return episode.info['geodesic_distance'] <= 3.0

    dataset = make_dataset(habitat_cfg.DATASET.TYPE,
                           config=habitat_cfg.DATASET,
                           filter_fn=filter_fn)
    env = ObjectNavENV(cfg, dataset=dataset)
    env.seed(rank)
    env = VisdomMonitor(env,
                        directory=os.path.join(log_dir, visdom_name),
                        video_callable=lambda x: x % vis_interval == 0,
                        uid=str(rank),
                        server=visdom_server,
                        port=visdom_port,
                        visdom_log_file=visdom_log_file,
                        visdom_env=visdom_name)

    return env
Example #18
    def __init__(self, config_paths: Optional[str] = None) -> None:
        self.action_history: Dict = defaultdict()
        self.agg_metrics: Dict = defaultdict(float)
        config_env = get_config(config_paths)
        self._env = Env(config=config_env)
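A benchmark constructed this way is typically driven by a habitat.Agent. A sketch of a trivial agent, assuming the class shown is named Benchmark, the habitat.Agent interface (reset plus act(observations)), the action-dict convention used elsewhere in these examples, and the evaluate(agent, num_episodes) method seen in Example #9:

class ForwardOnlyAgent(habitat.Agent):
    def reset(self):
        pass

    def act(self, observations):
        # a real agent would condition on observations
        return {"action": "MOVE_FORWARD"}

benchmark = Benchmark("configs/tasks/pointnav.yaml")
metrics = benchmark.evaluate(ForwardOnlyAgent(), num_episodes=10)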
Example #19
def test_mp3d_eqa_sim_correspondence():
    eqa_config = get_config(CFG_TEST)

    if not mp3d_dataset.Matterport3dDatasetV1.check_config_paths_exist(
        eqa_config.DATASET
    ):
        pytest.skip(
            "Please download Matterport3D EQA dataset to data folder."
        )

    dataset = make_dataset(
        id_dataset=eqa_config.DATASET.TYPE, config=eqa_config.DATASET
    )
    env = habitat.Env(config=eqa_config, dataset=dataset)
    env.episodes = [
        episode
        for episode in dataset.episodes
        if int(episode.episode_id) in TEST_EPISODE_SET[:EPISODES_LIMIT]
    ]

    ep_i = 0
    cycles_n = 2
    while cycles_n > 0:
        env.reset()
        episode = env.current_episode
        assert (
            len(episode.goals) == 1
        ), "Episode has no goals or more than one."
        assert (
            len(episode.shortest_paths) == 1
        ), "Episode has no shortest paths or more than one."
        start_state = env.sim.get_agent_state()
        assert np.allclose(
            start_state.position, episode.start_position
        ), "Agent's start position diverges from the shortest path's one."

        rgb_mean = 0
        logger.info(
            "{id} {question}\n{answer}".format(
                id=episode.episode_id,
                question=episode.question.question_text,
                answer=episode.question.answer_text,
            )
        )

        for step_id, point in enumerate(episode.shortest_paths[0]):
            cur_state = env.sim.get_agent_state()

            logger.info(
                "diff position: {} diff rotation: {} "
                "cur_state.position: {} shortest_path.position: {} "
                "cur_state.rotation: {} shortest_path.rotation: {} action: {}"
                "".format(
                    cur_state.position - point.position,
                    cur_state.rotation - point.rotation,
                    cur_state.position,
                    point.position,
                    cur_state.rotation,
                    point.rotation,
                    point.action,
                )
            )

            assert np.allclose(
                [cur_state.position[0], cur_state.position[2]],
                [point.position[0], point.position[2]],
                atol=CLOSE_STEP_THRESHOLD * (step_id + 1),
            ), "Agent's path diverges from the shortest path."

            obs = env.step(point.action)

            if not env.episode_over:
                rgb_mean += obs["rgb"][:, :, :3].mean()

        if ep_i < len(RGB_EPISODE_MEANS):
            rgb_mean = rgb_mean / len(episode.shortest_paths[0])
            assert np.isclose(
                RGB_EPISODE_MEANS[int(episode.episode_id)], rgb_mean
            ), "RGB output doesn't match the ground truth."

        ep_i = (ep_i + 1) % EPISODES_LIMIT
        if ep_i == 0:
            cycles_n -= 1

    env.close()
Example #20
        if self.is_goal_reached():
            if self.located_true_goal:
                action = HabitatSimActions.STOP
            else:
                action = HabitatSimActions.TURN_LEFT
                self.needs_inspection = True
            return {"action": action}
        if self.unseen_obstacle:
            command = HabitatSimActions.TURN_RIGHT
            return command
        command = self.planner_prediction_to_command(self.waypointPose6D)
        return command


config = get_config("../habitat-api/configs/tasks/objectnav_mp3d_fast.yaml")
config.defrost()
# -----------------------------------------------------------------------------
# ORBSLAM2 BASELINE
# -----------------------------------------------------------------------------
config.ORBSLAM2 = CN()
config.ORBSLAM2.SLAM_VOCAB_PATH = "../habitat-api/habitat_baselines/slambased/data/ORBvoc.txt"
config.ORBSLAM2.SLAM_SETTINGS_PATH = (
    "../habitat-api/habitat_baselines/slambased/data/mp3d3_small1k.yaml")
config.ORBSLAM2.MAP_CELL_SIZE = 0.1
config.ORBSLAM2.MAP_SIZE = 40
config.ORBSLAM2.CAMERA_HEIGHT = config.SIMULATOR.DEPTH_SENSOR.POSITION[1]
config.ORBSLAM2.BETA = 100
config.ORBSLAM2.H_OBSTACLE_MIN = 0.3 * config.ORBSLAM2.CAMERA_HEIGHT
config.ORBSLAM2.H_OBSTACLE_MAX = 1.0 * config.ORBSLAM2.CAMERA_HEIGHT
config.ORBSLAM2.D_OBSTACLE_MIN = 0.1
Example #21
def test_get_observations_at():
    config = get_config()
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")
    config.defrost()
    config.TASK.SENSORS = []
    config.SIMULATOR.AGENT_0.SENSORS = ["RGB_SENSOR", "DEPTH_SENSOR"]
    config.freeze()
    env = habitat.Env(config=config, dataset=None)

    # start position is checked for validity for the specific test scene
    valid_start_position = [-1.3731, 0.08431, 8.60692]
    expected_pointgoal = [0.1, 0.2, 0.3]
    goal_position = np.add(valid_start_position, expected_pointgoal)

    # the starting quaternion is rotated 180 degrees about the z-axis, which
    # corresponds to the simulator using negative z as the forward direction
    start_rotation = [0, 0, 0, 1]

    env.episode_iterator = iter(
        [
            NavigationEpisode(
                episode_id="0",
                scene_id=config.SIMULATOR.SCENE,
                start_position=valid_start_position,
                start_rotation=start_rotation,
                goals=[NavigationGoal(position=goal_position)],
            )
        ]
    )
    non_stop_actions = [
        act
        for act in range(env.action_space.n)
        if act != SimulatorActions.STOP
    ]

    obs = env.reset()
    start_state = env.sim.get_agent_state()
    for _ in range(100):
        # Note, this test will not currently work for camera change actions
        # (look up/down), only for movement actions.
        new_obs = env.step(np.random.choice(non_stop_actions))
        for key, val in new_obs.items():
            agent_state = env.sim.get_agent_state()
            if not (
                np.allclose(agent_state.position, start_state.position)
                and np.allclose(agent_state.rotation, start_state.rotation)
            ):
                assert not np.allclose(val, obs[key])
        obs_at_point = env.sim.get_observations_at(
            start_state.position,
            start_state.rotation,
            keep_agent_at_new_pose=False,
        )
        for key, val in obs_at_point.items():
            assert np.allclose(val, obs[key])

    obs_at_point = env.sim.get_observations_at(
        start_state.position, start_state.rotation, keep_agent_at_new_pose=True
    )
    for key, val in obs_at_point.items():
        assert np.allclose(val, obs[key])
    agent_state = env.sim.get_agent_state()
    assert np.allclose(agent_state.position, start_state.position)
    assert np.allclose(agent_state.rotation, start_state.rotation)
    env.close()
Example #22
def test_noise_models_rgbd():
    DEMO_MODE = False
    N_STEPS = 100

    config = get_config()
    config.defrost()
    config.SIMULATOR.SCENE = (
        "data/scene_datasets/habitat-test-scenes/skokloster-castle.glb")
    config.SIMULATOR.AGENT_0.SENSORS = ["RGB_SENSOR", "DEPTH_SENSOR"]
    config.freeze()
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")

    valid_start_position = [-1.3731, 0.08431, 8.60692]

    expected_pointgoal = [0.1, 0.2, 0.3]
    goal_position = np.add(valid_start_position, expected_pointgoal)

    # the starting quaternion is rotated 180 degrees about the z-axis, which
    # corresponds to the simulator using negative z as the forward direction
    start_rotation = [0, 0, 0, 1]
    test_episode = NavigationEpisode(
        episode_id="0",
        scene_id=config.SIMULATOR.SCENE,
        start_position=valid_start_position,
        start_rotation=start_rotation,
        goals=[NavigationGoal(position=goal_position)],
    )

    print(f"{test_episode}")
    with habitat.Env(config=config, dataset=None) as env:

        env.episode_iterator = iter([test_episode])
        no_noise_obs = [env.reset()]
        no_noise_states = [env.sim.get_agent_state()]

        actions = [
            sample_non_stop_action(env.action_space) for _ in range(N_STEPS)
        ]
        for action in actions:
            no_noise_obs.append(env.step(action))
            no_noise_states.append(env.sim.get_agent_state())
        env.close()

        config.defrost()

        config.SIMULATOR.RGB_SENSOR.NOISE_MODEL = "GaussianNoiseModel"
        config.SIMULATOR.RGB_SENSOR.NOISE_MODEL_KWARGS = habitat.Config()
        config.SIMULATOR.RGB_SENSOR.NOISE_MODEL_KWARGS.INTENSITY_CONSTANT = 0.5
        config.SIMULATOR.DEPTH_SENSOR.NOISE_MODEL = "RedwoodDepthNoiseModel"

        config.SIMULATOR.ACTION_SPACE_CONFIG = "pyrobotnoisy"
        config.SIMULATOR.NOISE_MODEL = habitat.Config()
        config.SIMULATOR.NOISE_MODEL.ROBOT = "LoCoBot"
        config.SIMULATOR.NOISE_MODEL.CONTROLLER = "Proportional"
        config.SIMULATOR.NOISE_MODEL.NOISE_MULTIPLIER = 0.5

        config.freeze()

        env = habitat.Env(config=config, dataset=None)

        env.episode_iterator = iter([test_episode])

        obs = env.reset()
        assert np.linalg.norm(
            obs["rgb"].astype(np.float64)
            - no_noise_obs[0]["rgb"].astype(np.float64)
        ) > 1.5e-2 * np.linalg.norm(
            no_noise_obs[0]["rgb"].astype(np.float64)
        ), "No RGB noise detected."

        assert np.linalg.norm(
            obs["depth"].astype(np.float64)
            - no_noise_obs[0]["depth"].astype(np.float64)
        ) > 1.5e-2 * np.linalg.norm(
            no_noise_obs[0]["depth"].astype(np.float64)
        ), "No depth noise detected."

        images = []
        state = env.sim.get_agent_state()
        angle_diffs = []
        pos_diffs = []
        for action in actions:
            prev_state = state
            obs = env.step(action)
            state = env.sim.get_agent_state()
            position_change = np.linalg.norm(np.array(state.position) -
                                             np.array(prev_state.position),
                                             ord=2)

            if action["action"][:5] == "TURN_":
                angle_diff = abs(
                    angle_between_quaternions(state.rotation,
                                              prev_state.rotation) -
                    np.deg2rad(config.SIMULATOR.TURN_ANGLE))
                angle_diffs.append(angle_diff)
            else:
                pos_diffs.append(
                    abs(position_change - config.SIMULATOR.FORWARD_STEP_SIZE))

            if DEMO_MODE:
                images.append(observations_to_image(obs, {}))

        if DEMO_MODE:
            images_to_video(images, "data/video/test_noise", "test_noise")

        assert (np.mean(angle_diffs) >
                0.025), "No turn action actuation noise detected."
        assert (np.mean(pos_diffs) >
                0.025), "No forward action actuation noise detected."
Example #23
from typing import List, Optional, Union

from habitat.config.default import Config as CN
from habitat.config.default import get_config

_C = get_config()
_C.defrost()

# -----------------------------------------------------------------------------
# GPS SENSOR
# -----------------------------------------------------------------------------
_C.TASK.GLOBAL_GPS_SENSOR = CN()
_C.TASK.GLOBAL_GPS_SENSOR.TYPE = "GlobalGPSSensor"
_C.TASK.GLOBAL_GPS_SENSOR.DIMENSIONALITY = 3
# -----------------------------------------------------------------------------
# ORACLE ACTION SENSOR
# -----------------------------------------------------------------------------
_C.TASK.ORACLE_ACTION_SENSOR = CN()
_C.TASK.ORACLE_ACTION_SENSOR.TYPE = "OracleActionSensor"
_C.TASK.ORACLE_ACTION_SENSOR.GOAL_RADIUS = 0.5
# -----------------------------------------------------------------------------
# VLN ORACLE ACTION SENSOR
# -----------------------------------------------------------------------------
_C.TASK.VLN_ORACLE_ACTION_SENSOR = CN()
_C.TASK.VLN_ORACLE_ACTION_SENSOR.TYPE = "VLNOracleActionSensor"
_C.TASK.VLN_ORACLE_ACTION_SENSOR.GOAL_RADIUS = 0.5
# -----------------------------------------------------------------------------
# VLN ORACLE PROGRESS SENSOR
# -----------------------------------------------------------------------------
_C.TASK.VLN_ORACLE_PROGRESS_SENSOR = CN()
_C.TASK.VLN_ORACLE_PROGRESS_SENSOR.TYPE = "VLNOracleProgressSensor"
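The extended defaults above would typically be consumed by cloning _C and merging task options on top, mirroring habitat's own get_config. A sketch, assuming the standard yacs-style Config API (clone, merge_from_file, freeze); the YAML path is hypothetical:

def get_extended_config(config_path: Optional[str] = None) -> CN:
    config = _C.clone()
    if config_path:
        # e.g. "configs/tasks/vln_r2r.yaml" (hypothetical path)
        config.merge_from_file(config_path)
    config.freeze()
    return config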
Example #24
    def run_episodes(self):
        self.ep_idx = 0
        for episode in range(self.num_episodes):
            print("STARTING EPISODE ", episode)

            print("DELETING PREVIOUS FILES")
            # remove png files from previous episode
            filelist = glob.glob(
                os.path.join(self.orbslam_path, "rgb", "*.png"))
            for f in filelist:
                os.remove(f)
            filelist = glob.glob(
                os.path.join(self.orbslam_path, "depth", "*.png"))
            for f in filelist:
                os.remove(f)

            # mapname = np.random.choice(self.mapnames)
            mapname = self.mapnames[episode]  # KEEP THIS
            #mapname = 'apartment_0'
            #mapname = 'frl_apartment_4'
            self.test_scene = "/home/nel/gsarch/Replica-Dataset/out/{}/habitat/mesh_semantic.ply".format(
                mapname)
            self.object_json = "/home/nel/gsarch/Replica-Dataset/out/{}/habitat/info_semantic.json".format(
                mapname)
            # self.test_scene = "/hdd/replica/Replica-Dataset/out/{}/habitat/mesh_semantic.ply".format(mapname)
            # self.object_json = "/hdd/replica/Replica-Dataset/out/{}/habitat/info_semantic.json".format(mapname)
            self.sim_settings = {
                "width": 256,  # Spatial resolution of the observations
                "height": 256,
                "scene": self.test_scene,  # Scene path
                "default_agent": 0,
                "sensor_height": 1.5,  # Height of sensors in meters
                "color_sensor": True,  # RGB sensor
                "semantic_sensor": True,  # Semantic sensor
                "depth_sensor": True,  # Depth sensor
                "seed": 1,
            }

            # self.basepath = f"/home/nel/gsarch/replica_novel_categories/{mapname}_{episode}"
            self.basepath = '/home/nel/gsarch/replica_depth_temp'
            # if os.path.exists(self.basepath):
            #     os.remove(self.basepath + "/*")
            self.basepath = self.basepath + f"/{mapname}_{episode}"
            print("BASEPATH: ", self.basepath)

            # self.basepath = f"/hdd/ayushj/habitat_data/{mapname}_{episode}"
            if not os.path.exists(self.basepath):
                os.mkdir(self.basepath)

            self.cfg, self.sim_cfg = self.make_cfg(self.sim_settings)
            self.sim = habitat_sim.Simulator(self.cfg)
            random.seed(self.sim_settings["seed"])
            self.sim.seed(self.sim_settings["seed"])
            self.set_agent(self.sim_settings)
            self.nav_pts = self.get_navigable_points()

            config = get_config()
            config.defrost()
            config.TASK.SENSORS = []
            config.SIMULATOR.AGENT_0.SENSORS = ["RGB_SENSOR", "DEPTH_SENSOR"]
            config.freeze()

            self.run()

            self.sim.close()
            time.sleep(3)

            self.ep_idx += 1
Example #25
def test_pointgoal_with_gps_compass_sensor():
    config = get_config()
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")
    config.defrost()
    config.TASK.SENSORS = [
        "POINTGOAL_WITH_GPS_COMPASS_SENSOR",
        "COMPASS_SENSOR",
        "GPS_SENSOR",
        "POINTGOAL_SENSOR",
    ]
    config.TASK.POINTGOAL_WITH_GPS_COMPASS_SENSOR.DIMENSIONALITY = 3
    config.TASK.POINTGOAL_WITH_GPS_COMPASS_SENSOR.GOAL_FORMAT = "CARTESIAN"

    config.TASK.POINTGOAL_SENSOR.DIMENSIONALITY = 3
    config.TASK.POINTGOAL_SENSOR.GOAL_FORMAT = "CARTESIAN"

    config.TASK.GPS_SENSOR.DIMENSIONALITY = 3

    config.freeze()
    env = habitat.Env(config=config, dataset=None)

    # start position is checked for validity for the specific test scene
    valid_start_position = [-1.3731, 0.08431, 8.60692]
    expected_pointgoal = [0.1, 0.2, 0.3]
    goal_position = np.add(valid_start_position, expected_pointgoal)

    # the starting quaternion is rotated 180 degrees about the z-axis, which
    # corresponds to the simulator using negative z as the forward direction
    start_rotation = [0, 0, 0, 1]

    env.episode_iterator = iter(
        [
            NavigationEpisode(
                episode_id="0",
                scene_id=config.SIMULATOR.SCENE,
                start_position=valid_start_position,
                start_rotation=start_rotation,
                goals=[NavigationGoal(position=goal_position)],
            )
        ]
    )

    non_stop_actions = [
        act
        for act in range(env.action_space.n)
        if act != SimulatorActions.STOP
    ]
    env.reset()
    for _ in range(100):
        obs = env.step(np.random.choice(non_stop_actions))
        pointgoal = obs["pointgoal"]
        pointgoal_with_gps_compass = obs["pointgoal_with_gps_compass"]
        compass = obs["compass"]
        gps = obs["gps"]
        # pointgoal_with_gps_compass should equal the episodic pointgoal
        # transformed into the agent's frame via the GPS and compass readings
        assert np.allclose(
            pointgoal_with_gps_compass,
            quaternion_rotate_vector(
                quaternion.from_rotation_vector(
                    compass * np.array([0, 1, 0])
                ).inverse(),
                pointgoal - gps,
            ),
        )

    env.close()
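The assertion above encodes the identity v_agent = R_y(compass)^(-1) * (pointgoal - gps): the episodic goal offset, rotated from the start frame into the agent's current frame about the gravity (y) axis. A standalone numeric sketch of that rotation with illustrative values, assuming the numpy-quaternion package that the test itself relies on:

import numpy as np
import quaternion  # numpy-quaternion

compass = np.pi / 4                      # illustrative heading, radians about y
rot = quaternion.from_rotation_vector(compass * np.array([0.0, 1.0, 0.0]))
pointgoal = np.array([1.0, 0.0, -2.0])   # illustrative start-frame goal offset
gps = np.array([0.2, 0.0, -0.5])         # illustrative agent displacement
# rotate the remaining start-frame offset into the agent's current frame
agent_frame_goal = quaternion.as_rotation_matrix(rot.inverse()) @ (pointgoal - gps)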
Example #26
    def run_episodes(self):

        for episode in range(self.num_episodes):
            print("STARTING EPISODE ", episode)
            # mapname = np.random.choice(self.mapnames)
            mapname = self.mapnames[episode]  # KEEP THIS
            #mapname = 'apartment_0'
            #mapname = 'frl_apartment_4'
            self.test_scene = "/home/nel/gsarch/Replica-Dataset/out/{}/habitat/mesh_semantic.ply".format(
                mapname)
            self.object_json = "/home/nel/gsarch/Replica-Dataset/out/{}/habitat/info_semantic.json".format(
                mapname)
            # self.test_scene = "/hdd/replica/Replica-Dataset/out/{}/habitat/mesh_semantic.ply".format(mapname)
            # self.object_json = "/hdd/replica/Replica-Dataset/out/{}/habitat/info_semantic.json".format(mapname)
            self.sim_settings = {
                "width": 256,  # Spatial resolution of the observations
                "height": 256,
                "scene": self.test_scene,  # Scene path
                "default_agent": 0,
                "sensor_height": 1.5,  # Height of sensors in meters
                "color_sensor": True,  # RGB sensor
                "semantic_sensor": True,  # Semantic sensor
                "depth_sensor": True,  # Depth sensor
                "seed": 1,
            }

            self.basepath = f"/home/nel/gsarch/replica_context/{mapname}_{episode}"
            # self.basepath = f"/hdd/ayushj/habitat_data/{mapname}_{episode}"
            if not os.path.exists(self.basepath):
                os.mkdir(self.basepath)

            self.cfg, self.sim_cfg = self.make_cfg(self.sim_settings)
            self.sim = habitat_sim.Simulator(self.cfg)
            random.seed(self.sim_settings["seed"])
            self.sim.seed(self.sim_settings["seed"])
            self.set_agent(self.sim_settings)
            self.nav_pts = self.get_navigable_points()

            config = get_config()
            config.defrost()
            config.TASK.SENSORS = []
            config.SIMULATOR.AGENT_0.SENSORS = ["RGB_SENSOR", "DEPTH_SENSOR"]
            config.freeze()

            self.run()

            self.sim.close()
            time.sleep(3)

        fig, ax = plt.subplots(nrows=5, ncols=7, sharex=True, sharey=True)
        fig.text(0.5, 0.04, 'Distance between objects', ha='center')
        fig.text(0.04, 0.5, 'Density', va='center', rotation='vertical')
        keys = list(self.object_pair_dist.keys())
        idx = 0
        for row in ax:
            for col in row:
                try:
                    key = keys[idx]
                except IndexError:
                    break
                # plt.hist(self.object_pair_dist['indoor-plant/chair'], density=True, bins=10)
                vals = np.array(self.object_pair_dist[key])
                vals[vals > 10] = 9.99
                col.hist(vals, density=True, bins=20, range=[0, 10])
                col.set_ylim(top=1)
                col.set_title(key, fontsize=5, y=0.9)
                idx += 1
        plt.savefig("/home/nel/gsarch/replica_context/figure.png")
Example #27
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument("--model-path", type=str, required=True)
    parser.add_argument("--sim-gpu-id", type=int, required=True)
    parser.add_argument("--pth-gpu-id", type=int, required=True)
    parser.add_argument("--num-processes", type=int, required=True)
    parser.add_argument("--hidden-size", type=int, default=512)
    parser.add_argument("--count-test-episodes", type=int, default=100)
    parser.add_argument(
        "--sensors",
        type=str,
        default="DEPTH_SENSOR",
        help="comma separated string containing different"
        "sensors to use, currently 'RGB_SENSOR' and"
        "'DEPTH_SENSOR' are supported",
    )
    parser.add_argument(
        "--task-config",
        type=str,
        default="configs/tasks/pointnav.yaml",
        help="path to config yaml containing information about task",
    )

    cmd_line_inputs = [
        "--model-path",
        "/home/bruce/NSERC_2019/habitat-api/data/checkpoints/depth.pth",
        "--sim-gpu-id",
        "0",
        "--pth-gpu-id",
        "0",
        "--num-processes",
        "1",
        "--count-test-episodes",
        "100",
        "--task-config",
        "configs/tasks/pointnav.yaml",
    ]
    args = parser.parse_args(cmd_line_inputs)

    device = torch.device("cuda:{}".format(args.pth_gpu_id))

    env_configs = []
    baseline_configs = []

    for _ in range(args.num_processes):
        config_env = get_config(config_paths=args.task_config)
        config_env.defrost()
        config_env.DATASET.SPLIT = "val"

        agent_sensors = args.sensors.strip().split(",")
        for sensor in agent_sensors:
            assert sensor in ["RGB_SENSOR", "DEPTH_SENSOR"]
        config_env.SIMULATOR.AGENT_0.SENSORS = agent_sensors
        config_env.freeze()
        env_configs.append(config_env)

        config_baseline = cfg_baseline()
        baseline_configs.append(config_baseline)

    assert len(baseline_configs) > 0, "empty list of datasets"

    envs = habitat.VectorEnv(
        make_env_fn=make_env_fn,
        env_fn_args=tuple(
            zip(env_configs, baseline_configs, range(args.num_processes))
        ),
    )

    ckpt = torch.load(args.model_path, map_location=device)

    actor_critic = Policy(
        observation_space=envs.observation_spaces[0],
        action_space=envs.action_spaces[0],
        hidden_size=args.hidden_size,
        goal_sensor_uuid="pointgoal",
    )
    actor_critic.to(device)

    ppo = PPO(
        actor_critic=actor_critic,
        clip_param=0.1,
        ppo_epoch=4,
        num_mini_batch=32,
        value_loss_coef=0.5,
        entropy_coef=0.01,
        lr=2.5e-4,
        eps=1e-5,
        max_grad_norm=0.5,
    )

    ppo.load_state_dict(ckpt["state_dict"])

    actor_critic = ppo.actor_critic

    observations = envs.reset()
    batch = batch_obs(observations)
    for sensor in batch:
        batch[sensor] = batch[sensor].to(device)

    test_recurrent_hidden_states = torch.zeros(
        args.num_processes, args.hidden_size, device=device
    )
    not_done_masks = torch.zeros(args.num_processes, 1, device=device)

    def transform_callback(data):
        nonlocal actor_critic
        nonlocal batch
        nonlocal not_done_masks
        nonlocal test_recurrent_hidden_states
        global flag
        global t_prev_update
        global observation

        if flag == 2:
            observation["depth"] = np.reshape(data.data[0:-2], (256, 256, 1))
            observation["pointgoal"] = data.data[-2:]
            flag = 1
            return

        pointgoal_received = data.data[-2:]
        translate_amount = 0.25  # meters
        rotate_amount = 0.174533  # radians

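        # treat the rotation as complete once the pointgoal bearing has
        # changed by one turn increment, within a 5% tolerance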
        isrotated = (
            rotate_amount * 0.95
            <= abs(pointgoal_received[1] - observation["pointgoal"][1])
            <= rotate_amount * 1.05
        )
        istimeup = (time.time() - t_prev_update) >= 4

        # print('istranslated is '+ str(istranslated))
        # print('isrotated is '+ str(isrotated))
        # print('istimeup is '+ str(istimeup))

        if isrotated or istimeup:
            vel_msg = Twist()
            vel_msg.linear.x = 0
            vel_msg.linear.y = 0
            vel_msg.linear.z = 0
            vel_msg.angular.x = 0
            vel_msg.angular.y = 0
            vel_msg.angular.z = 0
            pub_vel.publish(vel_msg)
            time.sleep(0.2)
            print("entered update step")

            # cv2.imshow("Depth", observation['depth'])
            # cv2.waitKey(100)

            observation["depth"] = np.reshape(data.data[0:-2], (256, 256, 1))
            observation["pointgoal"] = data.data[-2:]

            batch = batch_obs([observation])
            for sensor in batch:
                batch[sensor] = batch[sensor].to(device)
            if flag == 1:
                not_done_masks = torch.tensor([0.0], dtype=torch.float, device=device)
                flag = 0
            else:
                not_done_masks = torch.tensor([1.0], dtype=torch.float, device=device)

            _, actions, _, test_recurrent_hidden_states = actor_critic.act(
                batch, test_recurrent_hidden_states, not_done_masks, deterministic=True
            )

            action_id = actions.item()
            print(
                "observation received to produce action_id is "
                + str(observation["pointgoal"])
            )
            print("action_id from net is " + str(actions.item()))

            t_prev_update = time.time()
            vel_msg = Twist()
            vel_msg.linear.x = 0
            vel_msg.linear.y = 0
            vel_msg.linear.z = 0
            vel_msg.angular.x = 0
            vel_msg.angular.y = 0
            vel_msg.angular.z = 0
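            # discrete action ids as interpreted here: 0 = forward,
            # 1 = turn left (+yaw), 2 = turn right (-yaw); anything else
            # stops the robot and ends the run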
            if action_id == 0:
                vel_msg.linear.x = 0.25 / 4
                pub_vel.publish(vel_msg)
            elif action_id == 1:
                vel_msg.angular.z = 10 / 180 * 3.1415926
                pub_vel.publish(vel_msg)
            elif action_id == 2:
                vel_msg.angular.z = -10 / 180 * 3.1415926
                pub_vel.publish(vel_msg)
            else:
                pub_vel.publish(vel_msg)
                sub.unregister()
                print("NN finished navigation task")

    sub = rospy.Subscriber(
        "depth_and_pointgoal", numpy_msg(Floats), transform_callback, queue_size=1
    )
    rospy.spin()
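The callback above relies on several globals that are defined outside this
excerpt: pub_vel, flag, t_prev_update, and observation. A minimal sketch of
that missing setup, assuming a standard cmd_vel topic and an arbitrary node
name (both are guesses, not taken from the original script):

import time

import rospy
from geometry_msgs.msg import Twist

flag = 2                     # 2: the next message only primes `observation`
t_prev_update = time.time()  # wall-clock time of the last velocity command
observation = {}             # latest "depth" and "pointgoal" readings

rospy.init_node("habitat_ppo_agent")  # node name is an assumption
pub_vel = rospy.Publisher("cmd_vel", Twist, queue_size=1)  # topic assumed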
Exemplo n.º 28
0
def __init__(self, config_paths: Optional[str] = None) -> None:
    config_env = get_config(config_paths)
    self._env = Env(config=config_env)
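This __init__ appears without its enclosing class; in habitat-api the same
pattern shows up in wrappers that own a habitat.Env built from a config path.
A sketch of the assumed surrounding class (the name EnvWrapper and the reset
passthrough are illustrative only, not from the source):

from typing import Optional

from habitat import Env
from habitat.config.default import get_config

class EnvWrapper:
    def __init__(self, config_paths: Optional[str] = None) -> None:
        config_env = get_config(config_paths)
        self._env = Env(config=config_env)

    def reset(self):
        # delegate episode resets to the wrapped habitat.Env
        return self._env.reset()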
Exemplo n.º 29
0
def generate_episodes():
    # -- Load scene files
    semantic_data = np.load(SCENE_INFO_PATH, allow_pickle=True).item()
    df_semantic = semantic_data["df_semantic"]
    df_objects = semantic_data["df_objects"]

    # filter classes
    selected_classes = MATCHING_CLASSES.keys()
    df_objects = df_objects[df_objects.class_name.apply(
        lambda x: x in selected_classes)]
    print("_____________CLASSES INFO_____________")
    print(df_objects.class_name.value_counts())
    print(f"Total count: {len(df_objects)}")
    print("_______________________________________")

    # -- Load config
    config = get_config(BASE_CFG_PATH)
    base_success = config.TASK.SUCCESS_DISTANCE

    env = habitat.Env(config)
    env.seed(config.SEED)
    random.seed(config.SEED)

    # Generate spiral coords
    spiral_shift = np.array(spiral(5, 0.001))

    dataset_episodes = []
    for room in df_objects.room.unique():
        print(f"Generating episodes for room {room}")
        # if room in ['office_1', 'office_2', 'room_2', 'frl_apartment_0', 'office_3',
        #        'frl_apartment_2', 'hotel_0', 'apartment_0', 'frl_apartment_5',
        #        'room_1', 'room_0', 'apartment_2', 'apartment_1']:
        #     continue

        scene_path = SCENE_MOCKUP_PATH.format(room)
        scene_df = df_objects[df_objects.room == room]

        config.defrost()
        config.SIMULATOR.SCENE = scene_path
        config.freeze()
        _random_episode(env, config)
        out = env.reset()

        # =====================================================================
        # Determine floor y-coordinate(s): sample navigable points, cluster
        # their heights, and treat gaps larger than 2.3 m as separate floors

        pts = []
        while len(pts) < 300:
            pt = env.sim.sample_navigable_point()
            if env.sim.island_radius(pt) > ISLAND_RADIUS_LIMIT:
                pts.append(pt[1])
        floor_coords = pd.Series(pts).value_counts().index.values
        floor_coord = []
        while len(floor_coords) > 0:
            floor_coord.append(floor_coords[0])
            floor_coords = floor_coords[floor_coords > (floor_coord[-1] + 2.3)]

        print(f"Room {room} has {len(floor_coord)} floors @ ({floor_coord})")
        print("objects", scene_df.class_name.unique())
        # =====================================================================

        for obj_idx, obj_row in scene_df.iterrows():
            print(f"-generating {NUM_EPISODES} for {obj_row['class_name']}")

            t_coord = obj_row["habitat_coord"]
            t_size = obj_row["habitat_size"]

            # Determine success distance
            h_to_obj = determine_height_to_object(t_coord, t_size, floor_coord)
            if h_to_obj is None:
                print(f"Coord under floor {obj_idx}, coord: {t_coord}")
                continue

            success_distance = h_to_obj + base_success + max(t_size) / 2.0

            generator = generate_pointnav_episode(
                sim=env.sim,
                target=t_coord,
                shortest_path_success_distance=success_distance,
                shortest_path_max_steps=config.ENVIRONMENT.MAX_EPISODE_STEPS,
                geodesic_to_euclid_min_ratio=1.1,
                number_retries_per_target=50,
                geodesic_min_ratio_prob=0.5,
                floor_coord=floor_coord,
                spiral_coord=spiral_shift,
            )

            episodes = []

            for i in range(NUM_EPISODES):
                episode = next(generator)

                # Add arguments
                episode.room = room
                episode.t_coord = t_coord
                episode.t_size = t_size
                episode.class_name = obj_row['class_name']

                episodes.append(episode)

            for episode in episodes:
                check_shortest_path(env, episode)

            dataset_episodes += episodes

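        # checkpoint everything accumulated so far after each room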
        np.save("dataset_all", dataset_episodes)

    dataset = habitat.Dataset()
    dataset.episodes = dataset_episodes
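The generator above accumulates everything into a habitat.Dataset but stops
before persisting it. Habitat datasets are conventionally written as gzipped
JSON via Dataset.to_json(); a minimal sketch, with an assumed output path:

import gzip

# Serialize the in-memory dataset; the path below is a placeholder.
with gzip.open("data/datasets/dataset_all.json.gz", "wt") as f:
    f.write(dataset.to_json())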
Exemplo n.º 30
0
if DATA_SPLIT == "train":
    SPLIT = "train"
    NUM_EPS_PER_LOCATION = 10 if DEBUG else 100
elif DATA_SPLIT == "val":
    NUM_EPS_PER_LOCATION = 10
    SPLIT = "val"
elif DATA_SPLIT == "test":
    NUM_EPS_PER_LOCATION = 10
    SPLIT = "test"
else:
    raise NotImplementedError("Not implemented for this split type")

OUTPUT_PATH = os.path.join("data", "datasets", "pointnav", DATASET, "v1",
                           SPLIT)
os.makedirs(OUTPUT_PATH, exist_ok=True)

config = get_config(CFG)
config.defrost()
config.TASK.success_distance = 0.2
config.TASK.success_reward = 10.0
config.TASK.slack_reward = -0.01
config.TASK.goal_type = "dense"
config.freeze()

# get list of all scenes
if DATASET == "mp3d":
    scenes = sorted(glob.glob("data/scene_datasets/mp3d/*"))
elif DATASET == "gibson":
    scenes = sorted(glob.glob("data/scene_datasets/gibson/*"))
else:
    raise NotImplementedError("Not implemented for this dataset")

num_scenes = len(scenes)
if DATA_SPLIT == "train":