def transform_observation_space(
     self,
     observation_space: spaces.Dict,
 ):
     size = self._size
     observation_space = copy.deepcopy(observation_space)
     if size:
         for key in observation_space.spaces:
             if key in self.trans_keys:
                 # In the observation space dict, the channels are always last
                 h, w = get_image_height_width(
                     observation_space.spaces[key], channels_last=True
                 )
                 if size == min(h, w):
                     continue
                 scale = size / min(h, w)
                 new_h = int(h * scale)
                 new_w = int(w * scale)
                 new_size = (new_h, new_w)
                 logger.info(
                     "Resizing observation of %s: from %s to %s"
                     % (key, (h, w), new_size)
                 )
                 observation_space.spaces[key] = overwrite_gym_box_shape(
                     observation_space.spaces[key], new_size
                 )
     return observation_space
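A small standalone sketch (not part of the snippet above; gym and the shapes here are illustrative) of the resize arithmetic this method applies: the shorter image side is scaled to size while the aspect ratio is preserved.

from gym import spaces
import numpy as np

rgb = spaces.Box(low=0, high=255, shape=(480, 640, 3), dtype=np.uint8)
size = 256                               # hypothetical target for the shorter edge
h, w = rgb.shape[0], rgb.shape[1]
scale = size / min(h, w)
print((int(h * scale), int(w * scale)))  # -> (256, 341)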
Example #2
def make_task(id_task, **kwargs):
    logger.info("Initializing task {}".format(id_task))
    _task = registry.get_task(id_task)
    assert _task is not None, "Could not find task with name {}".format(
        id_task)

    return _task(**kwargs)
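A hedged sketch of the registry pattern make_task relies on (requires habitat to be installed; the task class and name below are made up for illustration, not real habitat tasks):

from habitat.core.embodied_task import EmbodiedTask
from habitat.core.registry import registry

@registry.register_task(name="MyToyTask-v0")  # hypothetical task name
class MyToyTask(EmbodiedTask):
    pass

# make_task("MyToyTask-v0", ...) would now resolve the class via the registry:
assert registry.get_task("MyToyTask-v0") is MyToyTask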
Example #3
 def _check_episode_is_active(self, *args, **kwargs):
     logger.info(
         "Current agent position: {}".format(self._sim.get_agent_state())
     )
     collision = self._sim.previous_step_collided
     stop_not_called = not getattr(self, "is_stop_called", False)
     return collision or stop_not_called
Example #4
def test_object_nav_task():
    config = get_config(CFG_TEST)

    if not ObjectNavDatasetV1.check_config_paths_exist(config.DATASET):
        pytest.skip(
            "Please download Matterport3D scene and ObjectNav Datasets to data folder."
        )

    dataset = make_dataset(id_dataset=config.DATASET.TYPE,
                           config=config.DATASET)
    with habitat.Env(config=config, dataset=dataset) as env:
        for i in range(10):
            env.reset()
            while not env.episode_over:
                action = env.action_space.sample()
                habitat.logger.info(f"Action : "
                                    f"{action['action']}, "
                                    f"args: {action['action_args']}.")
                env.step(action)

            metrics = env.get_metrics()
            logger.info(metrics)

        with pytest.raises(AssertionError):
            env.step({"action": MoveForwardAction.name})
Example #5
    def submit(self, agent, run_dir, json_filename):
        metrics, eval_dct = self.evaluate_custom(agent)

        for k, v in metrics.items():
            logger.info("{}: {}".format(k, v))

        if not os.path.isdir(run_dir):
            os.makedirs(run_dir)
        with open(os.path.join(run_dir, json_filename), "w") as fo:
            json.dump(eval_dct, fo)
Example #6
 def write(self, frame):
     if self.writer is None:
         self.resolution = (frame.shape[1], frame.shape[0])
         self.writer = get_video_writer(self.filename, self.fps,
                                        self.resolution)
     else:
         res = (frame.shape[1], frame.shape[0])
         if res != self.resolution:
             logger.info(
                 f"Warning: video resolution mismatch expected={self.resolution}, frame={res}"
             )
     self.writer.write(frame)
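get_video_writer is not shown on this page; one plausible OpenCV-based implementation (an assumption for illustration, not necessarily the project's actual helper) looks like this:

import cv2

def get_video_writer(filename, fps, resolution):
    # resolution is (width, height), matching (frame.shape[1], frame.shape[0]) above
    fourcc = cv2.VideoWriter_fourcc(*"XVID")
    return cv2.VideoWriter(filename, fourcc, fps, resolution)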
Example #7
def check_json_serialization(dataset: habitat.Dataset):
    start_time = time.time()
    json_str = str(dataset.to_json())
    logger.info("JSON conversion finished. {} sec".format(
        (time.time() - start_time)))
    decoded_dataset = dataset.__class__()
    decoded_dataset.from_json(json_str)
    assert len(decoded_dataset.episodes) > 0
    episode = decoded_dataset.episodes[0]
    assert isinstance(episode, Episode)
    assert (decoded_dataset.to_json() == json_str
            ), "JSON dataset encoding/decoding isn't consistent"
Example #8
    def run(self, args):
        video_writer = None
        actions, info, observations = self.do_episode(
            overlay_goal_radar=args.overlay,
            show_map=args.show_map,
            video_writer=video_writer,
        )
        if not self.is_quit:
            agents = {}
            if "blind" in args.agent:
                agents["Blind"] = DemoBlindAgent()
            comparisons = self.get_comparisons(agents)
            comparisons["Yours"] = actions, info, observations
        else:
            comparisons = {"Yours": (actions, info, observations)}

        # Save the actions
        if args.save_actions is not None:
            save_info = {"config": args.task_config}
            self.save_comparisons(comparisons, args.save_actions, save_info)

        while not self.is_quit:
            # Display info about how well you did
            viewer = Viewer(observations,
                            overlay_goal_radar=args.overlay,
                            show_map=True)

            # Show other people's route
            shortcut_keys = self.show_comparisons(comparisons)

            # Wait for a key press; shortcut keys select a recorded route to replay
            keystroke = cv2.waitKey(0)
            selected_name = shortcut_keys.get(keystroke)
            if selected_name is not None:
                (actions, info, observations) = comparisons[selected_name]
                logger.info(f"Selected {selected_name}")
                video_writer = (VideoWriter(f"{selected_name}.avi")
                                if args.save_video else None)
                self.replay(
                    selected_name,
                    actions,
                    overlay_goal_radar=args.overlay,
                    delay=1,
                    video_writer=video_writer,
                )
                if video_writer is not None:
                    video_writer.release()
            else:
                action = self.action_keys_map.get(keystroke)
                if action is not None and action.is_quit:
                    self.is_quit = True
                    break
Example #9
    def __init__(self, config: Optional[Config] = None) -> None:
        self.config = config

        if config and not self.check_config_paths_exist(config):
            logger.info(
                "Rearrange task assets are not downloaded locally, downloading and extracting now..."
            )
            data_downloader.main(
                ["--uids", "rearrange_task_assets", "--no-replace"])
            logger.info("Downloaded and extracted the data.")

        check_and_gen_physics_config()

        super().__init__(config)
Example #10
    def __init__(
        self,
        config,
        action_keys: List[ActionKeyMapping],
        instructions: List[str],
    ):
        self.window_name = "Habitat"
        self.config = config
        self.instructions = instructions
        self.action_keys = action_keys
        self.action_keys_map = {k.key: k for k in self.action_keys}
        self.is_quit = False

        self.env = habitat.Env(config=self.config)
        logger.info("Environment creation successful")
Example #11
 def transform_observation_space(self,
                                 observation_space,
                                 trans_keys=("rgb", "depth", "semantic")):
     size = self._size
     observation_space = copy.deepcopy(observation_space)
     if size:
         for key in observation_space.spaces:
             if (key in trans_keys
                     and observation_space.spaces[key].shape != size):
                 logger.info("Overwriting CNN input size of %s: %s" %
                             (key, size))
                 observation_space.spaces[key] = overwrite_gym_box_shape(
                     observation_space.spaces[key], size)
     self.observation_space = observation_space
     return observation_space
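overwrite_gym_box_shape is used in several of these examples but not defined here; a rough standalone sketch of what such a helper does (the real habitat implementation may handle the bounds differently):

import numpy as np
from gym import spaces

def overwrite_box_shape(box, new_hw):
    # Keep any trailing dims (e.g. the channel dim) that new_hw does not cover.
    new_shape = tuple(new_hw) + box.shape[len(new_hw):]
    return spaces.Box(low=box.low.min(), high=box.high.max(),
                      shape=new_shape, dtype=box.dtype)

rgb = spaces.Box(low=0, high=255, shape=(480, 640, 3), dtype=np.uint8)
print(overwrite_box_shape(rgb, (256, 256)).shape)  # (256, 256, 3)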
Example #12
def check_json_serialization(dataset: habitat.Dataset):
    start_time = time.time()
    json_str = dataset.to_json()
    logger.info("JSON conversion finished. {} sec".format(
        (time.time() - start_time)))
    decoded_dataset = ObjectNavDatasetV1()
    decoded_dataset.from_json(json_str)
    assert len(decoded_dataset.episodes) == len(dataset.episodes)
    episode = decoded_dataset.episodes[0]
    assert isinstance(episode, Episode)

    # The strings won't match exactly as dictionaries don't have an order for the keys
    # Thus we need to parse the json strings and compare the serialized forms
    assert json.loads(decoded_dataset.to_json()) == json.loads(
        json_str), "JSON dataset encoding/decoding isn't consistent"
Example #13
    def do_episode(self,
                   overlay_goal_radar=False,
                   show_map=False,
                   video_writer=None):
        ''' Human-controlled navigation for one episode
        '''
        env = self.env
        action_keys_map = self.action_keys_map

        observations = env.reset(keep_current_episode=False)
        info = env.get_metrics()
        viewer = Viewer(
            observations,
            overlay_goal_radar=overlay_goal_radar,
            show_map=show_map,
        )
        img = viewer.draw_observations(observations, info)
        goal_radius = get_goal_radius(env)
        distance = observations["pointgoal"][0]
        self.update(
            add_text(
                img,
                [f"Distance {distance:.5}/{goal_radius:.5}"] +
                self.instructions,
            ))

        logger.info("Agent stepping around inside environment.")
        actions = []
        while not env.episode_over:
            keystroke = cv2.waitKey(0)

            action = action_keys_map.get(keystroke)
            if action is not None:
                logger.info(action.name)
                if action.is_quit:
                    self.is_quit = True
                    break
            else:
                logger.info("INVALID KEY")
                continue

            actions.append(action.action_id)
            observations = env.step(action.action_id)
            info = env.get_metrics()

            img = viewer.draw_observations(observations, info)
            distance = observations["pointgoal"][0]
            self.update(
                add_text(
                    img,
                    [f"Distance {distance:.5}/{goal_radius:.5}"] +
                    self.instructions,
                ),
                video_writer,
            )

        logger.info("Episode finished after {} steps.".format(len(actions)))
        return actions, info, observations
Example #14
    def get_agent_actions(self, agent):
        ''' Get actions for trained agent
        '''
        # NOTE: Action space for agent is hard coded (need to match our scenario)
        env = self.env
        observations = env.reset(keep_current_episode=True)
        agent.reset()
        actions = []
        while not env.episode_over:
            action = agent(observations)
            actions.append(action)
            observations = env.step(action)
            info = env.get_metrics()

        logger.info("Episode finished after {} steps.".format(len(actions)))
        return actions, info, observations
Example #15
def test_eqa_task():
    eqa_config = get_config(CFG_TEST)

    if not mp3d_dataset.Matterport3dDatasetV1.check_config_paths_exist(
        eqa_config.DATASET
    ):
        pytest.skip("Please download Matterport3D EQA dataset to data folder.")

    dataset = make_dataset(
        id_dataset=eqa_config.DATASET.TYPE, config=eqa_config.DATASET
    )
    with habitat.Env(config=eqa_config, dataset=dataset) as env:
        env.episodes = list(
            filter(
                lambda e: int(e.episode_id)
                in TEST_EPISODE_SET[:EPISODES_LIMIT],
                dataset.episodes,
            )
        )

        env.reset()

        for i in range(10):
            action = sample_non_stop_action(env.action_space)
            if action["action"] != AnswerAction.name:
                env.step(action)
            metrics = env.get_metrics()
            del metrics["episode_info"]
            logger.info(metrics)

        correct_answer_id = env.current_episode.question.answer_token
        env.step(
            {
                "action": AnswerAction.name,
                "action_args": {"answer_id": correct_answer_id},
            }
        )

        metrics = env.get_metrics()
        del metrics["episode_info"]
        logger.info(metrics)
        assert metrics["answer_accuracy"] == 1

        with pytest.raises(AssertionError):
            env.step({"action": MoveForwardAction.name})
Example #16
    def get_follower_actions(self, mode="geodesic_path"):
        ''' Get shortest path actions
        '''
        env = self.env
        observations = env.reset(keep_current_episode=True)
        goal_radius = get_goal_radius(env)
        follower = ShortestPathFollower(env.sim, goal_radius, False)
        follower.mode = mode
        actions = []
        while not env.episode_over:
            best_action = follower.get_next_action(
                env.current_episode.goals[0].position)
            actions.append(best_action.value)
            observations = env.step(best_action.value)
            info = env.get_metrics()

        logger.info("Episode finished after {} steps.".format(len(actions)))
        return actions, info, observations
Example #17
    def transform_observation_space(
        self,
        observation_space: spaces.Dict,
    ):
        size = self._size
        observation_space = copy.deepcopy(observation_space)
        if size:
            for key in observation_space.spaces:
                if (key in self.trans_keys and
                        observation_space.spaces[key].shape[-3:-1] != size):
                    h, w = get_image_height_width(
                        observation_space.spaces[key], channels_last=True)
                    logger.info(
                        "Center cropping observation size of %s from %s to %s"
                        % (key, (h, w), size))

                    observation_space.spaces[key] = overwrite_gym_box_shape(
                        observation_space.spaces[key], size)
        return observation_space
Example #18
 def transform_observation_space(
     self,
     observation_space: SpaceDict,
 ):
     r"""Transforms the target UUID's sensor obs_space so it matches the new shape (EQ_H, EQ_W)"""
     # Transforms the observation space of the target UUID
     observation_space = copy.deepcopy(observation_space)
     for i, key in enumerate(self.target_uuids):
         assert (
             key in observation_space.spaces
         ), f"{key} not found in observation space: {observation_space.spaces}"
         c = self.cubemap_length
         logger.info(
             f"Overwrite sensor: {key} from size of ({c}, {c}) to equirect image of {self.eq_shape} from sensors: {self.sensor_uuids[i*6:(i+1)*6]}"
         )
         if (c, c) != self.eq_shape:
             observation_space.spaces[key] = overwrite_gym_box_shape(
                 observation_space.spaces[key], self.eq_shape)
     return observation_space
Example #19
def images_to_video(
    images: List[np.ndarray],
    output_dir: str,
    video_name: str,
    fps: int = 10,
    quality: Optional[float] = 5,
    verbose: bool = True,
    **kwargs,
):
    r"""Calls imageio to run FFMPEG on a list of images. For more info on
    parameters, see https://imageio.readthedocs.io/en/stable/format_ffmpeg.html
    Args:
        images: The list of images. Images should be HxWx3 in RGB order.
        output_dir: The folder to put the video in.
        video_name: The name for the video.
        fps: Frames per second for the video. Not all values work with FFMPEG,
            use at your own risk.
        quality: Default is 5. Uses variable bit rate. Highest quality is 10,
            lowest is 0.  Set to None to prevent variable bitrate flags to
            FFMPEG so you can manually specify them using output_params
            instead. Specifying a fixed bitrate using ‘bitrate’ disables
            this parameter.
    """
    if quality is not None:
        assert 0 <= quality <= 10
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    video_name = video_name.replace(" ", "_").replace("\n", "_") + ".mp4"
    writer = imageio.get_writer(
        os.path.join(output_dir, video_name),
        fps=fps,
        quality=quality,
        **kwargs,
    )
    logger.info(f"Video created: {os.path.join(output_dir, video_name)}")
    if verbose:
        images_iter = tqdm.tqdm(images)
    else:
        images_iter = images
    for im in images_iter:
        writer.append_data(im)
    writer.close()
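A minimal usage sketch (assumes imageio with its ffmpeg plugin plus tqdm are installed; the frames here are random placeholders):

import numpy as np

frames = [np.random.randint(0, 256, (240, 320, 3), dtype=np.uint8) for _ in range(30)]
images_to_video(frames, output_dir="videos", video_name="random demo", fps=10)
# -> writes videos/random_demo.mp4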
Example #20
 def transform_observation_space(
     self,
     observation_space: spaces.Dict,
 ):
     r"""Transforms the target UUID's sensor obs_space so it matches the new shape (H, W)"""
     # Transforms the observation space of the target UUID
     for i, key in enumerate(self.target_uuids):
         assert (
             key in observation_space.spaces
         ), f"{key} not found in observation space: {observation_space.spaces}"
         h, w = get_image_height_width(observation_space.spaces[key],
                                       channels_last=True)
         in_len = self.converter.input_len
         logger.info(
             f"Overwrite sensor: {key} from size of ({h}, {w}) to image of"
             f" {self.img_shape} from sensors: {self.sensor_uuids[i*in_len:(i+1)*in_len]}"
         )
         if (h, w) != self.img_shape:
             observation_space.spaces[key] = overwrite_gym_box_shape(
                 observation_space.spaces[key], self.img_shape)
     return observation_space
Example #21
    def replay(
        self,
        name,
        actions,
        overlay_goal_radar=False,
        delay=1,
        video_writer=None,
    ):
        ''' Replay actions and show video
        '''
        # Set delay to 0 to wait for key presses before advancing
        env = self.env
        action_keys_map = self.action_keys_map

        observations = env.reset(keep_current_episode=True)
        info = env.get_metrics()
        viewer = Viewer(observations,
                        overlay_goal_radar=overlay_goal_radar,
                        show_map=True)
        img = viewer.draw_observations(observations, info)
        self.update(add_text(img, [name]))

        count_steps = 0
        for action_id in actions:
            # wait for key before advancing
            keystroke = cv2.waitKey(delay)
            action = action_keys_map.get(keystroke)
            if action is not None:
                if action.is_quit:
                    self.is_quit = True
                    break

            observations = env.step(action_id)
            info = env.get_metrics()
            count_steps += 1

            img = viewer.draw_observations(observations, info)
            self.update(add_text(img, [name]), video_writer)

        logger.info("Episode finished after {} steps.".format(count_steps))
Example #22
 def transform_observation_space(
     self,
     observation_space: spaces.Dict,
 ):
     r"""Transforms the target UUID's sensor obs_space so it matches the new shape (FISH_H, FISH_W)"""
     # Transforms the observation space of the target UUID
     for i, key in enumerate(self.target_uuids):
         assert (
             key in observation_space.spaces
         ), f"{key} not found in observation space: {observation_space.spaces}"
         h, w = get_image_height_width(observation_space.spaces[key],
                                       channels_last=True)
         assert (
             h == w
         ), f"cubemap height and width must be the same, but is {h} and {w}"
         logger.info(
             f"Overwrite sensor: {key} from size of ({h}, {w}) to fisheye image of {self.fish_shape} from sensors: {self.sensor_uuids[i*6:(i+1)*6]}"
         )
         if (h, w) != self.fish_shape:
             observation_space.spaces[key] = overwrite_gym_box_shape(
                 observation_space.spaces[key], self.fish_shape)
     return observation_space
Example #23
def test_rearrange_task(test_cfg_path):
    config = baselines_get_config(test_cfg_path)

    env_class = get_env_class(config.ENV_NAME)

    env = habitat_baselines.utils.env_utils.make_env_fn(
        env_class=env_class, config=config
    )

    with env:
        for _ in range(10):
            env.reset()
            done = False
            while not done:
                action = env.action_space.sample()
                habitat.logger.info(
                    f"Action : "
                    f"{action['action']}, "
                    f"args: {action['action_args']}."
                )
                _, _, done, info = env.step(action=action)

            logger.info(info)
Example #24
def test_rearrange_task():
    config = baselines_get_config(
        "habitat_baselines/config/rearrange/ddppo_rearrangepick.yaml")
    # if not RearrangeDatasetV0.check_config_paths_exist(config.TASK_CONFIG.DATASET):
    #     pytest.skip("Test skipped as dataset files are missing.")

    env_class = get_env_class(config.ENV_NAME)

    env = habitat_baselines.utils.env_utils.make_env_fn(env_class=env_class,
                                                        config=config)

    with env:
        for _ in range(10):
            env.reset()
            done = False
            while not done:
                action = env.action_space.sample()
                habitat.logger.info(f"Action : "
                                    f"{action['action']}, "
                                    f"args: {action['action_args']}.")
                _, _, done, info = env.step(action=action)

            logger.info(info)
Example #25
    def _setup_eval_config(self, checkpoint_config: Config) -> Config:
        r"""Sets up and returns a merged config for evaluation. Config
            object saved from checkpoint is merged into config file specified
            at evaluation time with the following overwrite priority:
                  eval_opts > ckpt_opts > eval_cfg > ckpt_cfg
            If the saved config is outdated, only the eval config is returned.

        Args:
            checkpoint_config: saved config from checkpoint.

        Returns:
            Config: merged config for eval.
        """

        config = self.config.clone()
        config.defrost()

        ckpt_cmd_opts = checkpoint_config.CMD_TRAILING_OPTS
        eval_cmd_opts = config.CMD_TRAILING_OPTS

        try:
            config.merge_from_other_cfg(checkpoint_config)
            config.merge_from_other_cfg(self.config)
            config.merge_from_list(ckpt_cmd_opts)
            config.merge_from_list(eval_cmd_opts)
        except KeyError:
            logger.info("Saved config is outdated, using solely eval config")
            config = self.config.clone()
            config.merge_from_list(eval_cmd_opts)
        if config.TASK_CONFIG.DATASET.SPLIT == "train":
            config.TASK_CONFIG.defrost()
            config.TASK_CONFIG.DATASET.SPLIT = "val"

        config.TASK_CONFIG.SIMULATOR.AGENT_0.SENSORS = self.config.SENSORS
        config.freeze()

        return config
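A small standalone sketch of the overwrite priority described in the docstring, using yacs directly (the keys below are made up; habitat's real configs are far larger):

from yacs.config import CfgNode as CN

ckpt_cfg = CN({"LR": 1e-3, "SEED": 1})    # config stored in the checkpoint
eval_cfg = CN({"LR": 2.5e-4, "SEED": 7})  # config supplied at evaluation time

merged = eval_cfg.clone()
merged.merge_from_other_cfg(ckpt_cfg)     # ckpt_cfg values come in first...
merged.merge_from_other_cfg(eval_cfg)     # ...then eval_cfg overwrites them
merged.merge_from_list(["SEED", 42])      # trailing opts win over everything
print(merged.LR, merged.SEED)             # 0.00025 42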
Example #26
 def submit(self, agent, num_episodes=None, skip_first_n=0):
     metrics = super().evaluate(agent, num_episodes=num_episodes, skip_first_n=skip_first_n)
     for k, v in metrics.items():
         logger.info("{}: {}".format(k, v))
Example #27
    def _worker_env(
        connection_read_fn: Callable,
        connection_write_fn: Callable,
        env_fn: Callable,
        env_fn_args: Tuple[Any],
        auto_reset_done: bool,
        child_pipe: Optional[Connection] = None,
        parent_pipe: Optional[Connection] = None,
    ) -> None:
        r"""process worker for creating and interacting with the environment.
        """
        env = env_fn(*env_fn_args)
        if parent_pipe is not None:
            parent_pipe.close()
        try:
            command, data = connection_read_fn()
            while command != CLOSE_COMMAND:
                if command == STEP_COMMAND:
                    # different step methods for habitat.RLEnv and habitat.Env
                    if isinstance(env, habitat.RLEnv) or isinstance(
                            env, gym.Env):
                        # habitat.RLEnv
                        observations, reward, done, info = env.step(**data)
                        if auto_reset_done and done:
                            observations = env.reset()
                        connection_write_fn((observations, reward, done, info))
                    elif isinstance(env, habitat.Env):
                        # habitat.Env
                        observations = env.step(**data)
                        if auto_reset_done and env.episode_over:
                            observations = env.reset()
                        connection_write_fn(observations)
                    else:
                        raise NotImplementedError

                elif command == RESET_COMMAND:
                    observations = env.reset()
                    connection_write_fn(observations)

                elif command == RENDER_COMMAND:
                    connection_write_fn(env.render(*data[0], **data[1]))

                elif (command == OBSERVATION_SPACE_COMMAND
                      or command == ACTION_SPACE_COMMAND):
                    if isinstance(command, str):
                        connection_write_fn(getattr(env, command))

                elif command == CALL_COMMAND:
                    function_name, function_args = data
                    if function_args is None or len(function_args) == 0:
                        result = getattr(env, function_name)()
                    else:
                        result = getattr(env, function_name)(**function_args)
                    connection_write_fn(result)

                # TODO: update CALL_COMMAND for getting attribute like this
                elif command == EPISODE_COMMAND:
                    connection_write_fn(env.current_episode)
                else:
                    raise NotImplementedError

                command, data = connection_read_fn()

            if child_pipe is not None:
                child_pipe.close()
        except KeyboardInterrupt:
            logger.info("Worker KeyboardInterrupt")
        finally:
            env.close()
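A much-reduced sketch of how a worker like this is wired up over a multiprocessing pipe (only the command loop is mirrored; habitat's real VectorEnv adds more commands plus error handling):

import multiprocessing as mp

STEP_COMMAND, CLOSE_COMMAND = "step", "close"

def _toy_worker(connection_read_fn, connection_write_fn):
    # Block on the pipe and handle commands until asked to close.
    command, data = connection_read_fn()
    while command != CLOSE_COMMAND:
        if command == STEP_COMMAND:
            connection_write_fn("stepped with {}".format(data))
        command, data = connection_read_fn()

if __name__ == "__main__":
    parent_conn, child_conn = mp.Pipe()
    worker = mp.Process(target=_toy_worker, args=(child_conn.recv, child_conn.send))
    worker.start()
    parent_conn.send((STEP_COMMAND, {"action": 0}))
    print(parent_conn.recv())               # stepped with {'action': 0}
    parent_conn.send((CLOSE_COMMAND, None))
    worker.join()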
Example #28
def make_sim(id_sim, **kwargs):
    logger.info("initializing sim {}".format(id_sim))
    _sim = registry.get_simulator(id_sim)
    assert _sim is not None, "Could not find simulator with name {}".format(
        id_sim)
    return _sim(**kwargs)
Example #29
 def _check_agent_position(self, position, agent_id=0) -> bool:
     if not np.allclose(position, self.get_agent_state(agent_id).position):
         logger.info("Agent state diverges from configured start position.")
         return False
     return True
Example #30
def make_dataset(id_dataset, **kwargs):
    logger.info("Initializing dataset {}".format(id_dataset))
    _dataset = registry.get_dataset(id_dataset)
    assert _dataset is not None, "Could not find dataset {}".format(id_dataset)

    return _dataset(**kwargs)  # type: ignore
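A hedged usage sketch mirroring how the tests above call this factory (requires habitat plus the referenced dataset files on disk; the config path is the standard pointnav example and may differ in your checkout):

import habitat

config = habitat.get_config("configs/tasks/pointnav.yaml")
dataset = make_dataset(id_dataset=config.DATASET.TYPE, config=config.DATASET)
print(len(dataset.episodes))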