Example #1
import habitat
from habitat.sims.habitat_simulator.actions import HabitatSimActions


def main():
    HabitatSimActions.extend_action_space("STRAFE_LEFT")
    HabitatSimActions.extend_action_space("STRAFE_RIGHT")

    config = habitat.get_config(config_paths="configs/tasks/pointnav.yaml")
    config.defrost()

    config.TASK.POSSIBLE_ACTIONS = config.TASK.POSSIBLE_ACTIONS + [
        "STRAFE_LEFT",
        "STRAFE_RIGHT",
    ]
    config.TASK.ACTIONS.STRAFE_LEFT = habitat.config.Config()
    config.TASK.ACTIONS.STRAFE_LEFT.TYPE = "StrafeLeft"
    config.TASK.ACTIONS.STRAFE_RIGHT = habitat.config.Config()
    config.TASK.ACTIONS.STRAFE_RIGHT.TYPE = "StrafeRight"
    config.SIMULATOR.ACTION_SPACE_CONFIG = "NoNoiseStrafe"
    config.freeze()

    with habitat.Env(config=config) as env:
        env.reset()
        env.step("STRAFE_LEFT")
        env.step("STRAFE_RIGHT")

    config.defrost()
    config.SIMULATOR.ACTION_SPACE_CONFIG = "NoiseStrafe"
    config.freeze()

    with habitat.Env(config=config) as env:
        env.reset()
        env.step("STRAFE_LEFT")
        env.step("STRAFE_RIGHT")
Example #2
def make_env_fn(
    # config: Config, env_class: Type[Union[Env, RLEnv]], rank: int
    kwargs, rank: int
) -> Union[Env, RLEnv]:
    r"""Creates an env of type env_class with specified config and rank.
    This is to be passed in as an argument when creating VectorEnv.

    Args:
        config: root exp config that has core env config node as well as
            env-specific config node.
        env_class: class type of the env to be created.
        rank: rank of env to be created (for seeding).

    Returns:
        env object created according to specification.
    """
    # dataset = make_dataset(
    #     config.TASK_CONFIG.DATASET.TYPE, config=config.TASK_CONFIG.DATASET
    # )
    # env = env_class(config=config, dataset=dataset)

    if not HabitatSimActions.has_action('TURN_ANGLE'):
        HabitatSimActions.extend_action_space('TURN_ANGLE')
    wrappers = kwargs.pop('wrappers')
    env = Habitat(**kwargs)
    for wrapper_cls, params in wrappers:
        env = wrapper_cls(env, **params)
    env.seed(rank)
    return env
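The docstring notes this factory is meant for habitat.VectorEnv. A sketch of the wiring, purely illustrative: the kwargs keys ('config', 'wrappers') are assumptions about what the project's Habitat wrapper accepts, and base_config stands in for a shared habitat Config.

import habitat

# Each inner tuple supplies make_env_fn's positional args: (kwargs, rank).
# 'wrappers' is popped inside the factory before the env is constructed.
env_fn_args = tuple(
    ({"config": base_config, "wrappers": []}, rank) for rank in range(4)
)
envs = habitat.VectorEnv(make_env_fn=make_env_fn, env_fn_args=env_fn_args)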
Example #3
File: habitat.py  Project: Danmou/MerCur-Re
def get_config(training: bool = False,
               top_down_map: bool = False,
               max_steps: Optional[Union[int, float]] = None,
               task: str = 'pointnav',
               train_dataset: str = 'habitat_test',
               train_split: str = 'train',
               eval_dataset: str = 'habitat_test',
               eval_split: str = 'val',
               gpu_id: int = 0,
               image_key: str = 'rgb',
               goal_key: str = 'pointgoal_with_gps_compass',
               depth_key: Optional[str] = None,
               reward_function: RewardFunction = gin.REQUIRED,
               eval_episodes_per_scene: int = 3) -> Dict[str, Any]:
    mode = 'train' if training else 'eval'
    dataset = train_dataset if training else eval_dataset
    split = train_split if training else eval_split
    opts = ['SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID', gpu_id]
    if max_steps:
        opts += ['ENVIRONMENT.MAX_EPISODE_STEPS', int(max_steps)]
    opts += ['DATASET.SPLIT', split]
    if not training:
        opts += ['ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE', False]
        opts += [
            'ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_EPISODES',
            eval_episodes_per_scene
        ]
    if not task.endswith('.yaml'):
        task = f'{get_config_dir()}/habitat/tasks/{task}.yaml'
    if not dataset.endswith('.yaml'):
        dataset = f'{get_config_dir()}/habitat/datasets/{dataset}.yaml'
    task_file = Path(wandb.run.dir) / 'task.yaml'
    dataset_file = Path(wandb.run.dir) / f'{mode}_dataset.yaml'
    copyfile(task, task_file)
    copyfile(dataset, dataset_file)
    wandb.save(str(task_file))
    wandb.save(str(dataset_file))
    if not HabitatSimActions.has_action('TURN_ANGLE'):
        HabitatSimActions.extend_action_space('TURN_ANGLE')
    config = habitat.get_config([task, dataset], opts)
    config.defrost()
    config.TASK.SUCCESS = habitat.Config()
    config.TASK.SUCCESS.TYPE = 'Success'
    config.TASK.SUCCESS.SUCCESS_DISTANCE = config.TASK.SUCCESS_DISTANCE
    config.TASK.MEASUREMENTS.append('SUCCESS')
    config.TASK.ACTIONS.TURN_ANGLE = habitat.Config()
    config.TASK.ACTIONS.TURN_ANGLE.TYPE = 'TurnAngleAction'
    config.TASK.POSSIBLE_ACTIONS = ['STOP', 'MOVE_FORWARD', 'TURN_ANGLE']
    if top_down_map and 'TOP_DOWN_MAP' not in config.TASK.MEASUREMENTS:
        # Top-down map is expensive to compute, so we only enable it when needed.
        config.TASK.MEASUREMENTS.append('TOP_DOWN_MAP')
    config.freeze()
    return {
        'config': config,
        'image_key': image_key,
        'goal_key': goal_key,
        'depth_key': depth_key,
        'reward_function': reward_function
    }
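The 'TurnAngleAction' type referenced above is defined elsewhere in the MerCur-Re project. A hypothetical sketch of such a continuous-turn task action, assuming habitat-lab's registry API and the numpy-quaternion package; the real implementation may differ:

import habitat
import numpy as np
import quaternion  # numpy-quaternion
from habitat.core.embodied_task import SimulatorTaskAction


@habitat.registry.register_task_action
class TurnAngleAction(SimulatorTaskAction):
    def _get_uuid(self, *args, **kwargs) -> str:
        return "turn_angle"

    def step(self, angle: float, *args, **kwargs):
        # Rotate the agent in place about the world up-axis by `angle`
        # radians, then render observations from the new pose.
        state = self._sim.get_agent_state()
        turn = quaternion.from_rotation_vector(angle * np.array([0.0, 1.0, 0.0]))
        return self._sim.get_observations_at(
            state.position, turn * state.rotation, keep_agent_at_new_pose=True
        )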
Example #4
import cv2
import habitat
from habitat.sims.habitat_simulator.actions import HabitatSimActions


def main():
    HabitatSimActions.extend_action_space("STRAFE_LEFT")
    HabitatSimActions.extend_action_space("STRAFE_RIGHT")

    config = habitat.get_config(config_paths="habitat-configs/pointnav.yaml")
    config.defrost()

    config.TASK.POSSIBLE_ACTIONS = config.TASK.POSSIBLE_ACTIONS + [
        "STRAFE_LEFT",
        "STRAFE_RIGHT",
    ]
    config.TASK.ACTIONS.STRAFE_LEFT = habitat.config.Config()
    config.TASK.ACTIONS.STRAFE_LEFT.TYPE = "StrafeLeft"
    config.TASK.ACTIONS.STRAFE_RIGHT = habitat.config.Config()
    config.TASK.ACTIONS.STRAFE_RIGHT.TYPE = "StrafeRight"
    config.SIMULATOR.ACTION_SPACE_CONFIG = "NoNoiseStrafe"
    config.freeze()

    env = habitat.Env(config=config)
    o1 = env.reset()
    print(env._sim.get_agent_state())
    curr_pose = env._sim.get_agent_state().position
    curr_pose = {'x': curr_pose[0], 'y': curr_pose[1], 'z': curr_pose[2]}
    img1 = cv2.resize(o1['rgb'], (512, 384), interpolation=cv2.INTER_LINEAR)

    env.step("STRAFE_LEFT")
    env.step("STRAFE_LEFT")
    env.step("STRAFE_LEFT")
    env.step("STRAFE_LEFT")
    o2 = env.step("STRAFE_LEFT")

    print(env._sim.get_agent_state())
    img2 = cv2.resize(o2['rgb'], (512, 384), interpolation=cv2.INTER_LINEAR)

    calc_error(img1, img2, curr_pose)
    env.step("STRAFE_RIGHT")
    env.step("STRAFE_RIGHT")
    env.step("STRAFE_RIGHT")
    o3 = env.step("STRAFE_RIGHT")
    curr_pose = env._sim.get_agent_state().position
    curr_pose = {'x': curr_pose[0], 'y': curr_pose[1], 'z': curr_pose[2]}

    img3 = cv2.resize(o3['rgb'], (512, 384), interpolation=cv2.INTER_LINEAR)
    calc_error(img1, img3, curr_pose)

    env.close()
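calc_error is defined elsewhere in this snippet's source project and is not shown on this page. A minimal stand-in so the example runs end to end, purely a placeholder:

import numpy as np


def calc_error(img_a, img_b, pose):
    # Hypothetical placeholder: report the mean squared pixel difference
    # between two resized RGB frames alongside the agent pose.
    mse = np.mean((img_a.astype(np.float32) - img_b.astype(np.float32)) ** 2)
    print(f"pose={pose} mse={mse:.2f}")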
Example #5
    def __init__(self, args, rank, config_env, config_baseline, dataset):
        if args.visualize:
            plt.ion()
        if args.print_images or args.visualize:
            self.figure, self.ax = plt.subplots(1,
                                                2,
                                                figsize=(6 * 16 / 9, 6),
                                                facecolor="whitesmoke",
                                                num="Thread {}".format(rank))

        self.args = args
        self.num_actions = 3
        self.dt = 10

        self.rank = rank

        # Pre-computed actuation noise models, one per action.
        with open("noise_models/sensor_noise_fwd.pkl", 'rb') as f:
            self.sensor_noise_fwd = pickle.load(f)
        with open("noise_models/sensor_noise_right.pkl", 'rb') as f:
            self.sensor_noise_right = pickle.load(f)
        with open("noise_models/sensor_noise_left.pkl", 'rb') as f:
            self.sensor_noise_left = pickle.load(f)

        HabitatSimActions.extend_action_space("NOISY_FORWARD")
        HabitatSimActions.extend_action_space("NOISY_RIGHT")
        HabitatSimActions.extend_action_space("NOISY_LEFT")

        config_env.defrost()
        config_env.SIMULATOR.ACTION_SPACE_CONFIG = \
                "CustomActionSpaceConfiguration"
        # config_env.TASK.POSSIBLE_ACTIONS = config_env.TASK.POSSIBLE_ACTIONS + ['NOISY_FORWARD', 'NOISY_RIGHT', 'NOISY_LEFT']
        # config_env.TASK.ACTIONS.NOISY_FORWARD = habitat.config.Config()
        # config_env.TASK.ACTIONS.NOISY_FORWARD.TYPE = "NoisyForward"
        # config_env.TASK.ACTIONS.NOISY_RIGHT = habitat.config.Config()
        # config_env.TASK.ACTIONS.NOISY_RIGHT.TYPE = "NoisyRight"
        # config_env.TASK.ACTIONS.NOISY_LEFT = habitat.config.Config()
        # config_env.TASK.ACTIONS.NOISY_LEFT.TYPE = "NoisyLeft"
        config_env.freeze()

        super().__init__(config_env, dataset)

        self.action_space = gym.spaces.Discrete(self.num_actions)

        self.observation_space = gym.spaces.Box(
            0, 255, (3, args.frame_height, args.frame_width), dtype='uint8')

        self.mapper = self.build_mapper()

        self.episode_no = 0

        self.res = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize((args.frame_height, args.frame_width),
                              interpolation=Image.NEAREST)
        ])
        self.scene_name = None
        self.maps_dict = {}
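The "CustomActionSpaceConfiguration" this constructor points the simulator at must be registered elsewhere in the project. One possible wiring for the NOISY_* actions, sketched here with habitat-sim's built-in PyRobot noise model; the source project may instead apply its pickled noise models inside step(), and the actuation amounts are assumptions:

import habitat_sim
from habitat.core.registry import registry
from habitat.sims.habitat_simulator.actions import (
    HabitatSimActions,
    HabitatSimV1ActionSpaceConfiguration,
)


@registry.register_action_space_configuration(
    name="CustomActionSpaceConfiguration"
)
class CustomActionSpaceConfiguration(HabitatSimV1ActionSpaceConfiguration):
    def get(self):
        config = super().get()
        # Noisy variants of the three discrete actions; amounts assumed
        # to be a 0.25 m step and 10 degree turns.
        config[HabitatSimActions.NOISY_FORWARD] = habitat_sim.ActionSpec(
            "pyrobot_noisy_move_forward",
            habitat_sim.PyRobotNoisyActuationSpec(amount=0.25),
        )
        config[HabitatSimActions.NOISY_LEFT] = habitat_sim.ActionSpec(
            "pyrobot_noisy_turn_left",
            habitat_sim.PyRobotNoisyActuationSpec(amount=10.0),
        )
        config[HabitatSimActions.NOISY_RIGHT] = habitat_sim.ActionSpec(
            "pyrobot_noisy_turn_right",
            habitat_sim.PyRobotNoisyActuationSpec(amount=10.0),
        )
        return config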
Example #6
def _try_register_rearrange_task():
    try:
        import habitat.tasks.rearrange.rearrange_sensors
    except ImportError as e:
        print(e)

    try:
        import habitat.tasks.rearrange.rearrange_pick_task
        import habitat.tasks.rearrange.rearrange_task

    except ImportError as e:
        print(e)
        rearrangetask_import_error = e

        @registry.register_task(name="Rearrange-v0")
        class RearrangeTaskImportError(EmbodiedTask):
            def __init__(self, *args, **kwargs):
                raise rearrangetask_import_error

    # Register actions
    import habitat.tasks.rearrange.actions

    if not HabitatSimActions.has_action("ARM_ACTION"):
        HabitatSimActions.extend_action_space("ARM_ACTION")
    if not HabitatSimActions.has_action("ARM_VEL"):
        HabitatSimActions.extend_action_space("ARM_VEL")
    if not HabitatSimActions.has_action("MAGIC_GRASP"):
        HabitatSimActions.extend_action_space("MAGIC_GRASP")
    if not HabitatSimActions.has_action("BASE_VELOCITY"):
        HabitatSimActions.extend_action_space("BASE_VELOCITY")
    if not HabitatSimActions.has_action("ARM_EE"):
        HabitatSimActions.extend_action_space("ARM_EE")
    if not HabitatSimActions.has_action("EMPTY"):
        HabitatSimActions.extend_action_space("EMPTY")
Example #7
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import habitat_sim
from habitat.core.registry import registry
from habitat.core.simulator import ActionSpaceConfiguration
from habitat.sims.habitat_simulator.actions import HabitatSimActions

HabitatSimActions.extend_action_space("MOVE_BACKWARD")
HabitatSimActions.extend_action_space("MOVE_LEFT")
HabitatSimActions.extend_action_space("MOVE_RIGHT")


@registry.register_action_space_configuration(name="move-all")
class MoveOnlySpaceConfiguration(ActionSpaceConfiguration):
    def get(self):
        return {
            HabitatSimActions.STOP: habitat_sim.ActionSpec("stop"),
            HabitatSimActions.MOVE_FORWARD: habitat_sim.ActionSpec(
                "move_forward",
                habitat_sim.ActuationSpec(
                    amount=self.config.FORWARD_STEP_SIZE
                ),
            ),
            HabitatSimActions.MOVE_BACKWARD: habitat_sim.ActionSpec(
                "move_backward",
                habitat_sim.ActuationSpec(
                    amount=self.config.FORWARD_STEP_SIZE
                ),
            ),
            # Lateral moves reuse the forward step size.
            HabitatSimActions.MOVE_LEFT: habitat_sim.ActionSpec(
                "move_left",
                habitat_sim.ActuationSpec(
                    amount=self.config.FORWARD_STEP_SIZE
                ),
            ),
            HabitatSimActions.MOVE_RIGHT: habitat_sim.ActionSpec(
                "move_right",
                habitat_sim.ActuationSpec(
                    amount=self.config.FORWARD_STEP_SIZE
                ),
            ),
        }
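To activate this configuration, a task config must reference it by its registry name and expose matching task-level actions (registered the same way as the strafe actions in Example #1). A sketch, assuming a standard pointnav config path:

import habitat

config = habitat.get_config(config_paths="configs/tasks/pointnav.yaml")
config.defrost()
config.SIMULATOR.ACTION_SPACE_CONFIG = "move-all"
config.TASK.POSSIBLE_ACTIONS = config.TASK.POSSIBLE_ACTIONS + [
    "MOVE_BACKWARD", "MOVE_LEFT", "MOVE_RIGHT",
]
config.freeze()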
Example #8
    def __init__(self, config: Config, dataset: Optional[Dataset] = None):
        self.noise = NOISY
        print('[VisTargetNavEnv] NOISY ACTUATION : ', self.noise)
        if hasattr(config, 'AGENT_TASK'):
            self.agent_task = config.AGENT_TASK
        else:
            self.agent_task = 'search'
        if self.agent_task == 'homing':
            self.num_goals = 2
        else:
            self.num_goals = config.NUM_GOALS

        task_config = config.TASK_CONFIG
        task_config.defrost()
        #task_config.TASK.TOP_DOWN_MAP.MAP_RESOLUTION = 1250
        task_config.TASK.TOP_DOWN_MAP.DRAW_SOURCE = True
        task_config.TASK.TOP_DOWN_MAP.DRAW_SHORTEST_PATH = True
        task_config.TASK.TOP_DOWN_MAP.FOG_OF_WAR.VISIBILITY_DIST = 2.0
        task_config.TASK.TOP_DOWN_MAP.FOG_OF_WAR.FOV = 360
        task_config.TASK.TOP_DOWN_MAP.FOG_OF_WAR.DRAW = True
        task_config.TASK.TOP_DOWN_MAP.DRAW_VIEW_POINTS = False
        task_config.TASK.TOP_DOWN_MAP.DRAW_GOAL_POSITIONS = True
        task_config.TASK.TOP_DOWN_MAP.DRAW_GOAL_AABBS = False
        if ('GMT' in config.POLICY or 'NTS' in config.POLICY) and RENDER:
            task_config.TASK.TOP_DOWN_GRAPH_MAP = config.TASK_CONFIG.TASK.TOP_DOWN_MAP.clone(
            )
            if 'GMT' in config.POLICY:
                task_config.TASK.TOP_DOWN_GRAPH_MAP.TYPE = "TopDownGraphMap"
            elif 'NTS' in config.POLICY:
                task_config.TASK.TOP_DOWN_GRAPH_MAP.TYPE = 'NTSGraphMap'
                task_config.TASK.TOP_DOWN_GRAPH_MAP.MAP_RESOLUTION = 4000
            task_config.TASK.TOP_DOWN_GRAPH_MAP.NUM_TOPDOWN_MAP_SAMPLE_POINTS = 20000
            task_config.TASK.MEASUREMENTS += ['TOP_DOWN_GRAPH_MAP']
            if 'TOP_DOWN_MAP' in config.TASK_CONFIG.TASK.MEASUREMENTS:
                task_config.TASK.MEASUREMENTS = [
                    k for k in task_config.TASK.MEASUREMENTS
                    if 'TOP_DOWN_MAP' != k
                ]
        task_config.SIMULATOR.ACTION_SPACE_CONFIG = "CustomActionSpaceConfiguration"
        task_config.TASK.POSSIBLE_ACTIONS = task_config.TASK.POSSIBLE_ACTIONS + [
            'NOISY_FORWARD', 'NOISY_RIGHT', 'NOISY_LEFT'
        ]
        task_config.TASK.ACTIONS.NOISY_FORWARD = habitat.config.Config()
        task_config.TASK.ACTIONS.NOISY_FORWARD.TYPE = "NOISYFORWARD"
        task_config.TASK.ACTIONS.NOISY_RIGHT = habitat.config.Config()
        task_config.TASK.ACTIONS.NOISY_RIGHT.TYPE = "NOISYRIGHT"
        task_config.TASK.ACTIONS.NOISY_LEFT = habitat.config.Config()
        task_config.TASK.ACTIONS.NOISY_LEFT.TYPE = "NOISYLEFT"
        task_config.TASK.MEASUREMENTS = [
            'GOAL_INDEX'
        ] + task_config.TASK.MEASUREMENTS + ['SOFT_SPL']
        task_config.TASK.DISTANCE_TO_GOAL.TYPE = 'Custom_DistanceToGoal'
        if self.agent_task != 'search':
            task_config.TASK.SPL.TYPE = 'Custom_SPL'
        task_config.TASK.SOFT_SPL.TYPE = 'Custom_SoftSPL'
        task_config.TASK.GOAL_INDEX = task_config.TASK.SPL.clone()
        task_config.TASK.GOAL_INDEX.TYPE = 'GoalIndex'
        task_config.freeze()
        self.config = config
        self._core_env_config = config.TASK_CONFIG
        self._reward_measure_name = config.REWARD_METHOD
        self._success_measure_name = config.SUCCESS_MEASURE
        self.success_distance = config.SUCCESS_DISTANCE
        self._previous_measure = None
        self._previous_action = -1
        self.time_t = 0
        self.stuck = 0
        self.follower = None
        if 'NOISY_FORWARD' not in HabitatSimActions:
            HabitatSimActions.extend_action_space("NOISY_FORWARD")
            HabitatSimActions.extend_action_space("NOISY_RIGHT")
            HabitatSimActions.extend_action_space("NOISY_LEFT")
        if 'STOP' in task_config.TASK.POSSIBLE_ACTIONS:
            self.action_dict = {
                0: HabitatSimActions.STOP,
                1: "NOISY_FORWARD",
                2: "NOISY_LEFT",
                3: "NOISY_RIGHT"
            }
        else:
            self.action_dict = {
                0: "NOISY_FORWARD",
                1: "NOISY_LEFT",
                2: "NOISY_RIGHT"
            }
        super().__init__(self._core_env_config, dataset)
        act_dict = {
            "MOVE_FORWARD": EmptySpace(),
            'TURN_LEFT': EmptySpace(),
            'TURN_RIGHT': EmptySpace()
        }
        if 'STOP' in task_config.TASK.POSSIBLE_ACTIONS:
            act_dict.update({'STOP': EmptySpace()})
        self.action_space = ActionSpace(act_dict)
        sensor_spaces = self.habitat_env._task.sensor_suite.observation_spaces.spaces
        obs_dict = {
            'panoramic_rgb': sensor_spaces['panoramic_rgb'],
            'panoramic_depth': sensor_spaces['panoramic_depth'],
            'target_goal': sensor_spaces['target_goal'],
            'step': Box(low=np.array(0), high=np.array(500), dtype=np.float32),
            'prev_act': Box(low=np.array(-1), high=np.array(self.action_space.n),
                            dtype=np.int32),
            'gt_action': Box(low=np.array(-1), high=np.array(self.action_space.n),
                             dtype=np.int32),
            'position': Box(low=-np.Inf, high=np.Inf, shape=(3,), dtype=np.float32),
            'target_pose': Box(low=-np.Inf, high=np.Inf, shape=(3,), dtype=np.float32),
            'distance': Box(low=-np.Inf, high=np.Inf, shape=(1,), dtype=np.float32),
        }
        if 'GMT' in config.POLICY and RENDER:
            self.mapper = self.habitat_env.task.measurements.measures[
                'top_down_map']
            #obs_dict.update({'unexplored':Box(low=0, high=1, shape=(self.mapper.delta,), dtype=np.int32),
            #    'neighbors': Box(low=0, high=1, shape=(self.mapper.delta,), dtype=np.int32),})
        else:
            self.mapper = None
        if 'aux' in self.config.POLICY:
            self.return_have_been = True
            self.return_target_dist_score = True
            obs_dict.update({
                'have_been':
                Box(low=0, high=1, shape=(1, ), dtype=np.int32),
                'target_dist_score':
                Box(low=0, high=1, shape=(1, ), dtype=np.float32),
            })
        else:
            self.return_have_been = False
            self.return_target_dist_score = False

        self.observation_space = SpaceDict(obs_dict)

        if config.DIFFICULTY == 'easy':
            self.habitat_env.difficulty = 'easy'
            self.habitat_env.MIN_DIST, self.habitat_env.MAX_DIST = 1.5, 3.0
        elif config.DIFFICULTY == 'medium':
            self.habitat_env.difficulty = 'medium'
            self.habitat_env.MIN_DIST, self.habitat_env.MAX_DIST = 3.0, 5.0
        elif config.DIFFICULTY == 'hard':
            self.habitat_env.difficulty = 'hard'
            self.habitat_env.MIN_DIST, self.habitat_env.MAX_DIST = 5.0, 10.0
        elif config.DIFFICULTY == 'random':
            self.habitat_env.difficulty = 'random'
            self.habitat_env.MIN_DIST, self.habitat_env.MAX_DIST = 3.0, 10.0
        else:
            raise NotImplementedError

        self.habitat_env._num_goals = self.num_goals
        self.habitat_env._agent_task = self.agent_task
        print('current task : %s' % (self.agent_task))
        print('current difficulty %s, MIN_DIST %f, MAX_DIST %f - # goals %d' %
              (config.DIFFICULTY, self.habitat_env.MIN_DIST,
               self.habitat_env.MAX_DIST, self.habitat_env._num_goals))

        self.min_measure = self.habitat_env.MAX_DIST
        self.reward_method = config.REWARD_METHOD
        if self.reward_method == 'progress':
            self.get_reward = self.get_progress_reward
        elif self.reward_method == 'milestone':
            self.get_reward = self.get_milestone_reward
        elif self.reward_method == 'coverage':
            self.get_reward = self.get_coverage_reward
        else:
            raise NotImplementedError(self.reward_method)

        self.run_mode = 'RL'
        self.number_of_episodes = 1000
        self.need_gt_action = False
        self.has_log_info = None
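The integer-keyed action_dict suggests that step() translates discrete policy outputs into the extended action names. A hypothetical sketch; the source project's implementation certainly does more bookkeeping (reward, success, info):

    def step(self, action: int):
        # Map the discrete index onto a (possibly noisy) Habitat action
        # name before stepping the wrapped environment.
        self._previous_action = action
        self.time_t += 1
        return super().step(self.action_dict[action])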