def main():
    """Demo: register a custom measure and a custom sensor on a PointNav task.

    Extends the default habitat config with an EPISODE_INFO measure and an
    AGENT_POSITION_SENSOR, then runs a reset and one MOVE_FORWARD step,
    printing the sensor observation and the measure value after each.
    """
    # Start from the stock PointNav task config and unlock it for edits.
    cfg = habitat.get_config(config_paths="configs/tasks/pointnav.yaml")
    cfg.defrost()

    # --- custom measure -------------------------------------------------
    # TYPE is the registry lookup key; by default classes register under
    # their class name.
    episode_info = CN()
    episode_info.TYPE = "EpisodeInfo"
    episode_info.VALUE = 5
    cfg.TASK.EPISODE_INFO = episode_info
    cfg.TASK.MEASUREMENTS.append("EPISODE_INFO")

    # --- custom sensor --------------------------------------------------
    # This sensor was registered under an explicit custom name.
    position_sensor = CN()
    position_sensor.TYPE = "my_supercool_sensor"
    position_sensor.ANSWER_TO_LIFE = 42
    cfg.TASK.AGENT_POSITION_SENSOR = position_sensor
    cfg.TASK.SENSORS.append("AGENT_POSITION_SENSOR")

    cfg.freeze()

    environment = habitat.Env(config=cfg)

    # Reset: show the custom observation, then the custom metric.
    observations = environment.reset()
    print(observations["agent_position"])
    print(environment.get_metrics()["episode_info"])

    # One forward step: same two values again.
    forward = habitat.sims.habitat_simulator.SimulatorActions.MOVE_FORWARD.value
    observations = environment.step(forward)
    print(observations["agent_position"])
    print(environment.get_metrics()["episode_info"])
# Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List, Optional, Union from habitat.config import Config as CN # type: ignore DEFAULT_CONFIG_DIR = "configs/" CONFIG_FILE_SEPARATOR = "," # ----------------------------------------------------------------------------- # Config definition # ----------------------------------------------------------------------------- _C = CN() _C.SEED = 100 # ----------------------------------------------------------------------------- # ENVIRONMENT # ----------------------------------------------------------------------------- _C.ENVIRONMENT = CN() _C.ENVIRONMENT.MAX_EPISODE_STEPS = 1000 _C.ENVIRONMENT.MAX_EPISODE_SECONDS = 10000000 _C.ENVIRONMENT.ITERATOR_OPTIONS = CN() _C.ENVIRONMENT.ITERATOR_OPTIONS.CYCLE = True _C.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = True _C.ENVIRONMENT.ITERATOR_OPTIONS.GROUP_BY_SCENE = True _C.ENVIRONMENT.ITERATOR_OPTIONS.NUM_EPISODE_SAMPLE = -1 _C.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_EPISODES = -1 _C.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = int(1e4) _C.ENVIRONMENT.ITERATOR_OPTIONS.STEP_REPETITION_RANGE = 0.2
# This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List, Optional, Union import numpy as np from habitat import get_config as get_task_config from habitat.config import Config as CN DEFAULT_CONFIG_DIR = "configs/" CONFIG_FILE_SEPARATOR = "," # ----------------------------------------------------------------------------- # EXPERIMENT CONFIG # ----------------------------------------------------------------------------- _C = CN() _C.BASE_TASK_CONFIG_PATH = "configs/tasks/pointnav.yaml" _C.TASK_CONFIG = CN() # task_config will be stored as a config node _C.CMD_TRAILING_OPTS = [] # store command line options as list of strings _C.TRAINER_NAME = "ppo" _C.ENV_NAME = "NavRLEnv" _C.SIMULATOR_GPU_ID = 0 _C.TORCH_GPU_ID = 0 _C.VIDEO_OPTION = ["disk", "tensorboard"] _C.TENSORBOARD_DIR = "tb" _C.VIDEO_DIR = "video_dir" _C.TEST_EPISODE_COUNT = -1 _C.EVAL_CKPT_PATH_DIR = "data/checkpoints" # path to ckpt or path to ckpts dir _C.NUM_PROCESSES = 16 _C.SENSORS = ["RGB_SENSOR", "DEPTH_SENSOR"] _C.CHECKPOINT_FOLDER = "data/checkpoints"
# Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List, Optional, Union from habitat.config import Config as CN # type: ignore DEFAULT_CONFIG_DIR = "configs/" CONFIG_FILE_SEPARATOR = "," # ----------------------------------------------------------------------------- # Config definition # ----------------------------------------------------------------------------- _C = CN() _C.SEED = 100 # ----------------------------------------------------------------------------- # ENVIRONMENT # ----------------------------------------------------------------------------- _C.ENVIRONMENT = CN() _C.ENVIRONMENT.MAX_EPISODE_STEPS = 1000 _C.ENVIRONMENT.MAX_EPISODE_SECONDS = 10000000 _C.ENVIRONMENT.ITERATOR_OPTIONS = CN() _C.ENVIRONMENT.ITERATOR_OPTIONS.CYCLE = True _C.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False _C.ENVIRONMENT.ITERATOR_OPTIONS.GROUP_BY_SCENE = True _C.ENVIRONMENT.ITERATOR_OPTIONS.NUM_EPISODE_SAMPLE = -1 _C.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT = -1 # ----------------------------------------------------------------------------- # TASK
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# Default habitat configuration tree (yacs-style CN nodes).
# NOTE(review): truncated chunk — more TASK keys follow in the full file.

from typing import List, Optional, Union

from habitat.config import Config as CN  # type: ignore

# FIX: was hard-coded to a user-specific absolute path
# ("/private/home/medhini/navigation-analysis-habitat/habitat-api/configs/"),
# which breaks on any other machine. Use the repo-relative default that every
# other variant of this module uses.
DEFAULT_CONFIG_DIR = "configs/"
CONFIG_FILE_SEPARATOR = ","  # allows "A.yaml,B.yaml" style path lists

# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C = CN()
_C.SEED = 100
# -----------------------------------------------------------------------------
# ENVIRONMENT
# -----------------------------------------------------------------------------
_C.ENVIRONMENT = CN()
_C.ENVIRONMENT.MAX_EPISODE_STEPS = 1000
_C.ENVIRONMENT.MAX_EPISODE_SECONDS = 10000000
# -----------------------------------------------------------------------------
# TASK
# -----------------------------------------------------------------------------
_C.TASK = CN()
_C.TASK.TYPE = "Nav-v0"
_C.TASK.SUCCESS_DISTANCE = 0.2  # meters, presumably — confirm against task
_C.TASK.SENSORS = []
_C.TASK.MEASUREMENTS = []
# This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List, Optional, Union import numpy as np from habitat import get_config as get_task_config from habitat.config import Config as CN DEFAULT_CONFIG_DIR = "configs/" CONFIG_FILE_SEPARATOR = "," # ----------------------------------------------------------------------------- # EXPERIMENT CONFIG # ----------------------------------------------------------------------------- _C = CN() _C.MULTIPLY_SCENES = False _C.BASE_TASK_CONFIG_PATH = "configs/tasks/explore_replica.yaml" _C.TASK_CONFIG = CN() # task_config will be stored as a config node _C.SHARED_DATA = [] # HACKY share stuff _C.SHARED_SIZES = [] # HACKY share stuff _C.CMD_TRAILING_OPTS = [] # store command line options as list of strings _C.TRAINER_NAME = "ppo" _C.ENV_NAME = "NavRLEnv" _C.SIMULATOR_GPU_ID = 0 _C.TORCH_GPU_ID = 0 _C.VIDEO_OPTION = ["disk", "tensorboard"] _C.VIDEO_OPTION_INTERVAL = 10 _C.TENSORBOARD_DIR = "tb" _C.VIDEO_DIR = "video_dir" _C.TEST_EPISODE_COUNT = 36
# This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List, Optional, Union import numpy as np from habitat import get_config as get_task_config from habitat.config import Config as CN DEFAULT_CONFIG_DIR = "configs/" CONFIG_FILE_SEPARATOR = "," # ----------------------------------------------------------------------------- # EXPERIMENT CONFIG # ----------------------------------------------------------------------------- _C = CN() _C.BASE_TASK_CONFIG_PATH = "configs/tasks/pointnav.yaml" _C.TASK_CONFIG = CN() # task_config will be stored as a config node _C.CMD_TRAILING_OPTS = [] # store command line options as list of strings _C.TRAINER_NAME = "ppo" _C.ENV_NAME = "NavRLEnv" _C.SIMULATOR_GPU_ID = 0 _C.TORCH_GPU_ID = 0 _C.VIDEO_OPTION = ["disk", "tensorboard"] _C.TENSORBOARD_DIR = "tb" _C.VIDEO_DIR = "video_dir" _C.TEST_EPISODE_COUNT = 2 _C.EVAL_CKPT_PATH_DIR = "data/checkpoints" # path to ckpt or path to ckpts dir _C.NUM_PROCESSES = 16 _C.SENSORS = ["RGB_SENSOR", "DEPTH_SENSOR"] _C.CHECKPOINT_FOLDER = "data/checkpoints"
# Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List, Optional, Union from habitat.config import Config as CN # type: ignore DEFAULT_CONFIG_DIR = "configs/" CONFIG_FILE_SEPARATOR = "," # ----------------------------------------------------------------------------- # Config definition # ----------------------------------------------------------------------------- _C = CN() _C.SEED = 100 # ----------------------------------------------------------------------------- # ENVIRONMENT # ----------------------------------------------------------------------------- _C.ENVIRONMENT = CN() _C.ENVIRONMENT.MAX_EPISODE_STEPS = 1000 _C.ENVIRONMENT.MAX_EPISODE_SECONDS = 10000000 # ----------------------------------------------------------------------------- # TASK # ----------------------------------------------------------------------------- _C.TASK = CN() _C.TASK.TYPE = "Nav-v0" _C.TASK.SUCCESS_DISTANCE = 0.2 _C.TASK.SENSORS = [] _C.TASK.MEASUREMENTS = []
# LICENSE file in the root directory of this source tree. from typing import List, Optional, Union import numpy as np from habitat import get_config as get_task_config from habitat.config import Config as CN import os DEFAULT_CONFIG_DIR = "IL_configs/" CONFIG_FILE_SEPARATOR = "," # ----------------------------------------------------------------------------- # EXPERIMENT CONFIG # ----------------------------------------------------------------------------- _C = CN() _C.VERSION = 'base' _C.AGENT_TASK = 'search' _C.BASE_TASK_CONFIG_PATH = "IL_configs/tasks/pointnav.yaml" _C.TASK_CONFIG = CN() # task_config will be stored as a config node _C.CMD_TRAILING_OPTS = [] # store command line options as list of strings _C.TRAINER_NAME = "ppo" _C.ENV_NAME = "NavRLEnv" _C.SIMULATOR_GPU_ID = 0 _C.TORCH_GPU_ID = 0 _C.VIDEO_OPTION = ["disk", "tensorboard"] _C.TENSORBOARD_DIR = "logs/" _C.VIDEO_DIR = "data/video_dir" _C.TEST_EPISODE_COUNT = 2 _C.EVAL_CKPT_PATH_DIR = "data/eval_checkpoints" # path to ckpt or path to ckpts dir _C.NUM_PROCESSES = 16
from habitat.config import Config as CN # type: ignore from habitat.config.default import _C, CONFIG_FILE_SEPARATOR # import sensors.detectron _C.TASK.DETECTRON_SENSOR = CN() _C.TASK.DETECTRON_SENSOR.TYPE = 'detectron_sensor' _C.TASK.DETECTRON_SENSOR.DEVICE = 'cuda' _C.TASK.MULTI_SPL = CN() _C.TASK.MULTI_SPL.TYPE = 'MULTI_SPL' _C.TASK.MULTI_SPL.SUCCESS_DISTANCE = 0.2 def get_config(config_paths=None, opts=None): r"""Create a unified config with default values overwritten by values from :p:`config_paths` and overwritten by options from :p:`opts`. :param config_paths: List of config paths or string that contains comma separated list of config paths. :param opts: Config options (keys, values) in a list (e.g., passed from command line into the config. For example, :py:`opts = ['FOO.BAR', 0.5]`. Argument can be used for parameter sweeping or quick tests. """ config = _C.clone() if config_paths: if isinstance(config_paths, str): if CONFIG_FILE_SEPARATOR in config_paths: config_paths = config_paths.split(CONFIG_FILE_SEPARATOR) else: config_paths = [config_paths]
# Experiment-level config for an audio-navigation (AVNav) setup.
# NOTE(review): truncated chunk — more keys follow in the full file.

import os
import logging
import shutil

import numpy as np

from habitat import get_config as get_task_config
from habitat.config import Config as CN
import habitat

DEFAULT_CONFIG_DIR = "configs/"
CONFIG_FILE_SEPARATOR = ","

# -----------------------------------------------------------------------------
# EXPERIMENT CONFIG
# -----------------------------------------------------------------------------
_C = CN()
_C.SEED = 0
_C.BASE_TASK_CONFIG_PATH = "configs/tasks/pointgoal.yaml"
_C.TASK_CONFIG = CN()  # task_config will be stored as a config node
_C.CMD_TRAILING_OPTS = []  # store command line options as list of strings
_C.TRAINER_NAME = "AVNavTrainer"
_C.ENV_NAME = "AudioNavRLEnv"
_C.SIMULATOR_GPU_ID = 0
_C.TORCH_GPU_ID = 0
_C.VIDEO_OPTION = ["disk", "tensorboard"]
_C.VISUALIZATION_OPTION = ["top_down_map"]
_C.TENSORBOARD_DIR = "tb"
_C.VIDEO_DIR = "video_dir"
_C.TEST_EPISODE_COUNT = 2
_C.EVAL_CKPT_PATH_DIR = "data/checkpoints"  # path to ckpt or path to ckpts dir
_C.NUM_PROCESSES = 16
self.needs_inspection = True return {"action": action} if self.unseen_obstacle: command = HabitatSimActions.TURN_RIGHT return command command = HabitatSimActions.STOP command = self.planner_prediction_to_command(self.waypointPose6D) return command config = get_config("../habitat-api/configs/tasks/objectnav_mp3d_fast.yaml") config.defrost() # ----------------------------------------------------------------------------- # ORBSLAM2 BASELINE # ----------------------------------------------------------------------------- config.ORBSLAM2 = CN() config.ORBSLAM2.SLAM_VOCAB_PATH = "../habitat-api/habitat_baselines/slambased/data/ORBvoc.txt" config.ORBSLAM2.SLAM_SETTINGS_PATH = ( "../habitat-api/habitat_baselines/slambased/data/mp3d3_small1k.yaml") config.ORBSLAM2.MAP_CELL_SIZE = 0.1 config.ORBSLAM2.MAP_SIZE = 40 config.ORBSLAM2.CAMERA_HEIGHT = config.SIMULATOR.DEPTH_SENSOR.POSITION[1] config.ORBSLAM2.BETA = 100 config.ORBSLAM2.H_OBSTACLE_MIN = 0.3 * config.ORBSLAM2.CAMERA_HEIGHT config.ORBSLAM2.H_OBSTACLE_MAX = 1.0 * config.ORBSLAM2.CAMERA_HEIGHT config.ORBSLAM2.D_OBSTACLE_MIN = 0.1 config.ORBSLAM2.D_OBSTACLE_MAX = 4.0 config.ORBSLAM2.PREPROCESS_MAP = True config.ORBSLAM2.MIN_PTS_IN_OBSTACLE = (config.SIMULATOR.DEPTH_SENSOR.WIDTH / 2.0) config.ORBSLAM2.ANGLE_TH = float(np.deg2rad(15))
# Experiment-level config for the SAVi (semantic audio-visual navigation) setup.
# NOTE(review): truncated chunk — more keys follow in the full file.

import logging
import shutil

import numpy as np

from habitat import get_config as get_task_config
from habitat.config import Config as CN
import habitat
from habitat.config.default import SIMULATOR_SENSOR

DEFAULT_CONFIG_DIR = "configs/"
CONFIG_FILE_SEPARATOR = ","

# -----------------------------------------------------------------------------
# EXPERIMENT CONFIG
# -----------------------------------------------------------------------------
_C = CN()
_C.SEED = 0
_C.BASE_TASK_CONFIG_PATH = "configs/tasks/pointgoal.yaml"
_C.TASK_CONFIG = CN()  # task_config will be stored as a config node
_C.CMD_TRAILING_OPTS = []  # store command line options as list of strings
_C.TRAINER_NAME = "savi"
_C.ENV_NAME = "AudioNavRLEnv"
_C.SIMULATOR_GPU_ID = 0
_C.TORCH_GPU_ID = 0
_C.VIDEO_OPTION = ["disk", "tensorboard"]
_C.VISUALIZATION_OPTION = ["top_down_map"]
_C.TENSORBOARD_DIR = "tb"
_C.VIDEO_DIR = "video_dir"
_C.TEST_EPISODE_COUNT = 2
_C.EVAL_CKPT_PATH_DIR = "data/checkpoints"  # path to ckpt or path to ckpts dir
_C.NUM_PROCESSES = 16
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# Trainer-level default config (older layout with a TRAINER sub-node).
# NOTE(review): truncated chunk — more keys follow in the full file.

from typing import List, Optional, Union

import numpy as np

from habitat import get_config as get_task_config
from habitat.config import Config as CN

DEFAULT_CONFIG_DIR = "configs/"
CONFIG_FILE_SEPARATOR = ","

# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C = CN()
_C.BASE_TASK_CONFIG_PATH = "configs/tasks/pointnav.yaml"
_C.TASK_CONFIG = CN()  # task_config will be stored as a config node
# FIX: was `""` with a stray quote in the comment. Trailing command-line
# options are consumed as (key, value) pairs by yacs' merge_from_list, which
# requires a list — and every sibling config module defaults this to [].
# An empty list merges as a no-op, so the change is backward-compatible.
_C.CMD_TRAILING_OPTS = []  # store command line options as list of strings
# -----------------------------------------------------------------------------
# TRAINER ALGORITHMS
# -----------------------------------------------------------------------------
_C.TRAINER = CN()
_C.TRAINER.TRAINER_NAME = "ppo"
# -----------------------------------------------------------------------------
# REINFORCEMENT LEARNING (RL)
# -----------------------------------------------------------------------------
_C.TRAINER.RL = CN()
_C.TRAINER.RL.SUCCESS_REWARD = 10.0
_C.TRAINER.RL.SLACK_REWARD = -0.01  # per-step penalty to encourage short paths
# -----------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List, Optional, Union from habitat.config import Config as CN # type: ignore DEFAULT_CONFIG_DIR = "configs/" CONFIG_FILE_SEPARATOR = "," # ----------------------------------------------------------------------------- # Config definition # ----------------------------------------------------------------------------- _C = CN() _C.SEED = 100 # ----------------------------------------------------------------------------- # ENVIRONMENT # ----------------------------------------------------------------------------- _C.ENVIRONMENT = CN() _C.ENVIRONMENT.MAX_EPISODE_STEPS = 1000 _C.ENVIRONMENT.MAX_EPISODE_SECONDS = 10000000 _C.ENVIRONMENT.ITERATOR_OPTIONS = CN() _C.ENVIRONMENT.ITERATOR_OPTIONS.CYCLE = True _C.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = True _C.ENVIRONMENT.ITERATOR_OPTIONS.GROUP_BY_SCENE = False _C.ENVIRONMENT.ITERATOR_OPTIONS.NUM_EPISODE_SAMPLE = -1 _C.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT = -1 _C.ENVIRONMENT.OVERRIDE_RAND_GOAL = CN()
# This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List, Optional, Union import numpy as np from habitat import get_config from habitat.config import Config as CN DEFAULT_CONFIG_DIR = "configs/" CONFIG_FILE_SEPARATOR = "," # ----------------------------------------------------------------------------- # Config definition # ----------------------------------------------------------------------------- _C = CN() _C.SEED = 100 # ----------------------------------------------------------------------------- # BASELINE # ----------------------------------------------------------------------------- _C.BASELINE = CN() # ----------------------------------------------------------------------------- # REINFORCEMENT LEARNING (RL) # ----------------------------------------------------------------------------- _C.BASELINE.RL = CN() _C.BASELINE.RL.SUCCESS_REWARD = 10.0 _C.BASELINE.RL.SLACK_REWARD = -0.01 # ----------------------------------------------------------------------------- # ORBSLAM2 BASELINE # ----------------------------------------------------------------------------- _C.BASELINE.ORBSLAM2 = CN()
# LICENSE file in the root directory of this source tree. import warnings from typing import List, Optional, Union import numpy as np from habitat import get_config as get_task_config from habitat.config import Config as CN DEFAULT_CONFIG_DIR = "configs/" CONFIG_FILE_SEPARATOR = "," # ----------------------------------------------------------------------------- # EXPERIMENT CONFIG # ----------------------------------------------------------------------------- _C = CN() # task config can be a list of conifgs like "A.yaml,B.yaml" _C.BASE_TASK_CONFIG_PATH = "configs/tasks/pointnav.yaml" _C.TASK_CONFIG = CN() # task_config will be stored as a config node _C.CMD_TRAILING_OPTS = [] # store command line options as list of strings _C.TRAINER_NAME = "ppo" _C.ENV_NAME = "NavRLEnv" _C.SIMULATOR_GPU_ID = 0 _C.TORCH_GPU_ID = 0 _C.VIDEO_OPTION = ["disk", "tensorboard"] _C.TENSORBOARD_DIR = "tb" _C.VIDEO_DIR = "video_dir" _C.TEST_EPISODE_COUNT = -1 _C.EVAL_CKPT_PATH_DIR = "data/checkpoints" # path to ckpt or path to ckpts dir _C.NUM_ENVIRONMENTS = 16 _C.NUM_PROCESSES = -1 # depricated
# LICENSE file in the root directory of this source tree. from typing import List, Optional, Union import math import numpy as np from habitat_extensions import get_extended_config as get_task_config from habitat.config import Config as CN DEFAULT_CONFIG_DIR = "configs/" CONFIG_FILE_SEPARATOR = "," # ----------------------------------------------------------------------------- # EXPERIMENT CONFIG # ----------------------------------------------------------------------------- _C = CN() _C.PYT_RANDOM_SEED = 123 _C.BASE_TASK_CONFIG_PATH = "habitat_extensions/config/exploration_gibson.yaml" _C.TASK_CONFIG = CN() # task_config will be stored as a config node _C.CMD_TRAILING_OPTS = [] # store command line options as list of strings _C.TRAINER_NAME = "occant_exp" _C.ENV_NAME = "ExpRLEnv" _C.SIMULATOR_GPU_ID = 0 _C.SIMULATOR_GPU_IDS = [] # Assign specific GPUs to simulator _C.TORCH_GPU_ID = 0 _C.VIDEO_OPTION = ["disk", "tensorboard"] _C.TENSORBOARD_DIR = "tb" _C.VIDEO_DIR = "video_dir" _C.TEST_EPISODE_COUNT = -1 _C.EVAL_CKPT_PATH_DIR = "data/checkpoints" # path to ckpt or path to ckpts dir _C.EVAL_PREV_CKPT_ID = -1 # The evaluation starts at (this value + 1)th ckpt
# Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os from typing import Optional from habitat.config import Config as CN DEFAULT_CONFIG_DIR = "configs/" # ----------------------------------------------------------------------------- # Config definition # ----------------------------------------------------------------------------- _C = CN() _C.SEED = 100 # ----------------------------------------------------------------------------- # BASELINES # ----------------------------------------------------------------------------- _C.BASELINE = CN() # ----------------------------------------------------------------------------- # REINFORCEMENT LEARNING (RL) # ----------------------------------------------------------------------------- _C.BASELINE.RL = CN() _C.BASELINE.RL.SUCCESS_REWARD = 10.0 _C.BASELINE.RL.SLACK_REWARD = -0.01 # ----------------------------------------------------------------------------- def cfg(config_file: Optional[str] = None,
# Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os from typing import Optional from habitat.config import Config as CN # type: ignore DEFAULT_CONFIG_DIR = "configs/" # ----------------------------------------------------------------------------- # Config definition # ----------------------------------------------------------------------------- _C = CN() _C.SEED = 100 # ----------------------------------------------------------------------------- # ENVIRONMENT # ----------------------------------------------------------------------------- _C.ENVIRONMENT = CN() _C.ENVIRONMENT.MAX_EPISODE_STEPS = 1000 _C.ENVIRONMENT.MAX_EPISODE_SECONDS = 10000000 # ----------------------------------------------------------------------------- # TASK # ----------------------------------------------------------------------------- _C.TASK = CN() _C.TASK.TYPE = "Nav-v0" _C.TASK.SUCCESS_DISTANCE = 0.2 _C.TASK.SENSORS = [] _C.TASK.MEASUREMENTS = []