def test_reconfigure_render(
    scene, sensor_type, make_cfg_settings,
):
    if not osp.exists(scene):
        pytest.skip("Skipping {}".format(scene))

    for sens in all_sensor_types:
        make_cfg_settings[sens] = False

    make_cfg_settings["scene"] = _test_scenes[-1]
    make_cfg_settings[sensor_type] = True

    cfg = make_cfg(make_cfg_settings)

    with habitat_sim.Simulator(cfg) as sim:
        make_cfg_settings["scene"] = scene
        sim.reconfigure(make_cfg(make_cfg_settings))
        obs, gt = _render_and_load_gt(sim, scene, sensor_type, False)

        # Different GPUs and different driver versions will produce slightly
        # different images; differences on aliased edges might also stem from how a
        # particular importer parses transforms
        assert np.linalg.norm(
            obs[sensor_type].astype(float) - gt.astype(float)
        ) < 9.0e-2 * np.linalg.norm(
            gt.astype(float)
        ), f"Incorrect {sensor_type} output"

        sim.close()
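# Several tests in this file call a `_render_and_load_gt` helper that is not shown in
# this excerpt. A minimal sketch of what such a helper could look like, assuming the
# same gt_data layout used by the inlined test variants further down (a
# "<scene>-state.json" agent state plus a "<scene>-<sensor>.npy" ground-truth image):
def _render_and_load_gt(sim, scene, sensor_type, gpu2gpu):
    # Restore the agent pose that was used to record the ground-truth render
    gt_state_file = osp.abspath(
        osp.join(
            osp.dirname(__file__),
            "gt_data",
            "{}-state.json".format(osp.basename(osp.splitext(scene)[0])),
        )
    )
    with open(gt_state_file, "r") as f:
        render_state = json.load(f)
        state = habitat_sim.AgentState()
        state.position = render_state["pos"]
        state.rotation = habitat_sim.utils.quat_from_coeffs(render_state["rot"])

    sim.initialize_agent(0, state)
    obs = sim.step("move_forward")

    # Load the recorded ground-truth observation for this scene/sensor pair
    gt = np.load(
        osp.abspath(
            osp.join(
                osp.dirname(__file__),
                "gt_data",
                "{}-{}.npy".format(osp.basename(osp.splitext(scene)[0]), sensor_type),
            )
        )
    )

    if gpu2gpu:
        import torch

        # GPU->GPU observations come back as torch tensors; move them to numpy
        for k, v in obs.items():
            if torch.is_tensor(v):
                obs[k] = v.cpu().numpy()

    return obs, gt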
def test_smoke_redwood_noise(scene, gpu2gpu, make_cfg_settings):
    if not osp.exists(scene):
        pytest.skip("Skipping {}".format(scene))
    if not habitat_sim.cuda_enabled and gpu2gpu:
        pytest.skip("Skipping GPU->GPU test")

    make_cfg_settings["depth_sensor"] = True
    make_cfg_settings["color_sensor"] = False
    make_cfg_settings["semantic_sensor"] = False
    make_cfg_settings["scene"] = scene
    hsim_cfg = make_cfg(make_cfg_settings)
    hsim_cfg.agents[0].sensor_specifications[0].noise_model = "RedwoodDepthNoiseModel"

    for sensor_spec in hsim_cfg.agents[0].sensor_specifications:
        sensor_spec.gpu2gpu_transfer = gpu2gpu

    with habitat_sim.Simulator(hsim_cfg) as sim:
        obs, gt = _render_and_load_gt(sim, scene, "depth_sensor", gpu2gpu)

        assert np.linalg.norm(
            obs["depth_sensor"].astype(float) - gt.astype(float)
        ) > 1.5e-2 * np.linalg.norm(
            gt.astype(float)
        ), "Incorrect depth_sensor output"

        sim.close()
def test_sensors(scene, has_sem, sensor_type, gpu2gpu, sim, make_cfg_settings):
    if not osp.exists(scene):
        pytest.skip("Skipping {}".format(scene))
    if not habitat_sim.cuda_enabled and gpu2gpu:
        pytest.skip("Skipping GPU->GPU test")

    make_cfg_settings = {k: v for k, v in make_cfg_settings.items()}
    make_cfg_settings["semantic_sensor"] = has_sem
    make_cfg_settings["scene"] = scene

    cfg = make_cfg(make_cfg_settings)
    for sensor_spec in cfg.agents[0].sensor_specifications:
        sensor_spec.gpu2gpu_transfer = gpu2gpu
    sim.reconfigure(cfg)

    obs, gt = _render_and_load_gt(sim, scene, sensor_type, gpu2gpu)

    # Different GPUs and different driver versions will produce slightly
    # different images; differences on aliased edges might also stem from how a
    # particular importer parses transforms
    assert np.linalg.norm(
        obs[sensor_type].astype(float) - gt.astype(float)
    ) < 5.0e-2 * np.linalg.norm(
        gt.astype(float)
    ), f"Incorrect {sensor_type} output"
def test_sensors(
    scene, sensor_type, gpu2gpu, frustum_culling, make_cfg_settings,
):
    if not osp.exists(scene):
        pytest.skip("Skipping {}".format(scene))
    if not habitat_sim.cuda_enabled and gpu2gpu:
        pytest.skip("Skipping GPU->GPU test")

    for sens in all_sensor_types:
        make_cfg_settings[sens] = False

    make_cfg_settings[sensor_type] = True
    make_cfg_settings["scene"] = scene
    make_cfg_settings["frustum_culling"] = frustum_culling

    cfg = make_cfg(make_cfg_settings)
    for sensor_spec in cfg.agents[0].sensor_specifications:
        sensor_spec.gpu2gpu_transfer = gpu2gpu

    with habitat_sim.Simulator(cfg) as sim:
        obs, gt = _render_and_load_gt(sim, scene, sensor_type, gpu2gpu)

        # Different GPUs and different driver versions will produce slightly
        # different images; differences on aliased edges might also stem from how a
        # particular importer parses transforms
        assert np.linalg.norm(
            obs[sensor_type].astype(float) - gt.astype(float)
        ) < 9.0e-2 * np.linalg.norm(
            gt.astype(float)
        ), f"Incorrect {sensor_type} output"
def test_semantic_scene(scene, make_cfg_settings):
    if not osp.exists(scene):
        pytest.skip("Skipping {}".format(scene))

    make_cfg_settings = {k: v for k, v in make_cfg_settings.items()}
    make_cfg_settings["semantic_sensor"] = False
    make_cfg_settings["scene"] = scene

    cfg = make_cfg(make_cfg_settings)
    cfg.agents[0].sensor_specifications = []
    sim = habitat_sim.Simulator(cfg)

    scene = sim.semantic_scene
    for obj in scene.objects:
        obj.aabb
        obj.aabb.sizes
        obj.aabb.center
        obj.id
        obj.obb.rotation
        obj.category.name()
        obj.category.index()

    for region in scene.regions:
        region.id
        region.category.name()
        region.category.index()

    for level in scene.levels:
        level.id
def test_initial_hfov(scene, sensor_type, make_cfg_settings):
    if not osp.exists(scene):
        pytest.skip("Skipping {}".format(scene))
    make_cfg_settings["hfov"] = 70
    with habitat_sim.Simulator(make_cfg(make_cfg_settings)) as sim:
        assert sim.agents[0]._sensors[sensor_type].hfov == mn.Deg(
            70
        ), "HFOV was not properly set"
def test_semantic_scene(scene, sim, make_cfg_settings):
    if not osp.exists(scene):
        pytest.skip("Skipping {}".format(scene))

    make_cfg_settings = {k: v for k, v in make_cfg_settings.items()}
    make_cfg_settings["semantic_sensor"] = False
    make_cfg_settings["scene"] = scene
    sim.reconfigure(make_cfg(make_cfg_settings))

    scene = sim.semantic_scene
    for obj in scene.objects:
        obj.aabb
        obj.aabb.sizes
        obj.aabb.center
        obj.id
        obj.obb.rotation
        obj.category.name()
        obj.category.index()

    for region in scene.regions:
        region.id
        region.category.name()
        region.category.index()

    for level in scene.levels:
        level.id
def test_sensors(scene, has_sem, sensor_type, gpu2gpu, sim, make_cfg_settings):
    if not osp.exists(scene):
        pytest.skip("Skipping {}".format(scene))
    if not habitat_sim.cuda_enabled and gpu2gpu:
        pytest.skip("Skipping GPU->GPU test")

    make_cfg_settings = {k: v for k, v in make_cfg_settings.items()}
    make_cfg_settings["semantic_sensor"] = has_sem
    make_cfg_settings["scene"] = scene

    cfg = make_cfg(make_cfg_settings)
    for sensor_spec in cfg.agents[0].sensor_specifications:
        sensor_spec.gpu2gpu_transfer = gpu2gpu
    sim.reconfigure(cfg)

    with open(
        osp.abspath(
            osp.join(
                osp.dirname(__file__),
                "gt_data",
                "{}-state.json".format(osp.basename(osp.splitext(scene)[0])),
            )
        ),
        "r",
    ) as f:
        render_state = json.load(f)
        state = habitat_sim.AgentState()
        state.position = render_state["pos"]
        state.rotation = habitat_sim.utils.quat_from_coeffs(render_state["rot"])

    sim.initialize_agent(0, state)
    obs = sim.step("move_forward")

    assert sensor_type in obs, f"{sensor_type} not in obs"

    gt = np.load(
        osp.abspath(
            osp.join(
                osp.dirname(__file__),
                "gt_data",
                "{}-{}.npy".format(
                    osp.basename(osp.splitext(scene)[0]), sensor_type
                ),
            )
        )
    )

    if gpu2gpu:
        import torch

        for k, v in obs.items():
            if torch.is_tensor(v):
                obs[k] = v.cpu().numpy()

    # Different GPUs and different driver versions will produce slightly different images
    assert np.linalg.norm(
        obs[sensor_type].astype(float) - gt.astype(float)
    ) < 1.5e-2 * np.linalg.norm(
        gt.astype(float)
    ), f"Incorrect {sensor_type} output"
def test_sensors(
    scene,
    sensor_type,
    gpu2gpu,
    frustum_culling,
    add_sensor_lazy,
    make_cfg_settings,
):
    if not osp.exists(scene):
        pytest.skip("Skipping {}".format(scene))
    if not habitat_sim.cuda_enabled and gpu2gpu:
        pytest.skip("Skipping GPU->GPU test")

    # We only support adding more RGB sensors if one is already in the scene;
    # depth sensors can be added whenever
    add_sensor_lazy = add_sensor_lazy and all_base_sensor_types[1] == sensor_type

    for sens in all_base_sensor_types:
        if add_sensor_lazy:
            make_cfg_settings[sens] = (
                sens in all_base_sensor_types[:2] and sens != sensor_type
            )
        else:
            make_cfg_settings[sens] = False

    make_cfg_settings[sensor_type] = True
    make_cfg_settings["scene"] = scene
    make_cfg_settings["frustum_culling"] = frustum_culling

    cfg = make_cfg(make_cfg_settings)
    if add_sensor_lazy:
        additional_sensors = cfg.agents[0].sensor_specifications[1:]
        cfg.agents[0].sensor_specifications = cfg.agents[0].sensor_specifications[:1]

    for sensor_spec in cfg.agents[0].sensor_specifications:
        sensor_spec.gpu2gpu_transfer = gpu2gpu

    with habitat_sim.Simulator(cfg) as sim:
        if add_sensor_lazy:
            obs: np.ndarray = sim.reset()
            assert len(obs) == 1, "Other sensors were not removed"
            for sensor_spec in additional_sensors:
                sim.add_sensor(sensor_spec)

        if sensor_type in all_exotic_sensor_types:
            obs = _render_scene(sim, scene, sensor_type, gpu2gpu)
            # Smoke test only for exotic sensors; no ground truth to compare against.
            return

        obs, gt = _render_and_load_gt(sim, scene, sensor_type, gpu2gpu)

        # Different GPUs and different driver versions will produce slightly
        # different images; differences on aliased edges might also stem from how a
        # particular importer parses transforms
        assert np.linalg.norm(
            obs[sensor_type].astype(float) - gt.astype(float)
        ) < 9.0e-2 * np.linalg.norm(
            gt.astype(float)
        ), f"Incorrect {sensor_type} output"
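# `all_sensor_types`, `all_base_sensor_types`, and `all_exotic_sensor_types` are
# module-level lists not included in this excerpt. A rough sketch of their shape,
# assuming the usual habitat_sim settings keys for the three pinhole sensors; the
# exotic entries below are hypothetical placeholders, not taken from this file:
all_base_sensor_types = [
    "color_sensor",
    "depth_sensor",
    "semantic_sensor",
]
all_exotic_sensor_types = [
    # hypothetical names; the real list would hold the equirect/fisheye sensor keys
    "equirect_rgba_sensor",
    "fisheye_rgba_sensor",
]
all_sensor_types = all_base_sensor_types + all_exotic_sensor_types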
def test_set_custom_light_setup(make_cfg_settings):
    with habitat_sim.Simulator(make_cfg(make_cfg_settings)) as sim:
        custom_setup_key = "custom_setup_key"

        light_setup = sim.get_light_setup(custom_setup_key)
        assert len(light_setup) == 0

        light_setup.append(LightInfo(position=[1.0, 1.0, 1.0]))
        assert sim.get_light_setup() != light_setup

        sim.set_light_setup(light_setup, custom_setup_key)
        assert sim.get_light_setup(custom_setup_key) == light_setup
def test_smoke_no_sensors(make_cfg_settings):
    sims = []
    for scene in _test_scenes:
        if not osp.exists(scene):
            continue

        make_cfg_settings = {k: v for k, v in make_cfg_settings.items()}
        make_cfg_settings["semantic_sensor"] = False
        make_cfg_settings["scene"] = scene

        cfg = make_cfg(make_cfg_settings)
        cfg.agents[0].sensor_specifications = []
        sims.append(habitat_sim.Simulator(cfg))
def test_pose_extractors(make_cfg_settings):
    with habitat_sim.Simulator(make_cfg(make_cfg_settings)) as sim:
        scene_filepath = ""
        pose_extractor_names = ["closest_point_extractor", "panorama_extractor"]
        for name in pose_extractor_names:
            extractor = ImageExtractor(
                scene_filepath,
                img_size=(32, 32),
                sim=sim,
                pose_extractor_name=name,
            )
            assert len(extractor) > 1
def test_set_custom_light_setup(make_cfg_settings):
    with habitat_sim.Simulator(make_cfg(make_cfg_settings)) as sim:
        custom_setup_key = "custom_setup_key"

        light_setup = sim.get_light_setup(custom_setup_key)
        assert len(light_setup) == 0

        # define a point light (w == 1)
        light_setup.append(LightInfo(vector=[1.0, 1.0, 1.0, 1.0]))
        assert sim.get_light_setup() != light_setup

        sim.set_light_setup(light_setup, custom_setup_key)
        assert sim.get_light_setup(custom_setup_key) == light_setup
def test_set_default_light_setup(make_cfg_settings):
    with habitat_sim.Simulator(make_cfg(make_cfg_settings)) as sim:
        light_setup = [LightInfo(position=[1.0, 1.0, 1.0])]
        sim.set_light_setup(light_setup)
        assert sim.get_light_setup() == light_setup

        # ensure modifications to the local light setup variable are not reflected in sim
        light_setup[0].model = LightPositionModel.CAMERA
        assert sim.get_light_setup() != light_setup

        sim.set_light_setup(light_setup, DEFAULT_LIGHTING_KEY)
        assert sim.get_light_setup() == light_setup
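# The light-setup tests in this file use gfx names that this excerpt never imports.
# Assuming the standard habitat_sim.gfx bindings, the imports would look like:
# from habitat_sim.gfx import (
#     DEFAULT_LIGHTING_KEY,
#     NO_LIGHT_KEY,
#     LightInfo,
#     LightPositionModel,
# )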
def test_rgba_noise(scene, model_name, make_cfg_settings):
    if not osp.exists(scene):
        pytest.skip("Skipping {}".format(scene))

    make_cfg_settings["depth_sensor"] = False
    make_cfg_settings["color_sensor"] = True
    make_cfg_settings["semantic_sensor"] = False
    make_cfg_settings["scene"] = scene
    hsim_cfg = make_cfg(make_cfg_settings)
    hsim_cfg.agents[0].sensor_specifications[0].noise_model = model_name

    with habitat_sim.Simulator(hsim_cfg) as sim:
        obs, gt = _render_and_load_gt(sim, scene, "color_sensor", False)

        assert np.linalg.norm(
            obs["color_sensor"].astype(float) - gt.astype(float)
        ) > 1.5e-2 * np.linalg.norm(gt.astype(float)), "Incorrect color_sensor output"
def test_data_extractor_end_to_end(make_cfg_settings):
    # Path is relative to simulator.py
    with habitat_sim.Simulator(make_cfg(make_cfg_settings)) as sim:
        scene_filepath = ""
        extractor = ImageExtractor(
            scene_filepath, labels=[0.0], img_size=(32, 32), sim=sim
        )
        dataset = MyDataset(extractor)
        dataloader = DataLoader(dataset, batch_size=3)
        net = TrivialNet()

        # Run data through network
        for sample_batch in dataloader:
            img, _ = sample_batch["rgba"], sample_batch["label"]
            img = img.permute(0, 3, 2, 1).float()
            net(img)
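# `MyDataset` and `TrivialNet` are helpers defined elsewhere in the test module. A
# minimal sketch of what they could look like, assuming the extractor yields dicts
# carrying an "rgba" image and a "label" entry (the field names follow the usage in
# the test above and are otherwise an assumption):
import torch
import torch.nn as nn
from torch.utils.data import Dataset


class MyDataset(Dataset):
    # Thin torch Dataset wrapper around an ImageExtractor
    def __init__(self, extractor):
        self.extractor = extractor

    def __len__(self):
        return len(self.extractor)

    def __getitem__(self, idx):
        sample = self.extractor[idx]
        return {
            "rgba": np.asarray(sample["rgba"], dtype=np.uint8),
            "label": sample["label"],
        }


class TrivialNet(nn.Module):
    # A tiny convolutional network, just enough to smoke-test the data pipeline
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(4, 4, kernel_size=3, padding=1)

    def forward(self, x):
        return torch.relu(self.conv(x))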
def test_sensors(scene, has_sem, sensor_type, sim, make_cfg_settings):
    if not osp.exists(scene):
        pytest.skip("Skipping {}".format(scene))

    make_cfg_settings = {k: v for k, v in make_cfg_settings.items()}
    make_cfg_settings["semantic_sensor"] = has_sem
    make_cfg_settings["scene"] = scene
    sim.reconfigure(make_cfg(make_cfg_settings))

    with open(
        osp.abspath(
            osp.join(
                osp.dirname(__file__),
                "gt_data",
                "{}-state.json".format(osp.basename(osp.splitext(scene)[0])),
            )
        ),
        "r",
    ) as f:
        render_state = json.load(f)
        state = habitat_sim.AgentState()
        state.position = render_state["pos"]
        state.rotation = habitat_sim.utils.quat_from_coeffs(render_state["rot"])

    sim.initialize_agent(0, state)
    obs = sim.step("move_forward")

    assert sensor_type in obs, f"{sensor_type} not in obs"

    gt = np.load(
        osp.abspath(
            osp.join(
                osp.dirname(__file__),
                "gt_data",
                "{}-{}.npy".format(
                    osp.basename(osp.splitext(scene)[0]), sensor_type
                ),
            )
        )
    )

    # Different GPUs and different driver versions will produce slightly different images
    assert np.linalg.norm(
        obs[sensor_type].astype(float) - gt.astype(float)
    ) < 1.5e-2 * np.linalg.norm(
        gt.astype(float)
    ), f"Incorrect {sensor_type} output"
def test_rgb_noise(scene, model_name, sim, make_cfg_settings):
    if not osp.exists(scene):
        pytest.skip("Skipping {}".format(scene))

    make_cfg_settings = {k: v for k, v in make_cfg_settings.items()}
    make_cfg_settings["depth_sensor"] = False
    make_cfg_settings["color_sensor"] = True
    make_cfg_settings["semantic_sensor"] = False
    make_cfg_settings["scene"] = scene
    hsim_cfg = make_cfg(make_cfg_settings)
    hsim_cfg.agents[0].sensor_specifications[0].noise_model = model_name
    sim.reconfigure(hsim_cfg)

    obs, gt = _render_and_load_gt(sim, scene, "color_sensor", False)

    assert np.linalg.norm(
        obs["color_sensor"].astype(float) - gt.astype(float)
    ) > 1.5e-2 * np.linalg.norm(gt.astype(float)), "Incorrect color_sensor output"
def test_sensors(
    scene,
    has_sem,
    sensor_type,
    gpu2gpu,
    frustum_culling,
    sim: habitat_sim.Simulator,
    make_cfg_settings,
):
    if not osp.exists(scene):
        pytest.skip("Skipping {}".format(scene))
    if not habitat_sim.cuda_enabled and gpu2gpu:
        pytest.skip("Skipping GPU->GPU test")

    make_cfg_settings = {k: v for k, v in make_cfg_settings.items()}
    make_cfg_settings["semantic_sensor"] = has_sem and sensor_type == "semantic_sensor"
    make_cfg_settings["scene"] = scene
    make_cfg_settings["frustum_culling"] = frustum_culling

    cfg = make_cfg(make_cfg_settings)
    for sensor_spec in cfg.agents[0].sensor_specifications:
        sensor_spec.gpu2gpu_transfer = gpu2gpu

    # The scene loading may be done differently with/without frustum culling,
    # thus we need to force a reload when frustum culling gets swapped
    if cfg.sim_cfg.frustum_culling != sim.config.sim_cfg.frustum_culling:
        sim.close()

    sim.reconfigure(cfg)
    obs, gt = _render_and_load_gt(sim, scene, sensor_type, gpu2gpu)

    # Different GPUs and different driver versions will produce slightly
    # different images; differences on aliased edges might also stem from how a
    # particular importer parses transforms
    assert np.linalg.norm(
        obs[sensor_type].astype(float) - gt.astype(float)
    ) < 9.0e-2 * np.linalg.norm(
        gt.astype(float)
    ), f"Incorrect {sensor_type} output"
@pytest.fixture()
def sim(make_cfg_settings):
    return habitat_sim.Simulator(make_cfg(make_cfg_settings))
def test_topdown_view(make_cfg_settings):
    with habitat_sim.Simulator(make_cfg(make_cfg_settings)) as sim:
        tdv = TopdownView(sim, height=0.0, meters_per_pixel=0.1)
        topdown_view = tdv.topdown_view
        assert type(topdown_view) == np.ndarray
def test_get_no_light_setup(make_cfg_settings):
    with habitat_sim.Simulator(make_cfg(make_cfg_settings)) as sim:
        assert len(sim.get_light_setup(NO_LIGHT_KEY)) == 0