def get_observation(self):
        """Return the sensor's latest rendered frame.

        Reads the attached render target into the pre-allocated
        ``self._buffer`` and returns a vertically flipped copy — the GL
        framebuffer origin is bottom-left, while observations are expected
        top-left.

        Returns:
            A CUDA ``torch.Tensor`` when ``gpu2gpu_transfer`` is enabled,
            otherwise a ``numpy.ndarray``. Always a fresh copy, so the
            internal buffer may be safely reused on the next call.
        """
        tgt = self._sensor_object.render_target

        if self._spec.gpu2gpu_transfer:
            # GPU-to-GPU path: the frame is copied directly into the torch
            # buffer's device memory via its raw data pointer.
            with torch.cuda.device(self._buffer.device):
                if self._spec.sensor_type == hsim.SensorType.SEMANTIC:
                    tgt.read_frame_object_id_gpu(self._buffer.data_ptr())
                elif self._spec.sensor_type == hsim.SensorType.DEPTH:
                    tgt.read_frame_depth_gpu(self._buffer.data_ptr())
                else:
                    tgt.read_frame_rgba_gpu(self._buffer.data_ptr())

                # clone() detaches the result from the reused buffer
                return self._buffer.flip(0).clone()
        else:
            size = self._sensor_object.framebuffer_size

            if self._spec.sensor_type == hsim.SensorType.SEMANTIC:
                tgt.read_frame_object_id(
                    mn.MutableImageView2D(mn.PixelFormat.R32UI, size,
                                          self._buffer))
            elif self._spec.sensor_type == hsim.SensorType.DEPTH:
                tgt.read_frame_depth(
                    mn.MutableImageView2D(mn.PixelFormat.R32F, size,
                                          self._buffer))
            else:
                # FIX: magnum's enum member is RGBA8_UNORM (with underscore);
                # RGBA8UNORM raises AttributeError. This also makes the block
                # consistent with the other definitions in this file.
                tgt.read_frame_rgba(
                    mn.MutableImageView2D(
                        mn.PixelFormat.RGBA8_UNORM,
                        size,
                        # RGBA rows are viewed as a flat (H, W*4) uint8 array
                        self._buffer.reshape(self._spec.resolution[0], -1),
                    ))

            # copy() detaches the result from the reused buffer
            return np.flip(self._buffer, axis=0).copy()
# --- Example #2 ---
    def get_observation(self) -> Union[ndarray, "Tensor"]:
        """Fetch the current frame from the render target, flip it
        vertically (GL framebuffers are bottom-left origin), and run it
        through the configured sensor noise model.

        Returns:
            A CUDA ``torch.Tensor`` when ``gpu2gpu_transfer`` is enabled,
            otherwise a ``numpy.ndarray``.
        """
        render_target = self._sensor_object.render_target
        sensor_kind = self._spec.sensor_type

        if self._spec.gpu2gpu_transfer:
            # CUDA path: copy the frame straight into the torch buffer's
            # device memory through its raw pointer.
            with torch.cuda.device(
                    self._buffer.device):  # type: ignore[attr-defined]
                ptr = self._buffer.data_ptr()  # type: ignore[attr-defined]
                if sensor_kind == SensorType.SEMANTIC:
                    render_target.read_frame_object_id_gpu(ptr)
                elif sensor_kind == SensorType.DEPTH:
                    render_target.read_frame_depth_gpu(ptr)
                else:
                    render_target.read_frame_rgba_gpu(ptr)

                obs = self._buffer.flip(0)
        else:
            fb_size = self._sensor_object.framebuffer_size

            # CPU path: wrap the numpy buffer in a magnum image view of the
            # matching pixel format and let the render target fill it.
            if sensor_kind == SensorType.SEMANTIC:
                view = mn.MutableImageView2D(mn.PixelFormat.R32UI, fb_size,
                                             self._buffer)
                render_target.read_frame_object_id(view)
            elif sensor_kind == SensorType.DEPTH:
                view = mn.MutableImageView2D(mn.PixelFormat.R32F, fb_size,
                                             self._buffer)
                render_target.read_frame_depth(view)
            else:
                view = mn.MutableImageView2D(
                    mn.PixelFormat.RGBA8_UNORM,
                    fb_size,
                    # RGBA rows viewed as a flat (H, W*4) uint8 array
                    self._buffer.reshape(self._spec.resolution[0], -1),
                )
                render_target.read_frame_rgba(view)

            obs = np.flip(self._buffer, axis=0)

        return self._noise_model(obs)
# --- Example #3 ---
    def __init__(self, sim: Simulator, agent: Agent, sensor_id: str) -> None:
        """Bind to one of the agent's sensors and pre-allocate the frame
        buffer the observations will be read into.

        Args:
            sim: Owning simulator; supplies the renderer and GPU device id.
            agent: Agent whose sensor (looked up by ``sensor_id``) this
                wrapper reads from.
            sensor_id: Key into the agent's ``_sensors`` mapping.
        """
        self._sim = sim
        self._agent = agent

        # sensor is an attached object to the scene node
        # store such "attached object" in _sensor_object
        self._sensor_object = self._agent._sensors[sensor_id]

        self._spec = self._sensor_object.specification()

        if self._sim.renderer is not None:
            self._sim.renderer.bind_render_target(self._sensor_object)

        if self._spec.gpu2gpu_transfer:
            # GPU-to-GPU path: allocate a CUDA torch tensor sized to the
            # sensor resolution; frames are later copied in via data_ptr().
            assert cuda_enabled, "Must build habitat sim with cuda for gpu2gpu-transfer"
            assert _HAS_TORCH
            device = torch.device(
                "cuda", self._sim.gpu_device)  # type: ignore[attr-defined]
            # NOTE: global side effect — makes this sensor's GPU the current
            # CUDA device for the whole process.
            torch.cuda.set_device(device)

            resolution = self._spec.resolution
            # dtype mirrors what the render target writes: int32 ids for
            # SEMANTIC, float32 for DEPTH, and 4-channel uint8 for color.
            if self._spec.sensor_type == SensorType.SEMANTIC:
                self._buffer: Union[np.ndarray,
                                    "Tensor"] = torch.empty(resolution[0],
                                                            resolution[1],
                                                            dtype=torch.int32,
                                                            device=device)
            elif self._spec.sensor_type == SensorType.DEPTH:
                self._buffer = torch.empty(resolution[0],
                                           resolution[1],
                                           dtype=torch.float32,
                                           device=device)
            else:
                self._buffer = torch.empty(resolution[0],
                                           resolution[1],
                                           4,
                                           dtype=torch.uint8,
                                           device=device)
        else:
            # CPU path: allocate a numpy buffer plus a magnum image view over
            # it (self.view) that the render target can be read into.
            size = self._sensor_object.framebuffer_size
            if self._spec.sensor_type == SensorType.SEMANTIC:
                self._buffer = np.empty(
                    (self._spec.resolution[0], self._spec.resolution[1]),
                    dtype=np.uint32,
                )
                self.view = mn.MutableImageView2D(mn.PixelFormat.R32UI, size,
                                                  self._buffer)
            elif self._spec.sensor_type == SensorType.DEPTH:
                self._buffer = np.empty(
                    (self._spec.resolution[0], self._spec.resolution[1]),
                    dtype=np.float32,
                )
                self.view = mn.MutableImageView2D(mn.PixelFormat.R32F, size,
                                                  self._buffer)
            else:
                self._buffer = np.empty(
                    (
                        self._spec.resolution[0],
                        self._spec.resolution[1],
                        self._spec.channels,
                    ),
                    dtype=np.uint8,
                )
                # magnum wants rows as flat byte spans, hence the (H, W*C)
                # reshape of the same underlying memory.
                self.view = mn.MutableImageView2D(
                    mn.PixelFormat.RGBA8_UNORM,
                    size,
                    self._buffer.reshape(self._spec.resolution[0], -1),
                )

        # Build the noise model applied to every observation; it is given the
        # GPU id so GPU-based noise models can match the buffer's device.
        noise_model_kwargs = self._spec.noise_model_kwargs
        self._noise_model = make_sensor_noise_model(
            self._spec.noise_model,
            {
                "gpu_device_id": self._sim.gpu_device,
                **noise_model_kwargs
            },
        )
        assert self._noise_model.is_valid_sensor_type(
            self._spec.sensor_type
        ), "Noise model '{}' is not valid for sensor '{}'".format(
            self._spec.noise_model, self._spec.uuid)