Example #1
    def __call__(self, ros_point_cloud2: PointCloud2) -> torch.Tensor:
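        """Convert a ROS PointCloud2 message into an octree tensor."""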

        # Convert to Open3D PointCloud
        open3d_point_cloud = conversions.pointcloud2_to_open3d(
            ros_point_cloud2=ros_point_cloud2)

        # Preprocess the point cloud (transform to the robot frame, crop to the workspace, and estimate normals)
        open3d_point_cloud = self.preprocess_point_cloud(
            open3d_point_cloud=open3d_point_cloud,
            camera_frame_id=ros_point_cloud2.header.frame_id,
            robot_frame_id=self._robot_frame_id,
            min_bound=self._min_bound,
            max_bound=self._max_bound,
            normals_radius=self._normals_radius,
            normals_max_nn=self._normals_max_nn)

        # Visualize the point cloud and origin frame if debug drawing is enabled
        if self._debug_draw:
            open3d.visualization.draw_geometries(
                [
                    open3d_point_cloud,
                    open3d.geometry.TriangleMesh.create_coordinate_frame(
                        size=0.2, origin=[0.0, 0.0, 0.0])
                ],
                point_show_normal=True)

        # Construct an octree from the preprocessed point cloud
        octree = self.construct_octree(open3d_point_cloud,
                                       include_color=self._include_color)

        # Write the octree to disk if debug output is enabled
        if self._debug_write_octree:
            ocnn.write_octree(octree, 'octree.octree')

        return octree
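
A minimal usage sketch of the converter above, assuming it is exposed as a callable object and fed from a ROS 2 subscription; the node class, the octree_creator argument, and the topic name /camera/points are hypothetical placeholders, not part of the original code.

import rclpy
from rclpy.node import Node
from sensor_msgs.msg import PointCloud2


class OctreeCreatorNode(Node):
    """Converts every incoming point cloud into an octree tensor."""

    def __init__(self, octree_creator):
        super().__init__('octree_creator_node')
        # octree_creator implements the __call__ shown above (hypothetical name)
        self._octree_creator = octree_creator
        self.create_subscription(PointCloud2, '/camera/points',
                                 self._on_point_cloud, 1)

    def _on_point_cloud(self, msg: PointCloud2):
        octree = self._octree_creator(msg)
        self.get_logger().info(
            f'Created octree tensor with {octree.numel()} elements')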
Example #2
    def predict(
        self,
        observation: np.ndarray,
        state: Optional[np.ndarray] = None,
        mask: Optional[np.ndarray] = None,
        deterministic: bool = False,
    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
        """
        Overridden to create a proper Octree batch.
        Get the policy action and state from an observation (and optional state).

        :param observation: the input observation
        :param state: The last states (can be None, used in recurrent policies)
        :param mask: The last masks (can be None, used in recurrent policies)
        :param deterministic: Whether or not to return deterministic actions.
        :return: the model's action and the next state
            (used in recurrent policies)
        """
        if isinstance(observation, dict):
            observation = ObsDictWrapper.convert_dict(observation)
        else:
            observation = np.array(observation)

        # Determine whether the observation comes from a vectorized environment
        vectorized_env = is_vectorized_observation(observation,
                                                   self.observation_space)

        # Write the most recent octree in the stack to disk if debug output is enabled
        if self._debug_write_octree:
            ocnn.write_octree(th.from_numpy(observation[-1]), 'octree.octree')

        # Make a batch out of the tensor (consisting of n stacked octrees)
        octree_batch = preprocess_stacked_octree_batch(
            observation,
            self.device,
            separate_batches=self._separate_networks_for_stacks)

        with th.no_grad():
            actions = self._predict(octree_batch, deterministic=deterministic)
        # Convert to numpy
        actions = actions.cpu().numpy()

        if isinstance(self.action_space, gym.spaces.Box):
            if self.squash_output:
                # Rescale to proper domain when using squashing
                actions = self.unscale_action(actions)
            else:
                # Actions could be on an arbitrary scale, so clip them to avoid an
                # out-of-bounds error (e.g. when sampling from a Gaussian distribution)
                actions = np.clip(actions, self.action_space.low,
                                  self.action_space.high)

        if not vectorized_env:
            if state is not None:
                raise ValueError(
                    "Error: The environment must be vectorized when using recurrent policies."
                )
            actions = actions[0]

        return actions, state
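
A minimal evaluation-loop sketch using the predict() above; the evaluate helper and its model and env arguments are hypothetical placeholders (model is any policy exposing this predict(), env a vectorized gym environment yielding stacked-octree observations).

import numpy as np


def evaluate(model, env, num_steps: int = 1000) -> float:
    """Roll out the policy in a vectorized env and return the mean step reward."""
    obs = env.reset()
    state = None
    rewards = []
    for _ in range(num_steps):
        # Thread the recurrent state back into predict() on each step
        action, state = model.predict(obs, state=state, deterministic=True)
        obs, reward, done, info = env.step(action)
        rewards.append(np.mean(reward))
    return float(np.mean(rewards))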