Example 1
    def depth_to_pc(self, depth_img, pad=0.05):
        """Convert a depth image to a normalized point-cloud representation.

        Args:
            depth_img (array): depth image.
            pad (float): fractional padding applied when scaling the cloud
              toward the [-1, 1] cube.
        Returns:
            Point cloud of the hand, centered and scaled by the padded
            (u, v) extent, with the depth axis shifted by +0.5.
        """

        # Lift depth pixels into a point cloud via the shared helper
        # (expects a leading batch dimension, hence the unsqueeze/squeeze).
        cloud = get_point_cloud(
            torch.from_numpy(depth_img).unsqueeze(0), self.num_points, 0
        )
        cloud = cloud.squeeze(0).numpy()

        # Center on the bounding-box midpoint, then divide by the padded
        # dominant (u, v) extent so the cloud lands near the unit cube.
        lo, hi = cloud.min(0), cloud.max(0)
        extent = np.abs(hi - lo)
        midpoint = (hi + lo) / 2
        divisor = extent[:2].max() * (0.5 + pad)
        normalized = (cloud - midpoint) / divisor
        normalized[:, 2] += 0.5

        return normalized
Example 2
    def depth_to_pc(self, depth_img, bbox, padding):
        """Convert a cropped depth image into a point cloud in xyz space.

        Args:
            depth_img (array): depth image.
            bbox (float, array): bounding box of the hand in (u, v, d).
            padding (int, array): row and column padding added to the cropped
              image from earlier pre-processing.
        Returns:
            Point cloud representation of hand.
        """

        # Offsets that undo the earlier crop/pad so pixel coordinates
        # become image-global again.
        u_offset = bbox[0] - padding[1][0]
        v_offset = bbox[2] - padding[0][0]

        # Sample 1024 points from the depth map (helper expects a batch dim).
        points = get_point_cloud(
            torch.from_numpy(depth_img).unsqueeze(0), 1024, 0
        )
        points = points.squeeze(0).numpy()
        points[:, 0] += u_offset
        points[:, 1] += v_offset

        # Project (u, v, d) pixel coordinates to xyz for a 480x640 image.
        return self.uvd_to_xyz(points, 480, 640)