Example #1
import numpy as np

from pylot.utils import Location, Rotation, Transform


def test_vector_to_camera_view(location, expected):
    from pylot.drivers.sensor_setup import CameraSetup
    camera_setup = CameraSetup('test_camera',
                               'sensor.camera.rgb',
                               width=1920,
                               height=1080,
                               transform=Transform(Location(), Rotation()))
    location = Location(*location)
    assert all(
        np.isclose(
            location.to_camera_view(
                camera_setup.get_extrinsic_matrix(),
                camera_setup.get_intrinsic_matrix()).as_numpy_array(),
            expected)), "The camera transformation was not as expected."
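As written, the test expects `location` and `expected` to be supplied by a `pytest.mark.parametrize` decorator that is not shown above. A minimal sketch of such a parametrization follows; the sample values are illustrative assumptions (they presume the principal point sits at ((width - 1) / 2, (height - 1) / 2) and that locations use Unreal's x-forward convention), not cases taken from the original suite:

import pytest

@pytest.mark.parametrize(
    'location, expected',
    [
        # Assumed case: a point 10 m straight ahead of an origin-mounted
        # camera should project near the principal point of the 1920x1080
        # image, with the forward distance preserved as depth.
        ((10, 0, 0), (959.5, 539.5, 10.0)),
    ])
def test_vector_to_camera_view(location, expected):
    ...  # body as in Example #1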
Example #2
    def get_pixel_location(self, pixel, camera_setup: CameraSetup):
        """ Gets the 3D world location from pixel coordinates.

        Args:
            pixel (:py:class:`~pylot.utils.Vector2D`): Pixel coordinates.
            camera_setup (:py:class:`~pylot.drivers.sensor_setup.CameraSetup`):
                The setup of the camera with its transform in the world frame
                of reference.

        Returns:
            :py:class:`~pylot.utils.Location`: The 3D world location, or None
            if all the points in the point cloud are behind the camera.
        """
        # Select only points that are in front.
        # Setting the threshold to 0.1 because super close points cause
        # floating point errors.
        fwd_points = self.points[np.where(self.points[:, 2] > 0.1)]
        if len(fwd_points) == 0:
            return None
        intrinsic_mat = camera_setup.get_intrinsic_matrix()
        # Project our 2D pixel location into 3D space, onto the z=1 plane.
        p3d = np.dot(inv(intrinsic_mat), np.array([[pixel.x], [pixel.y],
                                                   [1.0]]))
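        # (Since the bottom row of the intrinsic matrix is [0, 0, 1], the
        # resulting ray has z = 1, i.e. it lies on the z = 1 plane.)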

        if self._lidar_setup.lidar_type == 'sensor.lidar.ray_cast':
            location = PointCloud.get_closest_point_in_point_cloud(
                fwd_points, Vector2D(p3d[0], p3d[1]), normalized=True)
            # Use the depth from the retrieved location.
            p3d *= np.array([location.z])
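            # p3d started with z = 1, so scaling every component by the depth
            # yields the full 3D point in camera coordinates.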
            p3d = p3d.transpose()
            # Convert from camera to unreal coordinates if the lidar type is
            # sensor.lidar.ray_cast
            to_world_transform = camera_setup.get_unreal_transform()
            camera_point_cloud = to_world_transform.transform_points(p3d)[0]
            pixel_location = Location(camera_point_cloud[0],
                                      camera_point_cloud[1],
                                      camera_point_cloud[2])
        elif self._lidar_setup.lidar_type == 'velodyne':
            location = PointCloud.get_closest_point_in_point_cloud(
                fwd_points, Vector2D(p3d[0], p3d[1]), normalized=False)
            # Use the depth from the retrieved location.
            p3d[2] = location.z
            p3d = p3d.transpose()
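            # Unlike the ray_cast branch, no camera-to-Unreal coordinate
            # conversion is applied before building the Location.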
            pixel_location = Location(p3d[0, 0], p3d[0, 1], p3d[0, 2])
        else:
            # Fail fast on unsupported lidar types; otherwise pixel_location
            # would be undefined at the return below.
            raise ValueError('Unsupported lidar type: {}'.format(
                self._lidar_setup.lidar_type))
        return pixel_location
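A minimal usage sketch, assuming `point_cloud` is an existing PointCloud built from lidar data and `camera_setup` is the CameraSetup of the camera the pixel came from (both names are hypothetical here):

from pylot.utils import Vector2D

# Back-project the centre pixel of a 1920x1080 camera into the world.
pixel = Vector2D(960, 540)
world_location = point_cloud.get_pixel_location(pixel, camera_setup)
if world_location is None:
    print('All lidar points are behind the camera.')
else:
    print('Pixel back-projects to {}'.format(world_location))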