Example #1
def main():
    # World box.
    points_w = vg.utils.generate_box()

    # Define common camera.
    w = 640
    h = 480
    focal_lengths = 0.75 * h * np.ones((2, 1))
    principal_point = 0.5 * np.array([[w, h]]).T
    camera = PerspectiveCamera(focal_lengths, principal_point)

    # Generate a set of camera measurements.
    true_pose_w_c = PerspectiveCamera.looks_at_pose(
        np.array([[3, -4, 0]]).T, np.zeros((3, 1)),
        np.array([[0, 0, 1]]).T)
    measurement = PrecalibratedCameraMeasurementsFixedWorld.generate(camera, true_pose_w_c, points_w)

    # Construct model from measurements.
    model = PrecalibratedMotionOnlyBAObjective(measurement)

    # Perturb the true camera pose and use it as the initial state.
    init_pose_w_c = true_pose_w_c + 0.3 * np.random.randn(6, 1)

    # Estimate pose in the world frame from point correspondences.
    x, cost, A, b = levenberg_marquardt(init_pose_w_c, model)
    cov_x_final = np.linalg.inv(A.T @ A)

    # Print covariance.
    with np.printoptions(precision=3, suppress=True):
        print('Covariance:')
        print(cov_x_final)

    # Visualise
    visualise_moba(true_pose_w_c, points_w, measurement, x, cost)
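
The 6x6 covariance above is expressed over the 6-dimensional pose perturbation. A minimal follow-up sketch, assuming the parameter ordering of cov_x_final matches the perturbation vector, that reads off the marginal standard deviation of each pose parameter:

    # Marginal standard deviations of the six pose parameters
    # (local perturbation coordinates). Uses cov_x_final from above.
    std_devs = np.sqrt(np.diag(cov_x_final))
    print('Per-parameter std. dev.:', std_devs)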
Example #2
    @staticmethod
    def _load_camera(config: Dict) -> Camera:
        '''
        Loads a Camera from the given Camera configuration.

        Args:
            config: Camera configuration.
        
        Returns:
            Camera instance as specified by the Camera configuration.
        '''
        assert 'Type' in config, 'Scope "Camera" in configuration file is missing key "Type".'
        if config['Type'] == 'Identity':
            return IdentityCamera()
        elif config['Type'] == 'Perspective':
            for key in ('Position', 'Direction', 'Field of View', 'Resolution',
                        'Exposure'):
                assert key in config, f'Scope "Camera" in configuration file is missing key "{key}".'
            # Hoist the device lookup instead of repeating it per tensor.
            device = utils.get_device_name()
            return PerspectiveCamera(
                position=torch.tensor(config['Position'],
                                      dtype=torch.float, device=device),
                direction=torch.tensor(config['Direction'],
                                       dtype=torch.float, device=device),
                field_of_view=torch.tensor(config['Field of View'],
                                           dtype=torch.float, device=device),
                resolution=torch.tensor(config['Resolution'], device=device),
                exposure=config['Exposure'])
        else:
            raise ValueError(f'Camera type "{config["Type"]}" is not supported.')
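
For reference, a minimal configuration dict that satisfies the asserts above; the concrete values are illustrative placeholders, not taken from any real scene file:

    config = {
        'Type': 'Perspective',
        'Position': [0.0, 0.0, -5.0],
        'Direction': [0.0, 0.0, 1.0],
        'Field of View': 60.0,
        'Resolution': [640, 480],
        'Exposure': 1.0,
    }
    camera = _load_camera(config)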
Example #3
def main():
    # World box.
    true_points_w = vg.utils.generate_box()

    # Define common camera parameters.
    w = 640
    h = 480
    focal_lengths = 0.75 * h * np.ones((2, 1))
    principal_point = 0.5 * np.array([[w, h]]).T
    camera = PerspectiveCamera(focal_lengths, principal_point)

    # Define a set of cameras.
    true_poses_w_c = [
        PerspectiveCamera.looks_at_pose(
            np.array([[3, -4, 0]]).T, np.zeros((3, 1)),
            np.array([[0, 0, 1]]).T),
        PerspectiveCamera.looks_at_pose(
            np.array([[3, 4, 0]]).T, np.zeros((3, 1)),
            np.array([[0, 0, 1]]).T)
    ]

    # Generate a set of camera measurements.
    measurements = [
        PrecalibratedCameraMeasurementsFixedCamera.generate(camera, pose, true_points_w)
        for pose in true_poses_w_c
    ]

    # Construct model from measurements.
    model = PrecalibratedStructureOnlyBAObjective(measurements)

    # Perturb world points and use as initial state.
    init_points_w = [
        true_points_w[:, [i]] + 0.3 * np.random.randn(3, 1)
        for i in range(true_points_w.shape[1])
    ]
    init_state = CompositeStateVariable(init_points_w)

    # Estimate the world points from the camera measurements.
    x, cost, A, b = levenberg_marquardt(init_state, model)
    cov_x_final = np.linalg.inv(A.T @ A)

    # Print covariance.
    with np.printoptions(precision=3, suppress=True):
        print('Covariance:')
        print(cov_x_final)

    # Visualise
    visualise_soba(true_poses_w_c, true_points_w, measurements, x, cost)
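
Since the composite state stacks the world points, the covariance computed above is sized 3n x 3n. A small sketch, assuming the points appear in order along the state vector with three parameters each, for reading out the marginal covariance of a single point:

    # 3x3 marginal covariance block of world point i (assumes points are
    # stacked in order, three parameters per point).
    i = 0
    cov_point_i = cov_x_final[3 * i:3 * i + 3, 3 * i:3 * i + 3]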
Example #4
def demo(width, height, angle_deg, orthogonal, pfm_output, png_output):
    image = HdrImage(width, height)

    # Create a world and populate it with a few shapes
    world = World()

    for x in [-0.5, 0.5]:
        for y in [-0.5, 0.5]:
            for z in [-0.5, 0.5]:
                world.add(
                    Sphere(transformation=translation(Vec(x, y, z)) *
                           scaling(Vec(0.1, 0.1, 0.1))))

    # Place two other balls in the bottom/left part of the cube, so
    # that we can check if there are issues with the orientation of
    # the image
    world.add(
        Sphere(transformation=translation(Vec(0.0, 0.0, -0.5)) *
               scaling(Vec(0.1, 0.1, 0.1))))
    world.add(
        Sphere(transformation=translation(Vec(0.0, 0.5, 0.0)) *
               scaling(Vec(0.1, 0.1, 0.1))))

    # Initialize a camera
    camera_tr = rotation_z(angle_deg=angle_deg) * translation(
        Vec(-1.0, 0.0, 0.0))
    if orthogonal:
        camera = OrthogonalCamera(aspect_ratio=width / height,
                                  transformation=camera_tr)
    else:
        camera = PerspectiveCamera(aspect_ratio=width / height,
                                   transformation=camera_tr)

    # Run the ray-tracer

    tracer = ImageTracer(image=image, camera=camera)

    def compute_color(ray: Ray) -> Color:
        if world.ray_intersection(ray):
            return WHITE
        else:
            return BLACK

    tracer.fire_all_rays(compute_color)

    # Save the HDR image
    with open(pfm_output, "wb") as outf:
        image.write_pfm(outf)
    print(f"HDR demo image written to {pfm_output}")

    # Apply tone-mapping to the image
    image.normalize_image(factor=1.0)
    image.clamp_image()

    # Save the LDR image
    with open(png_output, "wb") as outf:
        image.write_ldr_image(outf, "PNG")
    print(f"PNG demo image written to {png_output}")
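
A usage sketch for the function above; the resolution, rotation angle, and output file names are illustrative placeholders:

if __name__ == "__main__":
    # Render a perspective view of the demo scene and write both outputs.
    demo(width=640, height=480, angle_deg=0.0, orthogonal=False,
         pfm_output="demo.pfm", png_output="demo.png")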
Example #5
    def test_perspective_camera(self):
        cam = PerspectiveCamera(screen_distance=1.0, aspect_ratio=2.0)

        # Fire one ray for each corner of the image plane
        ray1 = cam.fire_ray(0.0, 0.0)
        ray2 = cam.fire_ray(1.0, 0.0)
        ray3 = cam.fire_ray(0.0, 1.0)
        ray4 = cam.fire_ray(1.0, 1.0)

        # Verify that all the rays depart from the same point
        assert ray1.origin.is_close(ray2.origin)
        assert ray1.origin.is_close(ray3.origin)
        assert ray1.origin.is_close(ray4.origin)

        # Verify that the rays hitting the corners have the right coordinates
        assert ray1.at(1.0).is_close(Point(0.0, 2.0, -1.0))
        assert ray2.at(1.0).is_close(Point(0.0, -2.0, -1.0))
        assert ray3.at(1.0).is_close(Point(0.0, 2.0, 1.0))
        assert ray4.at(1.0).is_close(Point(0.0, -2.0, 1.0))
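
The four assertions pin down the screen mapping completely. A reconstruction of a fire_ray consistent with them, assuming the untransformed camera sits at (-d, 0, 0) looking down +x with d = screen_distance; this is inferred from the test, not the library's actual implementation, and Ray's origin/dir keywords are likewise assumed:

    def fire_ray_sketch(self, u: float, v: float) -> Ray:
        # (u, v) in [0, 1]^2 parametrise the screen; the camera
        # transformation is ignored in this sketch.
        origin = Point(-self.screen_distance, 0.0, 0.0)
        direction = Vec(self.screen_distance,
                        self.aspect_ratio * (1.0 - 2.0 * u),
                        2.0 * v - 1.0)
        return Ray(origin=origin, dir=direction)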
Example #6
    @classmethod
    def generate(cls, camera: PerspectiveCamera, true_pose_w_c: SE3, true_points_w: np.ndarray):
        """Generates a 2D-3D measurement.

        :param camera: A PerspectiveCamera representing the camera that performed the measurement.
        :param true_pose_w_c: The true pose of the camera in the world frame.
        :param true_points_w: The true world points.
        :return: The generated measurement.
        """
        num_points = true_points_w.shape[1]

        # Generate observations in pixels.
        u = camera.project_to_pixel(true_pose_w_c.inverse() * true_points_w)
        covs_u = [np.diag(np.array([2, 2]) ** 2)] * num_points  # Same for all observations.

        # Add noise according to uncertainty.
        for c in range(num_points):
            u[:2, [c]] = u[:2, [c]] + np.random.multivariate_normal(np.zeros(2), covs_u[c]).reshape(-1, 1)

        # Construct measurement.
        return cls(camera, true_pose_w_c, u, covs_u)
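
Since every covariance above is the same isotropic diag(2^2, 2^2), the per-point noise loop is equivalent to a single vectorised draw; a minimal sketch of that simplification:

        # Equivalent vectorised noise: 2-pixel standard deviation on both
        # pixel axes for all observations at once.
        u[:2, :] = u[:2, :] + 2.0 * np.random.randn(2, num_points)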
Example #7
    def __init__(self, camera: PerspectiveCamera, u: np.ndarray, covs_u: list):
        """Constructs the 2D-3D measurement

        :param camera: A PerspectiveCamera representing the camera that performed the measurement.
        :param u: A 2xn matrix of n pixel observations.
        :param covs_u: A list of covariance matrices representing the uncertainty in each pixel observation.
        """

        self.camera = camera

        # Transform to the normalised image plane.
        self.xn = camera.pixel_to_normalised(u)

        # Propagate uncertainty, and precompute square root of information matrices.
        self.num = u.shape[1]
        self.covs = [self.camera.pixel_cov_to_normalised_com(cov_u) for cov_u in covs_u]
        self.sqrt_inv_covs = [scipy.linalg.sqrtm(scipy.linalg.inv(cov)) for cov in self.covs]
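
For a skew-free pinhole camera the pixel-to-normalised mapping is affine, xn = K^-1 u, so the covariance propagates through its 2x2 Jacobian. A sketch of what pixel_cov_to_normalised_com plausibly computes; the focal_lengths attribute and the zero-skew model are both assumptions about the camera class, not its documented API:

    def pixel_cov_to_normalised_sketch(self, cov_u: np.ndarray) -> np.ndarray:
        # Jacobian of the affine pixel -> normalised mapping for a
        # skew-free pinhole camera: diag(1/fu, 1/fv).
        jac = np.diag(1.0 / self.focal_lengths.flatten())
        return jac @ cov_u @ jac.T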
Example #8
def parse_camera(input_file: InputStream, scene) -> Camera:
    expect_symbol(input_file, "(")
    type_kw = expect_keywords(
        input_file, [KeywordEnum.PERSPECTIVE, KeywordEnum.ORTHOGONAL])
    expect_symbol(input_file, ",")
    transformation = parse_transformation(input_file, scene)
    expect_symbol(input_file, ",")
    aspect_ratio = expect_number(input_file, scene)
    expect_symbol(input_file, ",")
    distance = expect_number(input_file, scene)
    expect_symbol(input_file, ")")

    if type_kw == KeywordEnum.PERSPECTIVE:
        result = PerspectiveCamera(screen_distance=distance,
                                   aspect_ratio=aspect_ratio,
                                   transformation=transformation)
    else:
        # expect_keywords guarantees the keyword is one of the two,
        # so `result` is always bound.
        assert type_kw == KeywordEnum.ORTHOGONAL
        result = OrthogonalCamera(aspect_ratio=aspect_ratio,
                                  transformation=transformation)

    return result
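
Given the token order the parser expects (camera keyword, transformation, aspect ratio, screen distance), a scene-file fragment it should accept might look like this; the concrete numbers are illustrative:

    camera(perspective, rotation_z(30) * translation([-4, 0, 1]), 1.0, 2.0)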
Example #9
    def setUp(self) -> None:
        self.image = HdrImage(width=4, height=2)
        self.camera = PerspectiveCamera(aspect_ratio=2)
        self.tracer = ImageTracer(image=self.image, camera=self.camera)
Example #10
    def test_perspective_camera_transform(self):
        cam = PerspectiveCamera(transformation=translation(-VEC_Y * 2.0) *
                                rotation_z(pi / 2.0))

        ray = cam.fire_ray(0.5, 0.5)
        assert ray.at(1.0).is_close(Point(0.0, -2.0, 0.0))
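
Why (0.0, -2.0, 0.0): a step-by-step check of the composed transformation, assuming the default screen distance of 1 and the corner-ray convention from the earlier test:

        # Untransformed centre ray (u = v = 0.5): origin (-1, 0, 0),
        # direction (1, 0, 0), so at(1.0) would reach (0, 0, 0).
        # rotation_z(pi / 2) maps (x, y, z) -> (-y, x, z), which leaves
        # (0, 0, 0) fixed; translation(-VEC_Y * 2.0) then shifts it by
        # (0, -2, 0), giving the asserted point (0.0, -2.0, 0.0).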