Code example #1
File: stardb.py  Project: matthewearl/Telescope-Guide
def angles_to_vec(ra, dec):
    # Rotate the +z unit vector (in homogeneous coordinates) first by -dec
    # about the x axis and then by -ra about the y axis, using the project's
    # util rotation helpers and numpy's matrix type.
    out = util.matrix_rotate_y(-ra) * \
            util.matrix_rotate_x(-dec) * \
            matrix([[0., 0., 1., 0.]]).T

    # Drop the homogeneous component and return the 3x1 direction vector.
    return out[:3, :]
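
A minimal usage sketch (not part of the project), assuming ra and dec are given in radians and that util's rotation helpers return 4x4 homogeneous rotation matrices:

import math

# Hypothetical call: RA = 6h (90 degrees), Dec = +30 degrees, both in radians.
ra = math.radians(90.0)
dec = math.radians(30.0)
v = angles_to_vec(ra, dec)

# Rotations preserve length, so the returned 3x1 column vector is unit length.
print(v)
print(float((v.T * v)[0, 0]))  # approximately 1.0
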
Code example #2
File: fit_lm.py  Project: matthewearl/Telescope-Guide
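# Note: this snippet assumes fit_lm.py's module-level context: numpy names such
# as matrix, hstack, zeros and numpy.linalg, the project's util module, and the
# helpers/constants calculate_barrel_distortion, make_jacobian, matrix_normalize,
# STEP_FRACTION and ERROR_CUTOFF defined or imported elsewhere in that file.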
def solve(world_points_in, image_points, annotate_images=None,
          initial_matrices=None, initial_bd=0., initial_ps=3000.,
          change_ps=False, change_bd=False, change_pos=True):
    """
    Find a camera's orientation and pixel scale given a set of world
    coordinates and corresponding set of camera coordinates.

    world_points_in: Map of point names to triples giving world (x, y, z)
                     coordinates.
    image_points: Iterable of dicts mapping point names to pairs
                  corresponding with camera x, y coordinates. Coordinates
                  should be translated such that 0, 0 corresponds with the
                  centre of the image, and Y coordinates increase going top to
                  bottom.  One element per source image.
    annotate_images: Optional iterable of images to annotate with the fitted
                     points.
    initial_matrices: Optional iterable of initial rotation matrices.
    initial_bd: Optional initial barrel distortion to use.
    initial_ps: Optional initial pixel scale to use.
    change_ps: If True, allow the pixel scale (zoom) to be varied. The
               algorithm can be unstable if the initial guess is inaccurate.
    change_bd: If True, allow the barrel distortion to be varied. The
               algorithm can be unstable if the initial guess is inaccurate.
    change_pos: If True, allow the camera position to be varied.

    Return: List of 4x4 matrices (one per source image) representing each
            camera's orientation, the fitted pixel scale, and the fitted
            barrel distortion.
    """

    assert all(set(world_points_in.keys()) >= set(p.keys()) for p in image_points)
    keys = [list(p.keys()) for p in image_points]
    # world_points: one 4xN matrix of homogeneous points per image;
    # image_points: all measured image points stacked into a single 2xN matrix
    # (same column order).
    world_points = [hstack([matrix(list(world_points_in[k]) + [1.0]).T for k in sub_keys])
                    for sub_keys in keys]
    image_points = hstack([hstack([matrix(p[k]).T for k in sub_keys])
                           for p, sub_keys in zip(image_points, keys)])

    print(image_points)

    if initial_matrices:
        current_mat = [m for m in initial_matrices]
    else:
        current_mat = [util.matrix_trans(0.0, 0.0, 500.0)] * len(keys)
    current_ps = initial_ps
    current_bd = initial_bd

    def camera_to_image(m, ps, bd):
        # Perspective-project each camera-space point, scale by the pixel
        # scale, then apply barrel distortion to get image coordinates.
        def map_point(c):
            px, py = calculate_barrel_distortion(
                bd, ps * c[0, 0] / c[0, 2], ps * c[0, 1] / c[0, 2])
            return [px, py]
        return matrix([map_point(c) for c in m.T]).T

    last_err_float = None
    while True:
        # Calculate the Jacobian
        camera_points = hstack([m * p for m, p in zip(current_mat, world_points)])
        err = image_points - camera_to_image(camera_points, current_ps, current_bd)
        J = make_jacobian(camera_points.T[:, :3], keys, current_ps, current_bd)
        # Drop or zero Jacobian columns for parameters that are held fixed.
        if not change_ps:
            J = hstack([J[:, :-2], J[:, -1:]])  # remove the pixel-scale column
        if not change_bd:
            J = J[:, :-1]                       # remove the barrel-distortion column
        if not change_pos:
            for i in range(len(keys)):
                J[:, (6 * i):(6 * i + 3)] = zeros((J.shape[0], 3))

        # Pseudo-invert the Jacobian and calculate the change in parameters.
        # The step is damped by STEP_FRACTION to avoid chaotic behaviour.
        err = err.T.reshape(2 * sum(len(sub_keys) for sub_keys in keys), 1)
        param_delta = numpy.linalg.pinv(J) * (STEP_FRACTION * err)

        # Calculate the error (as sum of squares), and abort if the error has
        # stopped decreasing.
        err_float = (err.T * err)[0, 0]
        print "Error: %f" % err_float
        print err
        if last_err_float != None and abs(err_float - last_err_float) < ERROR_CUTOFF:
            break
        last_err_float = err_float

        # Apply the parameter delta.
        for i in range(len(keys)):
            if change_pos:
                current_mat[i] = util.matrix_trans(
                    param_delta[6 * i + 0, 0],
                    param_delta[6 * i + 1, 0],
                    param_delta[6 * i + 2, 0]) * current_mat[i]
            current_mat[i] = util.matrix_rotate_x(param_delta[6 * i + 3, 0]) * current_mat[i]
            current_mat[i] = util.matrix_rotate_y(param_delta[6 * i + 4, 0]) * current_mat[i]
            current_mat[i] = util.matrix_rotate_z(param_delta[6 * i + 5, 0]) * current_mat[i]
            matrix_normalize(current_mat[i])

        if change_ps:
            current_ps += param_delta[6 * len(keys), 0]

        if change_bd:
            current_bd += param_delta[6 * len(keys) + 1, 0]

    if annotate_images:
        # Draw every known world point into each source image using the
        # fitted orientation, pixel scale and barrel distortion.
        all_keys = list(world_points_in.keys())
        all_world_points = hstack([matrix(list(world_points_in[k]) + [1.0]).T
                                   for k in all_keys])
        for i, annotate_image in enumerate(annotate_images):
            all_camera_points = current_mat[i] * all_world_points
            util.draw_points(annotate_image,
                             dict(zip(all_keys, camera_to_image(all_camera_points, current_ps, current_bd).T)))
    
    return current_mat, current_ps, current_bd
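
A minimal usage sketch for solve, with hypothetical point names and coordinates (the data below is illustrative, not taken from the project); only the camera pose is varied, while the default pixel scale and barrel distortion are kept fixed:

# Hypothetical calibration data: world coordinates of four reference points and
# their measured image positions, relative to the image centre (y grows downward).
world = {
    "corner_a": (0.0, 0.0, 0.0),
    "corner_b": (100.0, 0.0, 0.0),
    "corner_c": (0.0, 100.0, 0.0),
    "corner_d": (100.0, 100.0, 0.0),
}
images = [
    {"corner_a": (-250.0, -180.0),
     "corner_b": (240.0, -175.0),
     "corner_c": (-245.0, 190.0),
     "corner_d": (235.0, 185.0)},
]

mats, ps, bd = solve(world, images, change_ps=False, change_bd=False, change_pos=True)
print(mats[0])  # fitted 4x4 matrix for the first (and only) source image
print(ps, bd)   # pixel scale and barrel distortion (unchanged here)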
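
The loop in solve applies a damped Gauss-Newton style update: the residual between measured and projected image points is multiplied by the pseudo-inverse of the Jacobian to obtain a parameter step, scaled by STEP_FRACTION. Below is a stripped-down sketch of the same update pattern on an unrelated, illustrative least-squares problem (line fitting), using plain NumPy arrays rather than the project's data structures:

import numpy as np

# Illustrative problem: fit a, b in y = a*x + b to a few noisy samples.
xs = np.array([0.0, 1.0, 2.0, 3.0])
ys = np.array([1.1, 2.9, 5.2, 6.8])

def residual(params):
    a, b = params
    return ys - (a * xs + b)          # measured minus modelled, as in solve()

def jacobian(params):
    # Derivative of the model w.r.t. (a, b) for every sample.
    return np.column_stack([xs, np.ones_like(xs)])

step_fraction = 0.5                   # damping factor, analogous to STEP_FRACTION
params = np.zeros(2)
for _ in range(50):
    delta = np.linalg.pinv(jacobian(params)) @ (step_fraction * residual(params))
    params = params + delta
print(params)                         # converges towards the least-squares a, b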