import numpy
import scipy.linalg
from numpy import array, argmax, cross, hstack, matrix, vstack, zeros

import util


def test_make_coeff_matrix(world_points, ctrl_indices):
    C = make_coeff_matrix(world_points, ctrl_indices)

    control_points = util.col_slice(world_points, ctrl_indices)

    print "C = %s" % C
    print "world_points.T = %s" % world_points.T 
    print "C*control_points.T = %s" % (C * control_points.T)
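    # Sanity check: per make_coeff_matrix's docstring, C should reconstruct
    # the world points from the control points, up to rounding error.
    assert numpy.allclose(C * control_points.T, world_points.T)
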
def solve(world_points_in, image_points_in, pixel_scale, annotate_image=None):
    """
    Find a camera's orientation given a set of world coordinates and
    corresponding set of camera coordinates.

    world_points_in: Dict mapping point names to triples corresponding with
                     world x, y, z coordinates.
    image_points_in: Dict mapping point names to pairs corresponding with
                     camera x, y coordinates. Coordinates are translated such
                     that 0, 0 corresponds with the centre of the image.
    pixel_scale: Scale factor used to normalise the image coordinates.
    annotate_image: Optional image to annotate with the fitted points.

    Return: 3x3 rotation matrix and 3x1 translation vector giving the
            camera's orientation and position.
    """

    assert set(world_points_in.keys()) >= set(image_points_in.keys())
    keys = sorted(list(image_points_in.keys()))
    assert len(keys) >= 4
    world_points = hstack([matrix(list(world_points_in[k])).T for k in keys])
    image_points = hstack([matrix(list(image_points_in[k]) + [pixel_scale]).T for k in keys])
    image_points = image_points / pixel_scale

    control_indices = choose_control_points(world_points)
    C = make_coeff_matrix(world_points, control_indices)
    M = make_M(image_points, C)

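    # The stacked camera-space control point coordinates lie (approximately)
    # in the null space of M.  Order the eigenvectors of M.T * M by ascending
    # eigenvalue, so that the leading columns of V span the near-null space.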
    eig_vals, eig_vecs = numpy.linalg.eig(M.T * M)
    V = eig_vecs.T[eig_vals.argsort()].T

    world_ctrl_points = util.col_slice(world_points, control_indices)
    b1 = calc_beta_case_1(V[:, :1], world_ctrl_points)
    b2 = calc_beta_case_2(V[:, :2], world_ctrl_points)
    b3 = calc_beta_case_3(V[:, :3], world_ctrl_points)
   
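    # Try each candidate null-space dimension in turn and keep whichever pose
    # reprojects the world points with the smallest error.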
    outs = []
    errs = []
    for b in [b1, b2, b3]:
        x = V[:, :b.shape[1]] * b.T
        x = x.reshape((4, 3))

        R, offs = util.orientation_from_correspondences(world_ctrl_points, x.T)
        outs.append((R, offs))

        e = calc_reprojection_error(R, offs, world_points, image_points)
        print "Reprojection error = %f" % e
        errs.append(e)

    R, offs = outs[array(errs).argmin()]

    if annotate_image:
        P = hstack([R, offs])
        all_keys = list(world_points_in.keys())
        world_points_mat = hstack([matrix(list(world_points_in[k]) + [1.0]).T for k in all_keys])
        image_points_mat = P * world_points_mat
        image_points_mat = matrix([[r[0,0]/r[0,2], r[0,1]/r[0,2]] for r in image_points_mat.T]).T
        image_points_mat *= pixel_scale
        util.draw_points(annotate_image,
                         dict(zip(all_keys, list(image_points_mat.T))))
    
    return R, offs
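

# A minimal usage sketch for this solver (hypothetical point names and
# coordinates, for illustration only; note that the second solve() definition
# below shadows this one at module level):
#
#     world = {'a': (0., 0., 0.), 'b': (1., 0., 0.),
#              'c': (0., 1., 0.), 'd': (0., 0., 1.), 'e': (1., 1., 1.)}
#     image = {'a': (-12., 34.), 'b': (56., 7.),
#              'c': (9., 87.), 'd': (65., 43.), 'e': (21., 0.)}
#     R, offs = solve(world, image, pixel_scale=1000.0)
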
def make_coeff_matrix(world_points, ctrl_indices):
    """
    Return a matrix C such that:
        world_points.T = C * control_points.T

    With each row of C summing to 1.
    """

    # First find coeffs r, s, t to satisfy:
    #    w - c[0] = r(c[1] - c[0]) + s(c[2] - c[0]) + t(c[3] - c[0])
    # Then rearrange to get: 
    #    w = (1 - r - s - t)*c[0] + r*c[1] + s*c[2] + t*c[3]
    control_points = util.col_slice(world_points, ctrl_indices)

    world_points = sub_all(world_points, control_points[:, 0:1])
    control_points = sub_all(control_points[:, 1:], control_points[:, 0:1])
    C = world_points.T * util.right_inverse(control_points.T)

    return matrix([[1. - r - s - t, r, s, t] for r, s, t in array(C)])
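

# For reference, util.right_inverse is assumed here to be a Moore-Penrose
# right inverse, i.e. roughly equivalent to:
#
#     def right_inverse(A):
#         return A.T * (A * A.T).I
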
def solve(world_points_in, image_points_in, pixel_scale, annotate_image=None):
    """
    Find a camera's orientation given a set of world coordinates and a
    corresponding set of camera coordinates.

    world_points_in: Dict mapping point names to triples corresponding with
                     world x, y, z coordinates.
    image_points_in: Dict mapping point names to pairs corresponding with
                     camera x, y coordinates. Coordinates are translated such
                     that 0, 0 corresponds with the centre of the image.
    pixel_scale: Scale factor used to normalise the image coordinates.
    annotate_image: Optional image to annotate with the fitted points.

    Return: 3x4 matrix P = [R | t] mapping homogeneous world coordinates to
            normalised camera coordinates.
    """

    assert set(world_points_in.keys()) >= set(image_points_in.keys())
    keys = sorted(list(image_points_in.keys()))
    assert len(keys) >= 4

    world_points = hstack([matrix(list(world_points_in[k])).T for k in keys])
            
    # Choose a "good" set of 4 basis indices
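    # Greedy selection: start with the first point, add the point furthest
    # from it, then the point furthest from the line through the first two,
    # and finally the point furthest from the plane through the first three.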
    basis_indices = [0]
    basis_indices += [argmax([numpy.linalg.norm(world_points[:, i] - world_points[:, basis_indices[0]])
                              for i in xrange(len(keys))])]
    def dist_from_line(idx):
        v = world_points[:, idx] - world_points[:, basis_indices[0]]
        d = world_points[:, basis_indices[1]] - world_points[:, basis_indices[0]]
        d = d / numpy.linalg.norm(d)
        v -= d * (d.T * v)[0, 0]
        return numpy.linalg.norm(v)
    basis_indices += [argmax([dist_from_line(i) for i in xrange(len(keys))])]
    def dist_from_plane(idx):
        v = world_points[:, idx] - world_points[:, basis_indices[0]]
        a = world_points[:, basis_indices[1]] - world_points[:, basis_indices[0]]
        b = world_points[:, basis_indices[2]] - world_points[:, basis_indices[0]]
        d = matrix(cross(a.T, b.T).T)
        d = d / numpy.linalg.norm(d)
        return abs((d.T * v)[0, 0])
    basis_indices += [argmax([dist_from_plane(i) for i in xrange(len(keys))])]
    # Debug override: uncomment to hard-code the basis points.
    #basis_indices = map(keys.index, ['12a', '11a', '9a', '12b'])

    basis        = hstack([world_points[:, i] for i in basis_indices])
    image_points = hstack([matrix(list(image_points_in[k]) + [pixel_scale]).T for k in keys])
    image_points = image_points / pixel_scale

    print "Basis = %s" % [keys[i] for i in basis_indices]

    # Choose coeffs such that basis * coeffs = P
    # where P is world_points relative to the first basis vector
    def sub_origin(M):
        return M - hstack([basis[:, :1]] * M.shape[1])
    coeffs = sub_origin(basis[:, 1:]).I * sub_origin(world_points)

    # Compute a matrix M such that M * [z0, z1, z2, ..., zN] = 0, where zi is
    # proportional to the Z-value of the i'th image point.
    def M_for_image_point(idx):
        assert idx not in basis_indices

        out = matrix(zeros((3, len(keys))))

        # Set d,e,f st:
        #   d * (b[1] - b[0]) + e * (b[2] - b[0]) + f * (b[3] - b[0]) =
        #       world_points[idx] - b[0]
        d, e, f = [coeffs[i, idx] for i in [0, 1, 2]]

        out[:, basis_indices[0]] = (1. - d - e - f) * image_points[:, basis_indices[0]]
        out[:, basis_indices[1]] = d * image_points[:, basis_indices[1]]
        out[:, basis_indices[2]] = e * image_points[:, basis_indices[2]]
        out[:, basis_indices[3]] = f * image_points[:, basis_indices[3]]
        out[:, idx]              = -image_points[:, idx]

        return out
    M = vstack([M_for_image_point(key_idx)
                    for key_idx in xrange(len(keys))
                    if key_idx not in basis_indices])

    # Solve for Z by taking the eigenvector corresponding with the smallest
    # eigenvalue.
    eig_vals, eig_vecs = numpy.linalg.eig(M.T * M)
    Z = (eig_vecs.T[eig_vals.argmin()]).T
    print "Eig vecs: %s" % repr(eig_vecs)
    print "Eig vals: %s" % repr(eig_vals)
    print "Min idx: %d" % eig_vals.argmin()
    print "Z = %s" % repr(Z)

    print "M * Z = %s" % repr(M*Z)

    # Project points. The scale of the projected points will be wrong, and the
    # orientation is still unknown.
    camera_points = matrix(array(image_points) * array(vstack([Z.T] * 3)))

    print "Coeffs: %s" % repr(coeffs)
    print "Projected basis: %s" % repr(util.col_slice(camera_points, basis_indices))
    print "World basis: %s" % repr(util.col_slice(world_points, basis_indices))

    if annotate_image:
        image_points_mat = matrix([[r[0,0]/r[0,2], r[0,1]/r[0,2]] for r in camera_points.T]).T
        image_points_mat *= pixel_scale
        util.draw_points(annotate_image,
                dict(zip(["%f" % Z[i,0] for i in xrange(Z.shape[0])], list(image_points_mat.T))))

    # Compute the rotation and scale from world space to camera space.
    def sub_first(M):
        return M - hstack([M[:, basis_indices[0]]] * M.shape[1])
    P = sub_first(camera_points) * util.right_inverse(sub_first(world_points))

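    # P should be (approximately) a rotation times a uniform scale.  Use an
    # RQ decomposition to split it into an upper-triangular K and an
    # orthonormal R, flipping signs so that K's diagonal is positive.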
    K, R = map(matrix, scipy.linalg.rq(P))
    for i in xrange(3):
        if K[i,i] < 0:
            R[i:(i+1), :] = -R[i:(i+1), :]
            K[:, i:(i+1)] = -K[:, i:(i+1)]
    print "P = %s" % repr(P)
    print "K = %s" % repr(K)
    print "R = %s" % repr(R)

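    # K should be close to a multiple of the identity; the reciprocal of its
    # mean diagonal entry rescales the camera points so that they differ from
    # the world points by a rigid transform only.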
    scale = 3.0 / sum(K[i,i] for i in xrange(3))
    t = scale * camera_points[:, basis_indices[0]] - R * world_points[:, basis_indices[0]]
    P = hstack((R, t))

    # Annotate the image with the final projection (currently disabled in
    # favour of the Z-scaled annotation above).
    if False and annotate_image:
        all_keys = list(world_points_in.keys())
        world_points_mat = hstack([matrix(list(world_points_in[k]) + [1.0]).T for k in all_keys])
        image_points_mat = P * world_points_mat
        image_points_mat = matrix([[r[0,0]/r[0,2], r[0,1]/r[0,2]] for r in image_points_mat.T]).T
        image_points_mat *= pixel_scale
        util.draw_points(annotate_image,
                         dict(zip(all_keys, list(image_points_mat.T))))

    return P
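

# The first solver relies on util.orientation_from_correspondences to recover
# a rigid transform between corresponding point sets.  It is assumed to solve
# the orthogonal Procrustes problem; a minimal sketch of one standard (Kabsch)
# approach, not necessarily the implementation in util:
#
#     def orientation_from_correspondences(A, B):
#         # A, B: 3xN matrices of corresponding points.  Returns R, offs
#         # such that B ~= R * A + offs.
#         a_mean = A.mean(axis=1)
#         b_mean = B.mean(axis=1)
#         U, s, Vt = numpy.linalg.svd((A - a_mean) * (B - b_mean).T)
#         d = numpy.linalg.det(matrix(Vt).T * matrix(U).T)
#         R = matrix(Vt).T * matrix(numpy.diag([1., 1., d])) * matrix(U).T
#         offs = b_mean - R * a_mean
#         return R, offs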