Example #1
    def apply_normalization_to_output_with_gradients(v,dv_dq,dv_di):
        # vn = v/mag(v)
        # dvn = dv/mag(v) + v d(1/mag(v))
        #     = dv/mag(v) - v (vt dv) / mag^3(v)
        #     = ( I - vn vnt ) dv / mag(v)

        # v has shape (...,3)
        # dv_dq has shape (...,3,2)
        # dv_di has shape (...,3,N)

        # shape (...,1)
        magv_recip = 1. / nps.dummy(nps.mag(v), -1)
        v *= magv_recip

        # shape (...,1,1)
        magv_recip = nps.dummy(magv_recip,-1)
        dv_dq *= magv_recip

        # dv_dq -= (vn vnt) dv_dq. The transposes and dummy dimensions make
        # the matrix multiplication broadcast over the (...,3,2) array
        dv_dq -= nps.xchg( nps.matmult( nps.dummy(nps.xchg(dv_dq, -1,-2), -2),
                                        nps.dummy(nps.outer(v,v),-3) )[...,0,:],
                           -1, -2)

        dv_di *= magv_recip

        # same projection, applied to dv_di
        dv_di -= nps.xchg( nps.matmult( nps.dummy(nps.xchg(dv_di, -1,-2), -2),
                                        nps.dummy(nps.outer(v,v),-3) )[...,0,:],
                           -1, -2)
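A quick cross-check of the gradient formula above: the ( I - vn vnt )/mag(v)
factor can be verified against finite differences. A minimal standalone sketch
using plain numpy (the function itself uses numpysane):

import numpy as np

v  = np.array((1., 2., 3.))
vn = v / np.linalg.norm(v)

# analytical: d(v/mag(v))/dv = ( I - vn vnt ) / mag(v)
J = (np.eye(3) - np.outer(vn, vn)) / np.linalg.norm(v)

# finite differences, one input coordinate at a time
eps  = 1e-6
J_fd = np.empty((3,3))
for i in range(3):
    dv    = np.zeros(3)
    dv[i] = eps
    J_fd[:,i] = ((v+dv)/np.linalg.norm(v+dv) - vn) / eps

assert np.allclose(J, J_fd, atol=1e-5)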
Example #2
def _plot_args_points_and_covariance_ellipse(q, what):
    q_mean = np.mean(q, axis=-2)
    q_mean0 = q - q_mean
    Var = np.mean(nps.outer(q_mean0, q_mean0), axis=0)
    # Some functions assume that we're plotting _with = "dots". Look for the
    # callers if changing this
    return (_plot_arg_covariance_ellipse(q_mean, Var,
                                         what), (q,
                                                 dict(_with='dots',
                                                      tuplesize=-2)))
Example #3
def _compose_r(r0, r1, assume_r0_tiny=False, get_gradients=False):
    r'''Axis/angle rotation composition

THIS IS TEMPORARY. WILL BE REDONE IN C, WITH DOCS AND TESTS

Described here:

    Altmann, Simon L. "Hamilton, Rodrigues, and the Quaternion Scandal."
    Mathematics Magazine, vol. 62, no. 5, 1989, pp. 291–308

Available here:

    https://www.jstor.org/stable/2689481


I use Equations (19) and (20) on page 302 of this paper. These equations say
that

  R(angle=gamma, axis=n) =
  compose( R(angle=alpha, axis=l), R(angle=beta, axis=m) )

I need to compute gamma*n, and these are given as solutions to:

  cos(gamma/2) =
    cos(alpha/2)*cos(beta/2) -
    sin(alpha/2)*sin(beta/2) * inner(l,m)
  sin(gamma/2) n =
    sin(alpha/2)*cos(beta/2)*l +
    cos(alpha/2)*sin(beta/2)*m +
    sin(alpha/2)*sin(beta/2) * cross(l,m)

For nicer notation, I define

  A = alpha/2
  B = beta /2
  C = gamma/2

  l = r0 /(2A)
  m = r1 /(2B)
  n = r01/(2C)

I rewrite:

  cos(C) =
    cos(A)*cos(B) -
    sin(A)*sin(B) * inner(r0,r1) / 4AB
  sin(C) r01 / 2C =
    sin(A)*cos(B)*r0 / 2A +
    cos(A)*sin(B)*r1 / 2B +
    sin(A)*sin(B) * cross(r0,r1) / 4AB

If alpha ~ 0, I have A ~ 0, and I can simplify:

  cos(C) ~
    cos(B) -
    A*sin(B) * inner(r0,r1) / 4AB
  sin(C) r01 / 2C ~
    A*cos(B)* r0 / 2A +
    sin(B)  * r1 / 2B +
    A*sin(B) * cross(r0,r1) / 4AB

I have C = B + dB where dB ~ 0, so

  cos(C) ~ cos(B + dB) ~ cos(B) - dB sin(B)
  -> dB = A * inner(r0,r1) / 4AB = inner(r0,r1) / 4B
  -> C = B + inner(r0,r1) / 4B

Now let's look at the axis expression. Assuming A ~ 0

  sin(C) r01 / 2C ~

  A*cos(B) r0 / 2A +
  sin(B)   r1 / 2B +
  A*sin(B) * cross(r0,r1) / 4AB

->
  sin(C)/C * r01 ~

  cos(B) r0 +
  sin(B) r1 / B +
  sin(B) * cross(r0,r1) / 2B

I linearize the left-hand side:

  sin(C)/C = sin(B+dB)/(B+dB)
           ~ sin(B)/B + dB d( sin(B)/B )/dB
           = sin(B)/B + dB (B cos(B) - sin(B)) / B^2

So

  (sin(B)/B + dB  (B cos(B) - sin(B)) / B^2) r01 ~

  cos(B) r0 +
  sin(B) r1 / B +
  sin(B) * cross(r0,r1) / 2B

->

  (sin(B) + dB  (B cos(B) - sin(B)) / B) r01 ~

  sin(B) r1 +
  cos(B)*B r0 +
  sin(B) * cross(r0,r1) / 2

I want to find the perturbation to give me r01 ~ r1 + deltar ->

  ( dB (B cos(B) - sin(B)) / B) r1 + (sin(B) + dB  (B cos(B) - sin(B)) / B) deltar ~

  cos(B)*B r0 +
  sin(B) * cross(r0,r1) / 2

All terms here are linear or quadratic in r0. For tiny r0, I can ignore the
quadratic terms:

  ( dB (B cos(B) - sin(B)) / B) r1 + sin(B) deltar ~

  cos(B)*B r0 +
  sin(B) * cross(r0,r1) / 2

I solve for deltar:

  deltar ~
  cos(B)/sin(B)*B r0 +
  cross(r0,r1) / 2 -
  ( dB (B cos(B)/sin(B) - 1) / B) r1

I substitute in the dB from above, and I simplify:

  deltar ~

  B/tan(B) r0 +
  cross(r0,r1) / 2 -
  ( inner(r0,r1) / 4B * (1/tan(B) - 1/B)) r1

And I differentiate:

  dr01/dr0 = ddeltar/dr0 =

  B/tan(B) I -
  skew_symmetric(r1) / 2 -
  outer(r1,r1) / 4B * (1/tan(B) - 1/B)

    '''

    if not assume_r0_tiny:
        if get_gradients:
            raise Exception("Not implemented")
        A = nps.mag(r0) / 2
        B = nps.mag(r1) / 2

        cos_C = np.cos(A) * np.cos(B) - np.sin(A) * np.sin(B) * nps.inner(
            r0, r1) / (4 * A * B)
        sin_C = np.sqrt(1. - cos_C * cos_C)
        th01 = np.arctan2(sin_C, cos_C) * 2.

        sin_C__a01 = np.sin(A) * np.cos(B) * r0 / (
            2 * A) + np.cos(A) * np.sin(B) * r1 / (
                2 * B) + np.sin(A) * np.sin(B) * np.cross(r0, r1) / (4 * A * B)
        a01 = sin_C__a01 / sin_C
        return a01 * th01

    B = nps.mag(r1) / 2

    if nps.norm2(r0) != 0:
        # for broadcasting
        if isinstance(B, np.ndarray): Bs = nps.dummy(B, -1)
        else: Bs = B
        r01 = r1 + \
            Bs/np.tan(Bs) * r0 + \
            np.cross(r0,r1) / 2 - \
            ( nps.inner(r0,r1) / (4*Bs) * (1/np.tan(Bs) - 1/Bs))* r1
    else:
        r01 = r1

    if not get_gradients:
        return r01

    # for broadcasting
    if isinstance(B, np.ndarray): Bs = nps.dummy(B, -1, -1)
    else: Bs = B
    dr01_dr0 = \
        Bs/np.tan(Bs) * np.eye(3) + \
        -mrcal.utils._skew_symmetric(r1) / 2 - \
        nps.outer(r1,r1) / (4*Bs) * (1/np.tan(Bs) - 1/Bs)
    return r01, dr01_dr0
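A quick numerical check of the assume_r0_tiny path against an independent
reference. This sketch assumes scipy is available (the function itself doesn't
use it); scipy's Rotation composes as R0*R1, matching the paper's
R(gamma,n) = compose( R(alpha,l), R(beta,m) ):

import numpy as np
from scipy.spatial.transform import Rotation

r0 = np.array((1e-6, -2e-6, 5e-7))   # tiny rotation
r1 = np.array((0.1, -0.2, 0.3))

r01_ref = (Rotation.from_rotvec(r0) * Rotation.from_rotvec(r1)).as_rotvec()
r01     = _compose_r(r0, r1, assume_r0_tiny=True)
assert np.allclose(r01, r01_ref, atol=1e-9)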
Example #4
# I want observations of shape (Nframes*Ncameras, Nh, Nw, 3) where each row is
# (x,y,weight)
observations_ref = nps.clump(nps.glue(q_ref, nps.dummy(weight, -1), axis=-1),
                             n=2)

# These are perfect
intrinsics_ref = nps.cat(*[m.intrinsics()[1] for m in models_ref])
extrinsics_ref = nps.cat(*[m.extrinsics_rt_fromref() for m in models_ref[1:]])
if extrinsics_ref.size == 0:
    extrinsics_ref = np.zeros((0, 6), dtype=float)
frames_ref = mrcal.rt_from_Rt(Rt_ref_board_ref)

# Dense observations. All the cameras see all the boards
indices_frame_camera = np.zeros((Nframes * Ncameras, 2), dtype=np.int32)
indices_frame = indices_frame_camera[:, 0].reshape(Nframes, Ncameras)
indices_frame.setfield(nps.outer(np.arange(Nframes, dtype=np.int32),
                                 np.ones((Ncameras, ), dtype=np.int32)),
                       dtype=np.int32)
indices_camera = indices_frame_camera[:, 1].reshape(Nframes, Ncameras)
indices_camera.setfield(nps.outer(np.ones((Nframes, ), dtype=np.int32),
                                  np.arange(Ncameras, dtype=np.int32)),
                        dtype=np.int32)

indices_frame_camintrinsics_camextrinsics = \
    nps.glue(indices_frame_camera,
             indices_frame_camera[:,(1,)] - 1,
             axis=-1)
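# For reference, an equivalent construction of the same dense (frame,camera)
# index grid, included here as an illustrative cross-check only
i = np.arange(Nframes * Ncameras, dtype=np.int32)
assert np.array_equal(indices_frame_camera,
                      nps.transpose(nps.cat(i // Ncameras,
                                            i %  Ncameras)))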

# Add a bit of noise to make my baseline not perfect
_, observations_baseline = sample_dqref(observations_ref,
                                        pixel_uncertainty_stdev)
baseline = \
Example #5
def get_point_cov_plot_args(q, what):
    q_mean = np.mean(q, axis=-2)
    q_mean0 = q - q_mean
    Var = np.mean(nps.outer(q_mean0, q_mean0), axis=0)
    return get_cov_plot_args(q_mean, Var, what)
Example #6
def check_uncertainties_at(q0_baseline, idistance):

    distance = args.distances[idistance]

    # distance of "None" means I'll simulate a large distance, but compare
    # against a special-case distance of "infinity"
    if distance is None:
        distance = 1e5
        atinfinity = True
        distancestr = "infinity"
    else:
        atinfinity = False
        distancestr = str(distance)

    # shape (Ncameras,3)
    p_cam_baseline = mrcal.unproject(
        q0_baseline, lensmodel, intrinsics_baseline, normalize=True) * distance

    # shape (Nsamples, Ncameras, 2)
    q_sampled = \
        reproject_perturbed(q0_baseline,
                            distance,

                            intrinsics_baseline,
                            extrinsics_baseline_mounted,
                            frames_baseline,
                            calobject_warp_baseline,

                            intrinsics_sampled,
                            extrinsics_sampled_mounted,
                            frames_sampled,
                            calobject_warp_sampled)

    # shape (Ncameras, 2)
    q_sampled_mean = np.mean(q_sampled, axis=-3)

    # shape (Ncameras, 2,2)
    Var_dq_observed = np.mean(nps.outer(q_sampled - q_sampled_mean,
                                        q_sampled - q_sampled_mean),
                              axis=-4)

    # shape (Ncameras). The worst-direction stdev is the sqrt of the biggest
    # eigenvalue of each 2x2 covariance matrix
    worst_direction_stdev_observed = mrcal.worst_direction_stdev(
        Var_dq_observed)

    # shape (Ncameras, 2,2)
    Var_dq = \
        nps.cat(*[ mrcal.projection_uncertainty( \
            p_cam_baseline[icam],
            atinfinity = atinfinity,
            model      = models_baseline[icam]) \
                   for icam in range(args.Ncameras) ])
    # shape (Ncameras)
    worst_direction_stdev_predicted = mrcal.worst_direction_stdev(Var_dq)

    # q_sampled should be evenly distributed around q0_baseline. I can make eps
    # as tight as I want by increasing Nsamples
    testutils.confirm_equal(
        nps.mag(q_sampled_mean - q0_baseline),
        0,
        eps=0.3,
        worstcase=True,
        msg=
        f"Sampled projections cluster around the sample point at distance = {distancestr}"
    )

    # I accept 20% error. This is plenty good enough, and I can get tighter
    # matches if I grab more samples
    testutils.confirm_equal(
        worst_direction_stdev_observed,
        worst_direction_stdev_predicted,
        eps=0.2,
        worstcase=True,
        relative=True,
        msg=
        f"Predicted worst-case projections match sampled observations at distance = {distancestr}"
    )

    # I now compare the variances. The cross terms have lots of apparent error,
    # but it's more meaningful to compare the eigenvectors and eigenvalues, so I
    # just do that

    # First, the thing is symmetric, right?
    testutils.confirm_equal(
        nps.transpose(Var_dq),
        Var_dq,
        worstcase=True,
        msg=f"Var(dq) is symmetric at distance = {distancestr}")

    for icam in range(args.Ncameras):
        l_predicted, v = sorted_eig(Var_dq[icam])
        v0_predicted = v[:, 0]

        l_observed, v = sorted_eig(Var_dq_observed[icam])
        v0_observed = v[:, 0]

        testutils.confirm_equal(
            l_observed,
            l_predicted,
            eps=0.35,  # high error tolerance. Nsamples is too low for better
            worstcase=True,
            relative=True,
            msg=
            f"Var(dq) eigenvalues match for camera {icam} at distance = {distancestr}"
        )

        if icam == 3:
            # I only check the eigenvectors for camera 3. The other cameras have
            # isotropic covariances, so the eigenvectors aren't well defined. If
            # one isn't isotropic for some reason, the eigenvalue check will
            # fail
            testutils.confirm_equal(
                np.arcsin(nps.mag(np.cross(v0_observed, v0_predicted))) *
                180. / np.pi,
                0,
                eps=15,  # high error tolerance. Nsamples is too low for better
                worstcase=True,
                msg=
                f"Var(dq) eigenvectors match for camera {icam} at distance = {distancestr}"
            )

            # I don't bother checking v1. I already made sure the matrix is
            # symmetric. Thus the eigenvectors are orthogonal, so any angle offset
            # in v0 will be exactly the same in v1

    return q_sampled, Var_dq
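sorted_eig() isn't defined in this excerpt. A minimal sketch of the assumed
behavior (eigenvalues ascending, eigenvectors in the matching columns), for
reference:

def sorted_eig(M):
    # M is a symmetric covariance matrix, so eigh() applies. It returns
    # eigenvalues in ascending order, with the corresponding eigenvectors
    # in the columns of v
    l, v = np.linalg.eigh(M)
    return l, v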
Example #7
def image_transformation_map(model_from,
                             model_to,
                             intrinsics_only=False,
                             distance=None,
                             plane_n=None,
                             plane_d=None,
                             mask_valid_intrinsics_region_from=False):
    r'''Compute a reprojection map between two models

SYNOPSIS

    model_orig = mrcal.cameramodel("xxx.cameramodel")
    image_orig = cv2.imread("image.jpg")

    model_pinhole = mrcal.pinhole_model_for_reprojection(model_orig,
                                                         fit = "corners")

    mapxy = mrcal.image_transformation_map(model_orig, model_pinhole,
                                           intrinsics_only = True)

    image_undistorted = mrcal.transform_image(image_orig, mapxy)

    # image_undistorted is now a pinhole-reprojected version of image_orig

Returns the transformation that describes a mapping

- from pixel coordinates of an image of a scene observed by model_to
- to pixel coordinates of an image of the same scene observed by model_from

This transformation can then be applied to a whole image by calling
mrcal.transform_image().

This function returns a transformation map in an (Nheight,Nwidth,2) array. The
image made by model_to will have shape (Nheight,Nwidth). Each pixel (x,y) in
this image corresponds to a pixel mapxy[y,x,:] in the image made by model_from.

One application of this function is to validate the models in a stereo pair. For
instance, reprojecting one camera's image at distance=infinity should produce
exactly the same image that is observed by the other camera when looking at very
far objects, IF the intrinsics and rotation are correct. If the images don't
line up well, we know that some part of the models is off. Similarly, we can use
big planes (such as observations of the ground) and plane_n, plane_d to validate.

This function has several modes of operation:

- intrinsics, extrinsics

  Used if not intrinsics_only and \
          plane_n is None     and \
          plane_d is None

  This is the default. For each pixel in the output, we use the full model to
  unproject a given distance out, and then use the full model to project back
  into the other camera.

- intrinsics only

  Used if intrinsics_only and \
          plane_n is None and \
          plane_d is None

  Similar, but the extrinsics are ignored. We unproject the pixels in one model,
  and project them into the other camera. The two camera coordinate systems are
  assumed to line up perfectly

- plane

  Used if plane_n is not None and
          plane_d is not None

  We map observations of a given plane in camera FROM coordinates to where this
  plane would be observed by camera TO. We unproject each pixel in one camera,
  compute the intersection point with the plane, and project that intersection
  point back to the other camera. This uses ALL the intrinsics, extrinsics and
  the plane representation. The plane is represented by a normal vector plane_n
  and a distance plane_d: the plane is all points p such that inner(p,plane_n)
  = plane_d. plane_n does not need to be normalized; any scaling is compensated
  in plane_d.

ARGUMENTS

- model_from: the mrcal.cameramodel object describing the camera used to capture
  the input image. We always use the intrinsics. if not intrinsics_only: we use
  the extrinsics also

- model_to: the mrcal.cameramodel object describing the camera that would have
  captured the image we're producing. We always use the intrinsics. if not
  intrinsics_only: we use the extrinsics also

- intrinsics_only: optional boolean, defaulting to False. If False: we respect
  the relative transformation in the extrinsics of the camera models.

- distance: optional value, defaulting to None. Used only if not
  intrinsics_only. We reproject points in space a given distance out. If
  distance is None (the default), we look out to infinity. This is equivalent to
  using only the rotation component of the extrinsics, ignoring the translation.

- plane_n: optional numpy array of shape (3,); None by default. If given, we
  produce a transformation to map observations of a given plane to the same
  pixels in the source and target images. This argument describes the normal
  vector in the coordinate system of model_from. The plane is all points p such
  that inner(p,plane_n) = plane_d. plane_n does not need to be normalized; any
  scaling is compensated in plane_d. If given, plane_d should be given also, and
  intrinsics_only should be False. if given, we use the full intrinsics and
  extrinsics of both camera models

- plane_d: optional floating-point value; None by default. If given, we produce
  a transformation to map observations of a given plane to the same pixels in
  the source and target images. The plane is all points p such that
  inner(p,plane_n) = plane_d. plane_n does not need to be normalized; any
  scaling is compensated in plane_d. If given, plane_n should be given also, and
  intrinsics_only should be False. if given, we use the full intrinsics and
  extrinsics of both camera models

- mask_valid_intrinsics_region_from: optional boolean defaulting to False. If
  True, points outside the valid-intrinsics region in the FROM image are set to
  black, and thus do not appear in the output image

RETURNED VALUE

A numpy array of shape (Nheight,Nwidth,2) where Nheight and Nwidth represent the
imager dimensions of model_to. This array contains 32-bit floats, as required by
cv2.remap() (the function providing the internals of mrcal.transform_image()).
This array can be passed to mrcal.transform_image()

    '''

    if (plane_n is      None and plane_d is not None) or \
       (plane_n is not  None and plane_d is     None):
        raise Exception(
            "plane_n and plane_d should both be None or neither should be None"
        )
    if plane_n is not None and \
       intrinsics_only:
        raise Exception(
            "We're looking at remapping a plane (plane_d, plane_n are not None), so intrinsics_only should be False"
        )

    if distance is not None and \
       (plane_n is not None or intrinsics_only):
        raise Exception(
            "'distance' makes sense only without plane_n/plane_d and without intrinsics_only"
        )

    if intrinsics_only:
        Rt_to_from = None
    else:
        Rt_to_from = mrcal.compose_Rt(model_to.extrinsics_Rt_fromref(),
                                      model_from.extrinsics_Rt_toref())

    lensmodel_from, intrinsics_data_from = model_from.intrinsics()
    lensmodel_to, intrinsics_data_to = model_to.intrinsics()

    if re.match("LENSMODEL_OPENCV",lensmodel_from) and \
       lensmodel_to == "LENSMODEL_PINHOLE"         and \
       plane_n is None                             and \
       not mask_valid_intrinsics_region_from       and \
       distance is None:

        # This is a common special case. This branch works identically to the
        # other path (the other side of this "if" can always be used instead),
        # but the opencv-specific code is optimized and at one point ran faster
        # than the code on the other side.
        #
        # The mask_valid_intrinsics_region_from isn't implemented in this path.
        # It COULD be; if it were, this faster path could be used in more cases
        import cv2

        fxy_from = intrinsics_data_from[0:2]
        cxy_from = intrinsics_data_from[2:4]
        cameraMatrix_from = np.array(
            ((fxy_from[0], 0, cxy_from[0]), (0, fxy_from[1], cxy_from[1]),
             (0, 0, 1)))

        fxy_to = intrinsics_data_to[0:2]
        cxy_to = intrinsics_data_to[2:4]
        cameraMatrix_to = np.array(
            ((fxy_to[0], 0, cxy_to[0]), (0, fxy_to[1], cxy_to[1]), (0, 0, 1)))

        output_shape = model_to.imagersize()
        distortion_coeffs = intrinsics_data_from[4:]

        if Rt_to_from is not None:
            R_to_from = Rt_to_from[:3, :]
            if np.trace(R_to_from) > 3. - 1e-12:
                R_to_from = None  # identity, so I pass None
        else:
            R_to_from = None

        return nps.glue( *[ nps.dummy(arr,-1) for arr in \
                            cv2.initUndistortRectifyMap(cameraMatrix_from, distortion_coeffs,
                                                        R_to_from,
                                                        cameraMatrix_to, tuple(output_shape),
                                                        cv2.CV_32FC1)],
                         axis = -1)

    W_from, H_from = model_from.imagersize()
    W_to, H_to = model_to.imagersize()

    # shape: (Nheight,Nwidth,2). Contains (x,y) rows
    grid = np.ascontiguousarray(nps.mv(
        nps.cat(*np.meshgrid(np.arange(W_to), np.arange(H_to))), 0, -1),
                                dtype=float)
    v = mrcal.unproject(grid, lensmodel_to, intrinsics_data_to)

    if plane_n is not None:

        R_to_from = Rt_to_from[:3, :]
        t_to_from = Rt_to_from[3, :]

        # The homography definition. Derived in many places. For instance in
        # "Motion and structure from motion in a piecewise planar environment"
        # by Olivier Faugeras, F. Lustman.
        A_to_from = plane_d * R_to_from + nps.outer(t_to_from, plane_n)
        A_from_to = np.linalg.inv(A_to_from)
        v = nps.matmult(v, nps.transpose(A_from_to))

    else:
        if Rt_to_from is not None:
            if distance is not None:
                v = mrcal.transform_point_Rt(
                    mrcal.invert_Rt(Rt_to_from),
                    v / nps.dummy(nps.mag(v), -1) * distance)
            else:
                R_to_from = Rt_to_from[:3, :]
                v = nps.matmult(v, R_to_from)

    mapxy = mrcal.project(v, lensmodel_from, intrinsics_data_from)

    if mask_valid_intrinsics_region_from:

        # Using matplotlib to compute the out-of-bounds points. It doesn't
        # support broadcasting, so I do that manually with a clump/reshape
        from matplotlib.path import Path
        region = Path(model_from.valid_intrinsics_region())
        is_inside = region.contains_points(nps.clump(mapxy, n=2)).reshape(
            mapxy.shape[:2])
        mapxy[~is_inside, :] = -1

    return mapxy.astype(np.float32)
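A hypothetical usage sketch of the plane mode: remapping ground-plane
observations from one camera of a stereo pair into the other. The model file
names and the plane geometry below are made up for illustration:

import cv2
import numpy as np
import mrcal

model0 = mrcal.cameramodel("camera0.cameramodel")   # hypothetical paths
model1 = mrcal.cameramodel("camera1.cameramodel")
image0 = cv2.imread("camera0.jpg")

# The ground plane in camera-0 coordinates. With the usual
# x-right/y-down/z-forward convention, a flat floor 1.5m below the camera is
# the set of points p with inner(p,plane_n) = plane_d
plane_n = np.array((0., 1., 0.))
plane_d = 1.5

mapxy = mrcal.image_transformation_map(model0, model1,
                                       plane_n = plane_n,
                                       plane_d = plane_d)

# Shaped like model1's imager. If the models and the plane geometry are
# correct, the ground-plane pixels here line up with camera-1's own image
image0_remapped = mrcal.transform_image(image0, mapxy)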
Example #8
def calibration_baseline(model,
                         Ncameras,
                         Nframes,
                         extra_observation_at,
                         object_width_n,
                         object_height_n,
                         object_spacing,
                         extrinsics_rt_fromref_true,
                         calobject_warp_true,
                         fixedframes,
                         testdir,
                         cull_left_of_center=False,
                         allow_nonidentity_cam0_transform=False,
                         range_to_boards=4.0):
    r'''Compute a calibration baseline as a starting point for experiments

This is a perfect, noiseless solve. Regularization IS enabled, and the returned
model is at the optimization optimum. So the returned models will not sit
exactly at the ground-truth.

NOTE: if not fixedframes: the ref frame in the returned
optimization_inputs_baseline is NOT the ref frame used by the returned
extrinsics and frames arrays. The arrays in optimization_inputs_baseline had to
be transformed to reference off camera 0. If the extrinsics of camera 0 are the
identity, then the two ref coord systems are the same. To avoid accidental bugs,
we have a kwarg allow_nonidentity_cam0_transform, which defaults to False. if
not allow_nonidentity_cam0_transform and norm(extrinsics_rt_fromref_true[0]) >
0: raise

This logic is here purely for safety. A caller that handles non-identity cam0
transforms has to explicitly say so.

ARGUMENTS

- model: string. 'opencv4' or 'opencv8' or 'splined'

- ...

    '''

    if re.match('opencv', model):
        models_true = (
            mrcal.cameramodel(f"{testdir}/data/cam0.opencv8.cameramodel"),
            mrcal.cameramodel(f"{testdir}/data/cam0.opencv8.cameramodel"),
            mrcal.cameramodel(f"{testdir}/data/cam1.opencv8.cameramodel"),
            mrcal.cameramodel(f"{testdir}/data/cam1.opencv8.cameramodel"))

        if model == 'opencv4':
            # I have opencv8 models_true, but I truncate to opencv4 models_true
            for m in models_true:
                m.intrinsics(intrinsics=('LENSMODEL_OPENCV4',
                                         m.intrinsics()[1][:8]))
    elif model == 'splined':
        models_true = (
            mrcal.cameramodel(f"{testdir}/data/cam0.splined.cameramodel"),
            mrcal.cameramodel(f"{testdir}/data/cam0.splined.cameramodel"),
            mrcal.cameramodel(f"{testdir}/data/cam1.splined.cameramodel"),
            mrcal.cameramodel(f"{testdir}/data/cam1.splined.cameramodel"))
    else:
        raise Exception("Unknown lens being tested")

    models_true = models_true[:Ncameras]
    lensmodel = models_true[0].intrinsics()[0]
    Nintrinsics = mrcal.lensmodel_num_params(lensmodel)

    for i in range(Ncameras):
        models_true[i].extrinsics_rt_fromref(extrinsics_rt_fromref_true[i])

    if not allow_nonidentity_cam0_transform and \
       nps.norm2(extrinsics_rt_fromref_true[0]) > 0:
        raise Exception(
            "A non-identity cam0 transform was given, but the caller didn't explicitly say that they support this"
        )

    imagersizes = nps.cat(*[m.imagersize() for m in models_true])

    # These are perfect
    intrinsics_true = nps.cat(*[m.intrinsics()[1] for m in models_true])
    extrinsics_true_mounted = nps.cat(
        *[m.extrinsics_rt_fromref() for m in models_true])
    x_center = -(Ncameras - 1) / 2.

    # shapes (Nframes, Ncameras, Nh, Nw, 2),
    #        (Nframes, 4,3)
    q_true,Rt_ref_board_true = \
        mrcal.synthesize_board_observations(models_true,
                                            object_width_n, object_height_n, object_spacing,
                                            calobject_warp_true,
                                            np.array((0.,             0.,             0.,             x_center, 0,   range_to_boards)),
                                            np.array((np.pi/180.*30., np.pi/180.*30., np.pi/180.*20., 2.5,      2.5, range_to_boards/2.0)),
                                            Nframes)

    if extra_observation_at is not None:
        q_true_extra,Rt_ref_board_true_extra = \
            mrcal.synthesize_board_observations(models_true,
                                                object_width_n, object_height_n, object_spacing,
                                                calobject_warp_true,
                                                np.array((0.,             0.,             0.,             x_center, 0,   extra_observation_at)),
                                                np.array((np.pi/180.*30., np.pi/180.*30., np.pi/180.*20., 2.5,      2.5, extra_observation_at/10.0)),
                                                Nframes = 1)

        q_true = nps.glue(q_true, q_true_extra, axis=-5)
        Rt_ref_board_true = nps.glue(Rt_ref_board_true,
                                     Rt_ref_board_true_extra,
                                     axis=-3)

        Nframes += 1

    frames_true = mrcal.rt_from_Rt(Rt_ref_board_true)

    ############# I have perfect observations in q_true.
    # weight has shape (Nframes, Ncameras, Nh, Nw),
    weight01 = (np.random.rand(*q_true.shape[:-1]) + 1.) / 2.  # in [0,1]
    weight0 = 0.2
    weight1 = 1.0
    weight = weight0 + (weight1 - weight0) * weight01

    if cull_left_of_center:

        imagersize = models_true[0].imagersize()
        for m in models_true[1:]:
            if np.any(m.imagersize() - imagersize):
                raise Exception(
                    "I'm assuming all cameras have the same imager size, but this is false"
                )

        weight[q_true[..., 0] < imagersize[0] / 2.] /= 1000.

    # I want observations of shape (Nframes*Ncameras, Nh, Nw, 3) where each row is
    # (x,y,weight)
    observations_true = nps.clump(nps.glue(q_true,
                                           nps.dummy(weight, -1),
                                           axis=-1),
                                  n=2)

    # Dense observations. All the cameras see all the boards
    indices_frame_camera = np.zeros((Nframes * Ncameras, 2), dtype=np.int32)
    indices_frame = indices_frame_camera[:, 0].reshape(Nframes, Ncameras)
    indices_frame.setfield(nps.outer(np.arange(Nframes, dtype=np.int32),
                                     np.ones((Ncameras, ), dtype=np.int32)),
                           dtype=np.int32)
    indices_camera = indices_frame_camera[:, 1].reshape(Nframes, Ncameras)
    indices_camera.setfield(nps.outer(np.ones((Nframes, ), dtype=np.int32),
                                      np.arange(Ncameras, dtype=np.int32)),
                            dtype=np.int32)

    indices_frame_camintrinsics_camextrinsics = \
        nps.glue(indices_frame_camera,
                 indices_frame_camera[:,(1,)],
                 axis=-1)
    if not fixedframes:
        indices_frame_camintrinsics_camextrinsics[:, 2] -= 1

    ###########################################################################
    # p = mrcal.show_geometry(models_true,
    #                         frames          = frames_true,
    #                         object_width_n  = object_width_n,
    #                         object_height_n = object_height_n,
    #                         object_spacing  = object_spacing)
    # sys.exit()

    # I now reoptimize the perfect-observations problem. Without regularization,
    # this is a no-op: I'm already at the optimum. With regularization, this will
    # move us a certain amount (that the test will evaluate). Then I look at
    # noise-induced motions off this optimization optimum
    optimization_inputs_baseline = \
        dict( intrinsics                                = copy.deepcopy(intrinsics_true),
              points                                    = None,
              observations_board                        = observations_true,
              indices_frame_camintrinsics_camextrinsics = indices_frame_camintrinsics_camextrinsics,
              observations_point                        = None,
              indices_point_camintrinsics_camextrinsics = None,
              lensmodel                                 = lensmodel,
              calobject_warp                            = copy.deepcopy(calobject_warp_true),
              imagersizes                               = imagersizes,
              calibration_object_spacing                = object_spacing,
              verbose                                   = False,
              do_optimize_frames                        = not fixedframes,
              do_optimize_intrinsics_core               = False if model =='splined' else True,
              do_optimize_intrinsics_distortions        = True,
              do_optimize_extrinsics                    = True,
              do_optimize_calobject_warp                = True,
              do_apply_regularization                   = True,
              do_apply_outlier_rejection                = False)

    if fixedframes:
        # Frames are fixed: each camera has an independent pose
        optimization_inputs_baseline['extrinsics_rt_fromref'] = \
            copy.deepcopy(extrinsics_true_mounted)
        optimization_inputs_baseline['frames_rt_toref'] = copy.deepcopy(
            frames_true)
    else:
        # Frames are NOT fixed: cam0 is fixed as the reference coord system. I
        # transform each optimization extrinsics vector to be relative to cam0
        optimization_inputs_baseline['extrinsics_rt_fromref'] = \
            mrcal.compose_rt(extrinsics_true_mounted[1:,:],
                             mrcal.invert_rt(extrinsics_true_mounted[0,:]))
        optimization_inputs_baseline['frames_rt_toref'] = \
            mrcal.compose_rt(extrinsics_true_mounted[0,:], frames_true)

    mrcal.optimize(**optimization_inputs_baseline)

    models_baseline = \
        [ mrcal.cameramodel( optimization_inputs = optimization_inputs_baseline,
                             icam_intrinsics     = i) \
          for i in range(Ncameras) ]

    return                                                     \
        optimization_inputs_baseline,                          \
        models_true, models_baseline,                          \
        indices_frame_camintrinsics_camextrinsics,             \
        lensmodel, Nintrinsics, imagersizes,                   \
        intrinsics_true, extrinsics_true_mounted, frames_true, \
        observations_true,                                     \
        Nframes
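A hypothetical invocation with made-up geometry, roughly how the uncertainty
tests are assumed to call this:

import os
import numpy as np

testdir = os.path.dirname(os.path.abspath(__file__))

# cam0 sits at the reference; the other poses are invented for illustration
extrinsics_rt_fromref_true = \
    np.array((( 0,    0,    0,    0,   0,   0  ),
              ( 0.08, 0.2,  0.02, 1.,  0.9, 0.1),
              ( 0.01, 0.07, 0.2,  2.1, 0.4, 0.2),
              (-0.1,  0.08, 0.08, 3.4, 0.2, 0.1)))

(optimization_inputs_baseline,
 models_true, models_baseline,
 indices_frame_camintrinsics_camextrinsics,
 lensmodel, Nintrinsics, imagersizes,
 intrinsics_true, extrinsics_true_mounted, frames_true,
 observations_true,
 Nframes) = \
    calibration_baseline('opencv4',
                         Ncameras             = 4,
                         Nframes              = 50,
                         extra_observation_at = None,
                         object_width_n       = 10,
                         object_height_n      = 9,
                         object_spacing       = 0.1,
                         extrinsics_rt_fromref_true = extrinsics_rt_fromref_true,
                         calobject_warp_true  = np.array((0.002, -0.005)),
                         fixedframes          = False,
                         testdir              = testdir)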
Example #9
def image_transformation_map(model_from, model_to,

                             use_rotation                      = False,
                             plane_n                           = None,
                             plane_d                           = None,
                             mask_valid_intrinsics_region_from = False):

    r'''Compute a reprojection map between two models

SYNOPSIS

    model_orig = mrcal.cameramodel("xxx.cameramodel")
    image_orig = cv2.imread("image.jpg")

    model_pinhole = mrcal.pinhole_model_for_reprojection(model_orig,
                                                         fit = "corners")

    mapxy = mrcal.image_transformation_map(model_orig, model_pinhole)

    image_undistorted = mrcal.transform_image(image_orig, mapxy)

    # image_undistorted is now a pinhole-reprojected version of image_orig

Returns the transformation that describes a mapping

- from pixel coordinates of an image of a scene observed by model_to
- to pixel coordinates of an image of the same scene observed by model_from

This transformation can then be applied to a whole image by calling
mrcal.transform_image().

This function returns a transformation map in an (Nheight,Nwidth,2) array. The
image made by model_to will have shape (Nheight,Nwidth). Each pixel (x,y) in
this image corresponds to a pixel mapxy[y,x,:] in the image made by model_from.

This function has 3 modes of operation:

- intrinsics-only

  This is the default. Selected if

  - use_rotation = False
  - plane_n      = None
  - plane_d      = None

  All of the extrinsics are ignored. If the two cameras have the same
  orientation, then their observations of infinitely-far-away objects will line
  up exactly

- rotation

  This can be selected explicitly with

  - use_rotation = True
  - plane_n      = None
  - plane_d      = None

  Here we use the rotation component of the relative extrinsics. The relative
  translation is impossible to use without knowing what we're looking at, so IT
  IS ALWAYS IGNORED. If the relative orientation in the models matches reality,
  then the two cameras' observations of infinitely-far-away objects will line up
  exactly

- plane

  This is selected if

  - use_rotation = True
  - plane_n is not None
  - plane_d is not None

  We map observations of a given plane in camera FROM coordinates to where
  this plane would be observed by camera TO. This uses ALL the intrinsics,
  extrinsics and the plane representation. If all of these are correct, the
  observations of this plane would line up exactly in the remapped camera-from
  image and the camera-to image. The plane is represented in camera-from
  coordinates by a normal vector plane_n and a distance plane_d: the plane is
  all points p such that inner(p,plane_n) = plane_d. plane_n does not need to
  be normalized; any scaling is compensated in plane_d.

ARGUMENTS

- model_from: the mrcal.cameramodel object describing the camera used to capture
  the input image

- model_to: the mrcal.cameramodel object describing the camera that would have
  captured the image we're producing

- use_rotation: optional boolean, defaulting to False. If True: we respect the
  relative rotation in the extrinsics of the camera models.

- plane_n: optional numpy array of shape (3,); None by default. If given, we
  produce a transformation to map observations of a given plane to the same
  pixels in the source and target images. This argument describes the normal
  vector in the coordinate system of model_from. The plane is all points p such
  that inner(p,plane_n) = plane_d. plane_n does not need to be normalized; any
  scaling is compensated in plane_d. If given, plane_d should be given also, and
  use_rotation should be True. if given, we use the full intrinsics and
  extrinsics of both camera models

- plane_d: optional floating-point value; None by default. If given, we produce
  a transformation to map observations of a given plane to the same pixels in
  the source and target images. The plane is all points p such that
  inner(p,plane_n) = plane_d. plane_n does not need to be normalized; any
  scaling is compensated in plane_d. If given, plane_n should be given also, and
  use_rotation should be True. if given, we use the full intrinsics and
  extrinsics of both camera models

- mask_valid_intrinsics_region_from: optional boolean defaulting to False. If
  True, points outside the valid-intrinsics region in the FROM image are set to
  black, and thus do not appear in the output image

RETURNED VALUE

A numpy array of shape (Nheight,Nwidth,2) where Nheight and Nwidth represent the
imager dimensions of model_to. This array contains 32-bit floats, as required by
cv2.remap() (the function providing the internals of mrcal.transform_image()).
This array can be passed to mrcal.transform_image()

    '''

    if (plane_n is      None and plane_d is not None) or \
       (plane_n is not  None and plane_d is     None):
        raise Exception("plane_n and plane_d should both be None or neither should be None")
    if plane_n is not None and plane_d is not None and \
       not use_rotation:
        raise Exception("We're looking at remapping a plane (plane_d, plane_n are not None), so use_rotation should be True")

    Rt_to_from = None
    if use_rotation:
        Rt_to_r    = model_to.  extrinsics_Rt_fromref()
        Rt_r_from  = model_from.extrinsics_Rt_toref()
        Rt_to_from = mrcal.compose_Rt(Rt_to_r, Rt_r_from)

    lensmodel_from,intrinsics_data_from = model_from.intrinsics()
    lensmodel_to,  intrinsics_data_to   = model_to.  intrinsics()

    if re.match("LENSMODEL_OPENCV",lensmodel_from) and \
       lensmodel_to == "LENSMODEL_PINHOLE"         and \
       plane_n is None                             and \
       not mask_valid_intrinsics_region_from:

        # This is a common special case. This branch works identically to the
        # other path (the other side of this "if" can always be used instead),
        # but the opencv-specific code is optimized and at one point ran faster
        # than the code on the other side.
        #
        # The mask_valid_intrinsics_region_from isn't implemented in this path.
        # It COULD be; if it were, this faster path could be used in more cases
        import cv2

        fxy_from = intrinsics_data_from[0:2]
        cxy_from = intrinsics_data_from[2:4]
        cameraMatrix_from = np.array(((fxy_from[0],          0, cxy_from[0]),
                                      ( 0,         fxy_from[1], cxy_from[1]),
                                      ( 0,                   0,           1)))

        fxy_to = intrinsics_data_to[0:2]
        cxy_to = intrinsics_data_to[2:4]
        cameraMatrix_to = np.array(((fxy_to[0],        0, cxy_to[0]),
                                    ( 0,       fxy_to[1], cxy_to[1]),
                                    ( 0,               0,         1)))

        output_shape = model_to.imagersize()
        distortion_coeffs = intrinsics_data_from[4: ]

        if Rt_to_from is not None:
            R_to_from = Rt_to_from[:3,:]
            if np.trace(R_to_from) > 3. - 1e-12:
                R_to_from = None # identity, so I pass None
        else:
            R_to_from = None

        return nps.glue( *[ nps.dummy(arr,-1) for arr in \
                            cv2.initUndistortRectifyMap(cameraMatrix_from, distortion_coeffs,
                                                        R_to_from,
                                                        cameraMatrix_to, tuple(output_shape),
                                                        cv2.CV_32FC1)],
                         axis = -1)

    W_from,H_from = model_from.imagersize()
    W_to,  H_to   = model_to.  imagersize()

    # shape: (Nheight,Nwidth,2). Contains (x,y) rows
    grid = np.ascontiguousarray(nps.mv(nps.cat(*np.meshgrid(np.arange(W_to),
                                                            np.arange(H_to))),
                                       0,-1),
                                dtype = float)
    if lensmodel_to == "LENSMODEL_PINHOLE":
        # Faster path for the unproject. Nice, simple closed-form solution
        fxy_to = intrinsics_data_to[0:2]
        cxy_to = intrinsics_data_to[2:4]
        v = np.zeros( (grid.shape[0], grid.shape[1], 3), dtype=float)
        v[..., :2] = (grid-cxy_to)/fxy_to
        v[...,  2] = 1
    elif lensmodel_to == "LENSMODEL_STEREOGRAPHIC":
        # Faster path for the unproject. Nice, simple closed-form solution
        v = mrcal.unproject_stereographic(grid, *intrinsics_data_to[:4])
    else:
        v = mrcal.unproject(grid, lensmodel_to, intrinsics_data_to)

    if plane_n is not None:

        R_to_from = Rt_to_from[:3,:]
        t_to_from = Rt_to_from[ 3,:]

        # The homography definition. Derived in many places. For instance in
        # "Motion and structure from motion in a piecewise planar environment"
        # by Olivier Faugeras, F. Lustman.
        A_to_from = plane_d * R_to_from + nps.outer(t_to_from, plane_n)
        A_from_to = np.linalg.inv(A_to_from)
        v = nps.matmult( v, nps.transpose(A_from_to) )

    else:
        if Rt_to_from is not None:
            R_to_from = Rt_to_from[:3,:]
            if np.trace(R_to_from) < 3. - 1e-12:
                # rotation isn't identity. apply
                v = nps.matmult(v, R_to_from)

    mapxy = mrcal.project( v, lensmodel_from, intrinsics_data_from )

    if mask_valid_intrinsics_region_from:

        # Using matplotlib to compute the out-of-bounds points. It doesn't
        # support broadcasting, so I do that manually with a clump/reshape
        from matplotlib.path import Path
        region = Path(model_from.valid_intrinsics_region())
        is_inside = region.contains_points(nps.clump(mapxy,n=2)).reshape(mapxy.shape[:2])
        mapxy[ ~is_inside, :] = -1


    return mapxy.astype(np.float32)
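A hypothetical sketch of the rotation mode of this variant: remap one camera's
view using only the relative rotation, to validate the orientation in a stereo
pair against far-away scenery. File names are made up:

import cv2
import mrcal

model0 = mrcal.cameramodel("camera0.cameramodel")   # hypothetical paths
model1 = mrcal.cameramodel("camera1.cameramodel")
image0 = cv2.imread("camera0.jpg")

mapxy = mrcal.image_transformation_map(model0, model1,
                                       use_rotation = True)

# If the relative rotation in the models matches reality,
# infinitely-far-away objects here line up with camera-1's own image
image0_remapped = mrcal.transform_image(image0, mapxy)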
Example #10
def calibration_baseline(model,
                         Ncameras,
                         Nframes,
                         extra_observation_at,
                         pixel_uncertainty_stdev,
                         object_width_n,
                         object_height_n,
                         object_spacing,
                         extrinsics_rt_fromref_true,
                         calobject_warp_true,
                         fixedframes,
                         testdir,
                         cull_left_of_center=False):
    r'''Compute a calibration baseline as a starting point for experiments

This is a perfect, noiseless solve. Regularization IS enabled, and the returned
model is at the optimization optimum. So the returned models will not sit
exactly at the ground-truth

ARGUMENTS

- model: string. 'opencv4' or 'opencv8' or 'splined'

- ...

    '''

    if re.match('opencv', model):
        models_true = (
            mrcal.cameramodel(f"{testdir}/data/cam0.opencv8.cameramodel"),
            mrcal.cameramodel(f"{testdir}/data/cam0.opencv8.cameramodel"),
            mrcal.cameramodel(f"{testdir}/data/cam1.opencv8.cameramodel"),
            mrcal.cameramodel(f"{testdir}/data/cam1.opencv8.cameramodel"))

        if model == 'opencv4':
            # I have opencv8 models_true, but I truncate to opencv4 models_true
            for m in models_true:
                m.intrinsics(intrinsics=('LENSMODEL_OPENCV4',
                                         m.intrinsics()[1][:8]))
    elif model == 'splined':
        models_true = (
            mrcal.cameramodel(f"{testdir}/data/cam0.splined.cameramodel"),
            mrcal.cameramodel(f"{testdir}/data/cam0.splined.cameramodel"),
            mrcal.cameramodel(f"{testdir}/data/cam1.splined.cameramodel"),
            mrcal.cameramodel(f"{testdir}/data/cam1.splined.cameramodel"))
    else:
        raise Exception("Unknown lens being tested")

    models_true = models_true[:Ncameras]
    lensmodel = models_true[0].intrinsics()[0]
    Nintrinsics = mrcal.lensmodel_num_params(lensmodel)

    for i in range(Ncameras):
        models_true[i].extrinsics_rt_fromref(extrinsics_rt_fromref_true[i])

    imagersizes = nps.cat(*[m.imagersize() for m in models_true])

    # These are perfect
    intrinsics_true = nps.cat(*[m.intrinsics()[1] for m in models_true])
    extrinsics_true_mounted = nps.cat(
        *[m.extrinsics_rt_fromref() for m in models_true])
    x_center = -(Ncameras - 1) / 2.

    # shapes (Nframes, Ncameras, Nh, Nw, 2),
    #        (Nframes, 4,3)
    q_true,Rt_cam0_board_true = \
        mrcal.synthesize_board_observations(models_true,
                                            object_width_n, object_height_n, object_spacing,
                                            calobject_warp_true,
                                            np.array((0.,             0.,             0.,             x_center, 0,   4.0)),
                                            np.array((np.pi/180.*30., np.pi/180.*30., np.pi/180.*20., 2.5,      2.5, 2.0)),
                                            Nframes)

    if extra_observation_at:
        c = mrcal.ref_calibration_object(object_width_n, object_height_n,
                                         object_spacing, calobject_warp_true)
        Rt_cam0_board_true_far = \
            nps.glue( np.eye(3),
                      np.array((0,0,extra_observation_at)),
                      axis=-2)
        q_true_far = \
            mrcal.project(mrcal.transform_point_Rt(Rt_cam0_board_true_far, c),
                          *models_true[0].intrinsics())

        q_true = nps.glue(q_true_far, q_true, axis=-5)
        Rt_cam0_board_true = nps.glue(Rt_cam0_board_true_far,
                                      Rt_cam0_board_true,
                                      axis=-3)

        Nframes += 1

    frames_true = mrcal.rt_from_Rt(Rt_cam0_board_true)

    ############# I have perfect observations in q_true. I corrupt them by noise
    # weight has shape (Nframes, Ncameras, Nh, Nw),
    weight01 = (np.random.rand(*q_true.shape[:-1]) + 1.) / 2.  # in [0,1]
    weight0 = 0.2
    weight1 = 1.0
    weight = weight0 + (weight1 - weight0) * weight01

    if cull_left_of_center:

        imagersize = models_true[0].imagersize()
        for m in models_true[1:]:
            if np.any(m.imagersize() - imagersize):
                raise Exception(
                    "I'm assuming all cameras have the same imager size, but this is false"
                )

        weight[q_true[..., 0] < imagersize[0] / 2.] /= 1000.

    # I want observations of shape (Nframes*Ncameras, Nh, Nw, 3) where each row is
    # (x,y,weight)
    observations_true = nps.clump(nps.glue(q_true,
                                           nps.dummy(weight, -1),
                                           axis=-1),
                                  n=2)

    # Dense observations. All the cameras see all the boards
    indices_frame_camera = np.zeros((Nframes * Ncameras, 2), dtype=np.int32)
    indices_frame = indices_frame_camera[:, 0].reshape(Nframes, Ncameras)
    indices_frame.setfield(nps.outer(np.arange(Nframes, dtype=np.int32),
                                     np.ones((Ncameras, ), dtype=np.int32)),
                           dtype=np.int32)
    indices_camera = indices_frame_camera[:, 1].reshape(Nframes, Ncameras)
    indices_camera.setfield(nps.outer(np.ones((Nframes, ), dtype=np.int32),
                                      np.arange(Ncameras, dtype=np.int32)),
                            dtype=np.int32)

    indices_frame_camintrinsics_camextrinsics = \
        nps.glue(indices_frame_camera,
                 indices_frame_camera[:,(1,)],
                 axis=-1)
    if not fixedframes:
        indices_frame_camintrinsics_camextrinsics[:, 2] -= 1

    ###########################################################################
    # Now I apply pixel noise, and look at the effects on the resulting calibration.

    # p = mrcal.show_geometry(models_true,
    #                         frames          = frames_true,
    #                         object_width_n  = object_width_n,
    #                         object_height_n = object_height_n,
    #                         object_spacing  = object_spacing)
    # sys.exit()

    # I now reoptimize the perfect-observations problem. Without regularization,
    # this is a no-op: I'm already at the optimum. With regularization, this will
    # move us a certain amount (that the test will evaluate). Then I look at
    # noise-induced motions off this optimization optimum
    optimization_inputs_baseline = \
        dict( intrinsics                                = copy.deepcopy(intrinsics_true),
              extrinsics_rt_fromref                     = copy.deepcopy(extrinsics_true_mounted if fixedframes else extrinsics_true_mounted[1:,:]),
              frames_rt_toref                           = copy.deepcopy(frames_true),
              points                                    = None,
              observations_board                        = observations_true,
              indices_frame_camintrinsics_camextrinsics = indices_frame_camintrinsics_camextrinsics,
              observations_point                        = None,
              indices_point_camintrinsics_camextrinsics = None,
              lensmodel                                 = lensmodel,
              calobject_warp                            = copy.deepcopy(calobject_warp_true),
              imagersizes                               = imagersizes,
              calibration_object_spacing                = object_spacing,
              verbose                                   = False,
              observed_pixel_uncertainty                = pixel_uncertainty_stdev,
              do_optimize_frames                        = not fixedframes,
              do_optimize_intrinsics_core               = False if model =='splined' else True,
              do_optimize_intrinsics_distortions        = True,
              do_optimize_extrinsics                    = True,
              do_optimize_calobject_warp                = True,
              do_apply_regularization                   = True,
              do_apply_outlier_rejection                = False)
    mrcal.optimize(**optimization_inputs_baseline)

    models_baseline = \
        [ mrcal.cameramodel( optimization_inputs = optimization_inputs_baseline,
                             icam_intrinsics     = i) \
          for i in range(Ncameras) ]

    return                                                     \
        optimization_inputs_baseline,                          \
        models_true, models_baseline,                          \
        indices_frame_camintrinsics_camextrinsics,             \
        lensmodel, Nintrinsics, imagersizes,                   \
        intrinsics_true, extrinsics_true_mounted, frames_true, \
        observations_true,                                     \
        Nframes
Example #11
def plot_args_points_and_covariance_ellipse(q, what):
    q_mean = np.mean(q, axis=-2)
    q_mean0 = q - q_mean
    Var = np.mean(nps.outer(q_mean0, q_mean0), axis=0)
    return (plot_arg_covariance_ellipse(q_mean, Var, what),
            (q, dict(_with='points pt 6 ps 0.5', tuplesize=-2)))
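These helpers all delegate the ellipse itself to a plotting helper that isn't
shown here. For reference, a minimal sketch of the underlying computation: a
1-sigma contour from the eigendecomposition of Var (the actual helper may
differ):

import numpy as np

def covariance_ellipse_points(q_mean, Var, N=100):
    l, v = np.linalg.eigh(Var)   # ascending eigenvalues; Var is 2x2 symmetric
    t = np.linspace(0, 2.*np.pi, N)
    # scale a unit circle by the per-axis standard deviations, then rotate
    # into the eigenvector basis and translate to the mean
    circle = np.stack((np.cos(t), np.sin(t)), axis=-1) * np.sqrt(l)
    return q_mean + np.inner(circle, v)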