Example #1
def _noisy_observation_vectors_for_triangulation(p, Rt01, intrinsics0,
                                                 intrinsics1, Nsamples, sigma):

    # p has shape (...,3)

    # shape (..., 2)
    q0 = mrcal.project(p, *intrinsics0)
    q1 = mrcal.project(mrcal.transform_point_Rt(mrcal.invert_Rt(Rt01), p),
                       *intrinsics1)

    # shape (..., 1,2). Each has x,y
    q0 = nps.dummy(q0, -2)
    q1 = nps.dummy(q1, -2)

    q_noise = np.random.randn(*p.shape[:-1], Nsamples, 2, 2) * sigma
    # shape (..., Nsamples,2). Each has x,y
    q0_noise = q_noise[..., :, 0, :]
    q1_noise = q_noise[..., :, 1, :]

    q0_noisy = q0 + q0_noise
    q1_noisy = q1 + q1_noise

    # shape (..., Nsamples, 3)
    v0local_noisy = mrcal.unproject(q0_noisy, *intrinsics0)
    v1local_noisy = mrcal.unproject(q1_noisy, *intrinsics1)
    v0_noisy = v0local_noisy
    v1_noisy = mrcal.rotate_point_R(Rt01[:3, :], v1local_noisy)

    # All have shape (..., Nsamples,3)
    return \
        v0local_noisy, v1local_noisy, v0_noisy,v1_noisy, \
        q0,q1, q0_noisy, q1_noisy
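A minimal usage sketch of the helper above (not part of the original snippet): the intrinsics, baseline and noise level below are illustrative assumptions, and mrcal.triangulate_leecivera_mid2() is used only to show how the sampled vectors would typically be consumed.

import numpy as np
import numpysane as nps
import mrcal

# Hypothetical pinhole cameras and a 0.1m baseline along x
intrinsics0 = ('LENSMODEL_PINHOLE', np.array((1000., 1000., 500., 500.)))
intrinsics1 = ('LENSMODEL_PINHOLE', np.array((1000., 1000., 500., 500.)))
Rt01        = nps.glue(np.eye(3), np.array((0.1, 0., 0.)), axis=-2)

# One point 10m in front of camera 0
p = np.array((0., 0., 10.))

v0local, v1local, v0, v1, q0, q1, q0_noisy, q1_noisy = \
    _noisy_observation_vectors_for_triangulation(p, Rt01,
                                                 intrinsics0, intrinsics1,
                                                 Nsamples = 100,
                                                 sigma    = 0.3)

# Triangulate each noisy sample to look at the spread of the reconstruction
p_sampled = mrcal.triangulate_leecivera_mid2(v0, v1, Rt01[3,:])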
Example #2
def _parse_args(v1, t01, get_gradients, v_are_local, Rt01):
    r'''Parse arguments to triangulation functions that take camera-0-referenced v
    AND t01'''

    if Rt01 is not None and t01 is not None:
        raise Exception(
            "Exactly one of Rt01 and t01 must be None. Both were non-None")

    if Rt01 is None and t01 is None:
        raise Exception(
            "Exactly one of Rt01 and t01 must be None. Both were None")

    if v_are_local:
        if get_gradients:
            raise Exception(
                "get_gradients is True, so v_are_local MUST be the default: False"
            )
        if Rt01 is None:
            raise Exception(
                "v_are_local is True, so Rt01 MUST have been given")
        v1 = mrcal.rotate_point_R(Rt01[:3, :], v1)
        t01 = Rt01[3, :]
    else:
        # Normal path
        if t01 is None:
            t01 = Rt01[3, :]
            if get_gradients:
                raise Exception(
                    "get_gradients is True, so t01 MUST have been given")
        else:
            # Normal path
            pass

    return v1, t01
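A short hedged sketch (not from the original code) showing what _parse_args normalizes: the two calling conventions below end up with the same camera-0-referenced v1 and the same t01. The geometry values are illustrative.

import numpy as np
import mrcal

Rt01    = mrcal.Rt_from_rt(np.array((0., 0.2, 0.,   0.1, 0., 0.)))
v1local = np.array((0.1, -0.2, 1.0))                 # v1 in camera-1 coordinates
v1cam0  = mrcal.rotate_point_R(Rt01[:3,:], v1local)  # same vector in camera-0 coordinates

# Convention 1: v1 is local to camera 1, so the full Rt01 is needed
v1a, t01a = _parse_args(v1local, None, False, True, Rt01)

# Convention 2: v1 is already in camera-0 coordinates; t01 alone is enough
v1b, t01b = _parse_args(v1cam0, Rt01[3,:], False, False, None)

assert np.allclose(v1a, v1b) and np.allclose(t01a, t01b)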
Example #3
out36 = base[6, :3, :6, 2]
out3 = base[1, 3, 6:9, 1]
out6 = base[1, 4, :6, 1]
out66 = base[5, 3:9, 3:9, 2]
out66a = base[6, 3:9, 3:9, 2]

confirm_equal(mrcal.identity_R(out=out33), np.eye(3), msg='identity_R')
confirm_equal(mrcal.identity_Rt(out=out43),
              nps.glue(np.eye(3), np.zeros((3, ), ), axis=-2),
              msg='identity_Rt')
confirm_equal(mrcal.identity_r(out=out3), np.zeros((3, )), msg='identity_r')
confirm_equal(mrcal.identity_rt(out=out6), np.zeros((6, )), msg='identity_rt')

################# rotate_point_R
y = \
    mrcal.rotate_point_R(R0_ref, x, out = out3)
confirm_equal(y,
              nps.matmult(x, nps.transpose(R0_ref)),
              msg='rotate_point_R result')

y, J_R, J_x = \
    mrcal.rotate_point_R(R0_ref, x, get_gradients=True,
                         out = (out3,out333,out33))
J_R_ref = grad(lambda R: nps.matmult(x, nps.transpose(R)), R0_ref)
J_x_ref = R0_ref
confirm_equal(y,
              nps.matmult(x, nps.transpose(R0_ref)),
              msg='rotate_point_R result')
confirm_equal(J_R, J_R_ref, msg='rotate_point_R J_R')
confirm_equal(J_x, J_x_ref, msg='rotate_point_R J_x')
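The snippet above leans on test-harness helpers (confirm_equal, the out* views into a preallocated base array, and a numerical-gradient helper grad()) that are defined elsewhere in the test file. A minimal central-difference sketch of what grad() could look like, under that assumption:

import numpy as np

def grad(f, x, step = 1e-6):
    '''Numerical gradient of f at x, with shape f(x).shape + x.shape'''
    f0 = np.asarray(f(x))
    J  = np.zeros(f0.shape + x.shape, dtype=float)
    it = np.nditer(x, flags=['multi_index'])
    for _ in it:
        ix     = it.multi_index
        dx     = np.zeros(x.shape)
        dx[ix] = step
        # central difference along this one element of x
        J[(Ellipsis,) + ix] = (f(x + dx) - f(x - dx)) / (2.*step)
    return J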
Example #4
        v0_rect = mrcal.unproject_pinhole(np.array((np.tan(az0), np.tan(el0))))
        v0_rect /= nps.mag(v0_rect)
        testutils.confirm_equal( v0_rect, v0,
                                 msg=f'vanilla stereo: az0,el0 represent the same point ({lensmodel})',
                                 eps = 1e-3)

    dq0x = np.array((1e-1, 0))
    dq0y = np.array((0, 1e-1))
    v0x  = mrcal.unproject(q0+dq0x, *models_rectified[0].intrinsics())
    v0y  = mrcal.unproject(q0+dq0y, *models_rectified[0].intrinsics())
    dthx = np.arccos(nps.inner(v0x,v0)/np.sqrt(nps.norm2(v0x)*nps.norm2(v0)))
    dthy = np.arccos(nps.inner(v0y,v0)/np.sqrt(nps.norm2(v0y)*nps.norm2(v0)))
    pixels_per_rad_az_rect = nps.mag(dq0x)/dthx
    pixels_per_rad_el_rect = nps.mag(dq0y)/dthy

    q0_cam0  = mrcal.project(mrcal.rotate_point_R(Rt_cam0_rect[:3,:], v0),
                             *model0.intrinsics())
    q0x_cam0 = mrcal.project(mrcal.rotate_point_R(Rt_cam0_rect[:3,:], v0x),
                             *model0.intrinsics())
    q0y_cam0 = mrcal.project(mrcal.rotate_point_R(Rt_cam0_rect[:3,:], v0y),
                             *model0.intrinsics())
    pixels_per_rad_az_cam0 = nps.mag(q0x_cam0 - q0_cam0)/dthx
    pixels_per_rad_el_cam0 = nps.mag(q0y_cam0 - q0_cam0)/dthy

    testutils.confirm_equal(pixels_per_rad_az_rect * 8.,
                            pixels_per_rad_az_cam0,
                            msg=f'vanilla stereo: az pixel density ({lensmodel})',
                            eps = 0.1)

    testutils.confirm_equal(pixels_per_rad_el_rect * 4.,
                            pixels_per_rad_el_cam0,
                            msg=f'vanilla stereo: el pixel density ({lensmodel})',
                            eps = 0.1)
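The finite-difference construction above generalizes: a hedged helper (the name and interface are illustrative, not part of mrcal) that estimates the pixel density, in pixels per radian, of a camera model at pixel q along a small pixel step dq:

import numpy as np
import numpysane as nps
import mrcal

def pixels_per_rad(model, q, dq):
    # unit observation vectors at q and at the perturbed pixel q + dq
    v  = mrcal.unproject(q,      *model.intrinsics(), normalize = True)
    v1 = mrcal.unproject(q + dq, *model.intrinsics(), normalize = True)
    # angle between the two rays
    dth = np.arccos(np.clip(nps.inner(v, v1), -1., 1.))
    return nps.mag(dq) / dth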
Example #5
def stereo_rectify_prepare(models,
                           az_fov_deg,
                           el_fov_deg,
                           az0_deg           = None,
                           el0_deg           = 0,
                           pixels_per_deg_az = None,
                           pixels_per_deg_el = None):

    r'''Precompute everything needed for stereo rectification and matching

SYNOPSIS

    import sys
    import mrcal
    import cv2
    import numpy as np

    # Read commandline arguments: model0 model1 image0 image1
    models = [ mrcal.cameramodel(sys.argv[1]),
               mrcal.cameramodel(sys.argv[2]), ]

    images = [ cv2.imread(sys.argv[i]) \
               for i in (3,4) ]

    # Prepare the stereo system
    rectification_maps,cookie = \
        mrcal.stereo_rectify_prepare(models,
                                     az_fov_deg = 120,
                                     el_fov_deg = 100)

    # Visualize the geometry of the two cameras and of the rotated stereo
    # coordinate system
    Rt_cam0_ref    = models[0].extrinsics_Rt_fromref()
    Rt_cam0_stereo = cookie['Rt_cam0_stereo']
    Rt_stereo_ref  = mrcal.compose_Rt( mrcal.invert_Rt(Rt_cam0_stereo),
                                       Rt_cam0_ref )
    rt_stereo_ref  = mrcal.rt_from_Rt(Rt_stereo_ref)

    mrcal.show_geometry( models + [ rt_stereo_ref ],
                         ( "camera0", "camera1", "stereo" ),
                         show_calobjects = False,
                         wait            = True )

    # Rectify the images
    images_rectified = \
      [ mrcal.transform_image(images[i], rectification_maps[i]) \
        for i in range(2) ]

    cv2.imwrite('/tmp/rectified0.jpg', images_rectified[0])
    cv2.imwrite('/tmp/rectified1.jpg', images_rectified[1])

    # Find stereo correspondences using OpenCV
    block_size = 3
    max_disp   = 160 # in pixels
    stereo = \
        cv2.StereoSGBM_create(minDisparity      = 0,
                              numDisparities    = max_disp,
                              blockSize         = block_size,
                              P1                = 8 *3*block_size*block_size,
                              P2                = 32*3*block_size*block_size,
                              uniquenessRatio   = 5,

                              disp12MaxDiff     = 1,
                              speckleWindowSize = 50,
                              speckleRange      = 1)
    disparity16 = stereo.compute(*images_rectified) # in pixels*16

    cv2.imwrite('/tmp/disparity.png',
                mrcal.apply_color_map(disparity16,
                                      0, max_disp*16.))

    # Convert the disparities to range to camera0
    r = mrcal.stereo_range( disparity16.astype(np.float32) / 16.,
                            **cookie )

    cv2.imwrite('/tmp/range.png', mrcal.apply_color_map(r, 5, 1000))

This function does the initial computation required to perform stereo matching,
and to get ranges from a stereo pair. It computes

- the pose of the rectified stereo coordinate system

- the azimuth/elevation grid used in the rectified images

- the rectification maps used to transform images into the rectified space

Using the results of one call to this function we can compute the stereo
disparities of many pairs of synchronized images.

This function is generic: the two cameras may have any lens models, any
resolution and any geometry. They don't even have to match. As long as there's
some non-zero baseline and some overlapping views, we can set up stereo matching
using this function. The input images are transformed into a "rectified" space.
Geometrically, the rectified coordinate system sits at the origin of camera0,
with a rotation. The axes of the rectified coordinate system:

- x: from the origin of camera0 to the origin of camera1 (the baseline direction)

- y: completes the system from x,z

- z: the "forward" direction of the two cameras, with the component parallel to
     the baseline subtracted off

In a nominal geometry (the two cameras are square with each other, camera1
strictly to the right of camera0), the rectified coordinate system exactly
matches the coordinate system of camera0. The above formulation supports any
geometry, however, including vertical and/or forward/backward shifts. Vertical
stereo is supported.

Rectified images represent 3D planes intersecting the origins of the two
cameras. The tilt of each plane is the "elevation", and the left/right direction
inside each plane is the "azimuth". We generate rectified images where each
pixel coordinate represents (x = azimuth, y = elevation). Each row therefore
scans the azimuths at a single elevation, so a given row in the two rectified
images represents the same plane in 3D, and matching features within that row
produces a stereo disparity and a range.

In the rectified system, elevation is a rotation about the x axis, while azimuth
is a rotation about the normal of the resulting tilted plane.

We produce rectified images whose pixel coordinates are linear with azimuths and
elevations. This means that the azimuth angular resolution is constant
everywhere, even at the edges of a wide-angle image.

We return a set of transformation maps and a cookie. The maps can be used to
generate rectified images. These rectified images can be processed by any
stereo-matching routine to generate a disparity image. To interpret the
disparity image, call stereo_unproject() or stereo_range() with the cookie
returned here.

The cookie is a Python dict that describes the rectified space. It is guaranteed
to have the following keys:

- Rt_cam0_stereo: an Rt transformation to map a representation of points in the
  rectified coordinate system to a representation in the camera0 coordinate system

- baseline: the distance between the two cameras

- az_row: an array of shape (Naz,) describing the azimuths in each row of the
  disparity image

- el_col: an array of shape (Nel,1) describing the elevations in each column of
  the disparity image

ARGUMENTS

- models: an iterable of two mrcal.cameramodel objects representing the cameras
  in the stereo system. Any sane combination of lens models and resolutions and
  geometries is valid

- az_fov_deg: required value for the azimuth (along-the-baseline) field-of-view
  of the desired rectified view, in degrees

- el_fov_deg: required value for the elevation (across-the-baseline)
  field-of-view of the desired rectified view, in degrees

- az0_deg: optional value for the azimuth center of the rectified images. This
  is especially significant in a camera system with some forward/backward shift.
  That causes the baseline to no longer be perpendicular to the view axis of
  the cameras, and thus azimuth = 0 is no longer at the center of the input
  images. If omitted, we compute the center azimuth that aligns with the center
  of the cameras' view

- el0_deg: optional value for the elevation center of the rectified system.
  Defaults to 0.

- pixels_per_deg_az: optional value for the azimuth resolution of the rectified
  image. If omitted (or None), we use the resolution of the input image at
  (azimuth, elevation) = 0. If a resolution of <0 is requested, we use this as a
  scale factor on the resolution of the input image. For instance, to downsample
  by a factor of 2, pass pixels_per_deg_az = -0.5

- pixels_per_deg_el: same as pixels_per_deg_az but in the elevation direction

RETURNED VALUES

We return a tuple

- transformation maps: a length-2 tuple containing transformation maps for
  each camera. Each map can be used to mrcal.transform_image() images to the
  rectified space

- cookie: a dict describing the rectified space. Intended as input to
  stereo_unproject() and stereo_range(). See the description above for more
  detail

    '''

    if len(models) != 2:
        raise Exception("I need exactly 2 camera models")

    def normalize(v):
        v /= nps.mag(v)
        return v

    def remove_projection(a, proj_base):
        r'''Returns the normalized component of a orthogonal to proj_base

        proj_base assumed normalized'''
        v = a - nps.inner(a,proj_base)*proj_base
        return normalize(v)

    ######## Compute the geometry of the rectified stereo system. This is a
    ######## rotation, centered at camera0. More or less we have axes:
    ########
    ######## x: from camera0 to camera1
    ######## y: completes the system from x,z
    ######## z: component of the cameras' viewing direction
    ########    normal to the baseline
    Rt_cam0_ref = models[0].extrinsics_Rt_fromref()
    Rt01 = mrcal.compose_Rt( Rt_cam0_ref,
                             models[1].extrinsics_Rt_toref())
    Rt10 = mrcal.invert_Rt(Rt01)

    # Rotation relating camera0 coords to the rectified camera coords. I fill in
    # each row separately
    R_stereo_cam0 = np.zeros((3,3), dtype=float)
    right         = R_stereo_cam0[0,:]
    down          = R_stereo_cam0[1,:]
    forward       = R_stereo_cam0[2,:]

    # "right" of the rectified coord system: towards the origin of camera1 from
    # camera0, in camera0 coords
    right[:] = Rt01[3,:]
    baseline = nps.mag(right)
    right   /= baseline

    # "forward" for each of the two cameras, in the cam0 coord system
    forward0 = np.array((0,0,1.))
    forward1 = Rt01[:3,2]

    # "forward" of the rectified coord system, in camera0 coords. The mean of
    # the two non-right "forward" directions
    forward[:] = normalize( ( remove_projection(forward0,right) +
                              remove_projection(forward1,right) ) / 2. )

    # "down" of the rectified coord system, in camera0 coords. Completes the
    # right,down,forward coordinate system
    down[:] = np.cross(forward,right)

    R_cam0_stereo = nps.transpose(R_stereo_cam0)



    ######## Done with the geometry! Now to get the az/el grid. I need to figure
    ######## out the resolution and the extents


    if az0_deg is not None:
        az0 = az0_deg * np.pi/180.

    else:
        # In the rectified system az=0 sits perpendicular to the baseline.
        # Normally the cameras are looking out perpendicular to the baseline
        # also, so I center my azimuth samples around 0 to match the cameras'
        # field of view. But what if the geometry isn't square, and one camera
        # is behind the other? Like this:
        #
        #    camera
        #     view
        #       ^
        #       |
        #     \ | /
        #      \_/
        #        .    /
        #         .  /az=0
        #          ./
        #           .
        #  baseline  .
        #             .
        #            \   /
        #             \_/
        #
        # Here the center-of-view axis of each camera is not at all
        # perpendicular to the baseline. Thus I compute the mean "forward"
        # direction of the cameras in the rectified system, and set that as the
        # center azimuth az0.
        v0 = nps.matmult( forward0, R_cam0_stereo ).ravel()
        v1 = nps.matmult( forward1, R_cam0_stereo ).ravel()
        v0[1] = 0.0
        v1[1] = 0.0
        normalize(v0)
        normalize(v1)
        v = v0 + v1
        az0 = np.arctan2(v[0],v[2])


    el0 = el0_deg * np.pi/180.

    ####### Rectified image resolution
    if pixels_per_deg_az is None or pixels_per_deg_az < 0 or \
       pixels_per_deg_el is None or pixels_per_deg_el < 0:
        # I need to compute the resolution of the rectified images. I try to
        # match the resolution of the cameras. I just look at camera0. If you
        # have different cameras, pass in pixels_per_deg yourself :)
        #
        # I look at the center of the stereo field of view. There I have q =
        # project(v) where v is a unit projection vector. I compute dq/dth where
        # th is an angular perturbation applied to v.
        def rotation_any_v_to_z(v):
            r'''Return any rotation matrix that maps the given unit vector v to [0,0,1]'''
            z = v
            # use float arrays: x is modified in-place with float math below
            if np.abs(v[0]) < .9:
                x = np.array((1., 0., 0.))
            else:
                x = np.array((0., 1., 0.))
            x -= nps.inner(x,v)*v
            x /= nps.mag(x)
            y = np.cross(z,x)

            return nps.cat(x,y,z)


        v, dv_dazel = stereo_unproject(az0, el0, get_gradients = True)
        v0          = mrcal.rotate_point_R(R_cam0_stereo, v)
        dv0_dazel   = nps.matmult(R_cam0_stereo, dv_dazel)

        _,dq_dv0,_ = mrcal.project(v0, *models[0].intrinsics(), get_gradients = True)

        # I rotate my v to a coordinate system where u = rotate(v) is [0,0,1].
        # Then u = [a,b,0] are all orthogonal to v. So du/dth = [cos, sin, 0].
        # I then have dq/dth = dq/dv dv/du [cos, sin, 0]t
        # ---> dq/dth = dq/dv dv/du[:,:2] [cos, sin]t = M [cos,sin]t
        #
        # norm2(dq/dth) = [cos,sin] MtM [cos,sin]t is then an ellipse with the
        # eigenvalues of MtM giving me the best and worst sensitivities. I can
        # use mrcal.worst_direction_stdev() to find the densest direction. But I
        # actually know the directions I care about, so I evaluate them
        # independently for the az and el directions

        # Ruv = rotation_any_v_to_z(v0)
        # M = nps.matmult(dq_dv0, nps.transpose(Ruv[:2,:]))
        # # I pick the densest direction: highest |dq/dth|
        # pixels_per_rad = mrcal.worst_direction_stdev( nps.matmult( nps.transpose(M),M) )

        if pixels_per_deg_az is None or pixels_per_deg_az < 0:
            dq_daz = nps.inner( dq_dv0, dv0_dazel[:,0] )
            pixels_per_rad_az_have = nps.mag(dq_daz)

            if pixels_per_deg_az is not None:
                # negative pixels_per_deg_az requested means I use the requested
                # value as a scaling
                pixels_per_deg_az = -pixels_per_deg_az * pixels_per_rad_az_have*np.pi/180.
            else:
                pixels_per_deg_az = pixels_per_rad_az_have*np.pi/180.

        if pixels_per_deg_el is None or pixels_per_deg_el < 0:
            dq_del = nps.inner( dq_dv0, dv0_dazel[:,1] )
            pixels_per_rad_el_have = nps.mag(dq_del)

            if pixels_per_deg_el is not None:
                # negative pixels_per_deg_el requested means I use the requested
                # value as a scaling
                pixels_per_deg_el = -pixels_per_deg_el * pixels_per_rad_el_have*np.pi/180.
            else:
                pixels_per_deg_el = pixels_per_rad_el_have*np.pi/180.



    Naz = round(az_fov_deg*pixels_per_deg_az)
    Nel = round(el_fov_deg*pixels_per_deg_el)

    # Adjust the fov to keep the requested resolution and pixel counts
    az_fov_radius_deg = Naz / (2.*pixels_per_deg_az)
    el_fov_radius_deg = Nel / (2.*pixels_per_deg_el)

    # shape (Naz,)
    az = np.linspace(az0 - az_fov_radius_deg*np.pi/180.,
                     az0 + az_fov_radius_deg*np.pi/180.,
                     Naz)
    # shape (Nel,1)
    el = nps.dummy( np.linspace(el0 - el_fov_radius_deg*np.pi/180.,
                                el0 + el_fov_radius_deg*np.pi/180.,
                                Nel),
                    -1 )

    # v has shape (Nel,Naz,3)
    v = stereo_unproject(az, el)

    v0 = nps.matmult( nps.dummy(v,  -2), R_stereo_cam0 )[...,0,:]
    v1 = nps.matmult( nps.dummy(v0, -2), Rt01[:3,:]    )[...,0,:]

    cookie = \
        dict( Rt_cam0_stereo    = nps.glue(R_cam0_stereo, np.zeros((3,)), axis=-2),
              baseline          = baseline,
              az_row            = az,
              el_col            = el,

              # The caller should NOT assume these are available in the cookie:
              # some other rectification scheme may not produce linear az/el
              # maps
              pixels_per_deg_az = pixels_per_deg_az,
              pixels_per_deg_el = pixels_per_deg_el,
            )

    return                                                                \
        (mrcal.project( v0, *models[0].intrinsics()).astype(np.float32),  \
         mrcal.project( v1, *models[1].intrinsics()).astype(np.float32)), \
        cookie
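The geometry step above can be read in isolation. A condensed, hedged restatement (the function name is illustrative): build the rotation relating camera-0 coordinates to the rectified coordinates from the camera-0 -> camera-1 transform alone.

import numpy as np
import numpysane as nps

def rectified_rotation_cam0(Rt01):
    def normalize(v):
        return v / nps.mag(v)
    def remove_projection(a, b):
        return normalize(a - nps.inner(a, b)*b)

    right    = normalize(Rt01[3,:])       # baseline direction, in camera-0 coords
    forward0 = np.array((0., 0., 1.))     # camera-0 optical axis
    forward1 = Rt01[:3,2]                 # camera-1 optical axis, in camera-0 coords
    # mean "forward" of the two cameras, with the baseline component removed
    forward  = normalize(remove_projection(forward0, right) +
                         remove_projection(forward1, right))
    down     = np.cross(forward, right)   # completes the right,down,forward system
    R_stereo_cam0 = nps.cat(right, down, forward)
    return nps.transpose(R_stereo_cam0)   # R_cam0_stereo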
Example #6
out33d = base[4, :3, :3, 2]
out36 = base[6, :3, :6, 2]
out3 = base[1, 3, 6:9, 1]
out6 = base[1, 4, :6, 1]
out66 = base[5, 3:9, 3:9, 2]
out66a = base[6, 3:9, 3:9, 2]

confirm_equal(mrcal.identity_R(out=out33), np.eye(3), msg='identity_R')
confirm_equal(mrcal.identity_Rt(out=out43),
              nps.glue(np.eye(3), np.zeros((3, ), ), axis=-2),
              msg='identity_Rt')
confirm_equal(mrcal.identity_r(out=out3), np.zeros((3, )), msg='identity_r')
confirm_equal(mrcal.identity_rt(out=out6), np.zeros((6, )), msg='identity_rt')

y = \
    mrcal.rotate_point_R(R0_ref, x, out = out3)
confirm_equal(y,
              nps.matmult(x, nps.transpose(R0_ref)),
              msg='rotate_point_R result')

y, J_R, J_x = \
    mrcal.rotate_point_R(R0_ref, x, get_gradients=True,
                         out = (out3,out333,out33))
J_R_ref = grad(lambda R: nps.matmult(x, nps.transpose(R)), R0_ref)
J_x_ref = R0_ref
confirm_equal(y,
              nps.matmult(x, nps.transpose(R0_ref)),
              msg='rotate_point_R result')
confirm_equal(J_R, J_R_ref, msg='rotate_point_R J_R')
confirm_equal(J_x, J_x_ref, msg='rotate_point_R J_x')
Example #7
def triangulate_lindstrom(v0, v1, Rt01, get_gradients=False, v_are_local=True):
    r'''Triangulation minimizing the 2-norm of pinhole reprojection errors

SYNOPSIS

    models = ( mrcal.cameramodel('cam0.cameramodel'),
               mrcal.cameramodel('cam1.cameramodel') )

    images = (cv2.imread('image0.jpg', cv2.IMREAD_GRAYSCALE),
              cv2.imread('image1.jpg', cv2.IMREAD_GRAYSCALE))

    Rt01 = mrcal.compose_Rt( models[0].extrinsics_Rt_fromref(),
                             models[1].extrinsics_Rt_toref() )

    R01 = Rt01[:3,:]
    t01 = Rt01[ 3,:]

    # pixel observation in camera0
    q0 = np.array((1233, 2433), dtype=np.float32)

    # corresponding pixel observation in camera1
    q1, _ = \
        mrcal.match_feature( *images,
                             template_size = (17,17),
                             method        = cv2.TM_CCORR_NORMED,
                             search_radius = 20,
                             q0            = q0,
                             H10           = H10, # homography mapping q0 to q1
                           )

    # observation vectors in the LOCAL coordinate system of the two cameras
    v0 = mrcal.unproject(q0, *models[0].intrinsics())
    v1 = mrcal.unproject(q1, *models[1].intrinsics())

    # Estimated 3D position in camera-0 coordinates of the feature observed in
    # the two cameras
    p = mrcal.triangulate_lindstrom( v0, v1, Rt01 )

This function implements a triangulation routine minimizing the 2-norm of
reprojection errors, ASSUMING a pinhole projection. This is described in

  "Triangulation Made Easy", Peter Lindstrom, IEEE Conference on Computer Vision
  and Pattern Recognition, 2010.

This is the "L2 img 5-iteration" method in the paper

  "Triangulation: Why Optimize?", Seong Hun Lee and Javier Civera.
  https://arxiv.org/abs/1907.11917

but with only 2 iterations (Lindstrom's paper recommends 2 iterations). The
Lee, Civera paper compares many methods. This routine works decently well, but
it isn't the best: the angular methods should work better than this one for wide
lenses. According to that paper, triangulate_leecivera_mid2() (or
triangulate_leecivera_wmid2() if we're near the cameras) is preferred.

The assumption of a pinhole projection is a poor one when using a wide lens, and
looking away from the optical center. The Lee-Civera triangulation functions
don't have this problem, and are generally faster. See the Lee, Civera paper for
details.

If the triangulated point lies behind either camera (i.e. if the observation
rays are parallel or divergent), (0,0,0) is returned.

This function supports broadcasting fully.

This function takes a full transformation Rt01, instead of the t01 that most of
the other triangulation functions take by default. The other functions may take
Rt01 as well, for API compatibility.

Also, by default this function takes v1 in the camera-1-local coordinate system,
unlike most of the other triangulation routines. If not v_are_local: v1 is
interpreted in the camera-0 coordinate system instead. This makes it simple to
compare the routines against one another.

The invocation compatible across all the triangulation routines omits t01, and
passes Rt01 and v_are_local:

  triangulate_...( v0, v1,
                   Rt01        = Rt01,
                   v_are_local = False )

Gradient reporting is only possible in the default case of v_are_local = True

ARGUMENTS

- v0: (3,) numpy array containing a not-necessarily-normalized observation
  vector of a feature observed in camera-0, described in the camera-0 coordinate
  system

- v1: (3,) numpy array containing a not-necessarily-normalized observation
  vector of a feature observed in camera-1, described in the camera-1 coordinate
  system by default (v_are_local is True). Note that this vector is represented
  in the camera-local coordinate system, unlike the representation in all the
  other triangulation routines

- Rt01: (4,3) numpy array describing the transformation from camera-1
  coordinates to camera-0 coordinates

- get_gradients: optional boolean that defaults to False. Whether we should
  compute and report the gradients. This affects what we return. If
  get_gradients: v_are_local must have the default value

- v_are_local: optional boolean that defaults to True. If True: v1 is
  represented in the local coordinate system of camera-1. This is different from
  the other triangulation routines. Set v_are_local to False to make this
  function interpret v1 similarly to the other triangulation routines. Must have
  the default value if get_gradients

RETURNED VALUE

if not get_gradients:

  we return an (...,3) array of triangulated point positions in the camera-0
  coordinate system

if get_gradients: we return a tuple:

  - (...,3) array of triangulated point positions
  - (...,3,3) array of the gradients of the triangulated positions with respect
    to v0
  - (...,3,3) array of the gradients of the triangulated positions with respect
    to v1
  - (...,3,4,3) array of the gradients of the triangulated positions with
    respect to Rt01

    '''

    if not v_are_local:
        if get_gradients:
            raise Exception(
                "get_gradients is True, so v_are_local MUST be True")
        v1 = mrcal.rotate_point_R(nps.transpose(Rt01[:3, :]), v1)

    if not get_gradients:
        return mrcal._mrcal_npsp._triangulate_lindstrom(v0, v1, Rt01)
    else:
        return mrcal._mrcal_npsp._triangulate_lindstrom_withgrad(v0, v1, Rt01)
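A short hedged usage sketch of the two calling conventions described above. The variable names (v1local, v1cam0) are illustrative; v0, Rt01 and the observation vectors are assumed to have been set up as in the SYNOPSIS.

import mrcal

# Default convention: v1 given in the camera-1-local coordinate system
p = mrcal.triangulate_lindstrom(v0, v1local, Rt01)

# Convention shared with the other triangulation routines: v1 in camera-0 coords
p = mrcal.triangulate_lindstrom(v0, v1cam0,
                                Rt01        = Rt01,
                                v_are_local = False)

# Gradients are available only with the default v_are_local = True
p, dp_dv0, dp_dv1, dp_dRt01 = \
    mrcal.triangulate_lindstrom(v0, v1local, Rt01, get_gradients = True)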