Example #1
def img_rms(f0, f1, RT):
    # RMS intensity error between f1 and f0 warped into f1's frame by RT.
    (ok, f1p, f1i0, f1i) = img_err(f0, f1, RT)
    diff = f1i0 - f1i
    error = diff * diff
    rms = math.sqrt(
        vop.where(ok, error, 0.0).sum() / vop.where(f1p, 1, 0).sum())
    return rms
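
A small NumPy sketch of the same masked-RMS reduction may make the vop calls easier to read; masked_rms and the toy arrays below are hypothetical stand-ins for the vop vectors. As in the original, the squared error is masked by ok while the denominator counts the f1p-valid pixels.

import numpy as np

def masked_rms(ok, valid, warped, observed):
    # Illustrative NumPy analogue of img_rms: RMS intensity error over the
    # pixels flagged ok, normalised by the number of valid pixels.
    diff = warped - observed
    err = np.where(ok, diff * diff, 0.0)
    return np.sqrt(err.sum() / np.where(valid, 1, 0).sum())

# Toy data: four "pixels", their masks, and two intensity vectors.
ok = np.array([True, True, False, True])
valid = np.array([True, True, True, True])
warped = np.array([0.2, 0.5, 0.9, 0.1])
observed = np.array([0.25, 0.45, 0.0, 0.1])
print(masked_rms(ok, valid, warped, observed))
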
Example #2
def img_img(f0, f1, RT):
    # Compose an RGB visualisation: f1 in the red channel, warped f0 in green.
    (ok, f1p, f1i0, f1i) = img_err(f0, f1, RT)
    c_r = 255 * vop.where(ok, f1i, 0)
    c_g = 255 * vop.where(ok, f1i0, 0)
    c_b = 0 * c_g  # 255 * vop.where(ok & (abs(f1d0 - f0d) < 1.5), 1.0, 0.0)
    return Image.merge("RGB", [
        Image.fromstring("L", chipsize, c.tostring()) for c in [c_r, c_g, c_b]
    ])
Example #3
def img_err(f0, f1, RT, stereo_cam):
    # For each point in f1's point cloud, compute its xyz
    # Transform those points from f1's frame into f0's
    # Compute the uvd of each point
    # Sample the f0 image using the uv

    def f_uvd(f):
        d_str = "".join([chr(min(x / 4, 255)) for x in f.disp_values])
        d = vop.array(
            Image.fromstring("L", (640, 480),
                             d_str).resize(chipsize, Image.NEAREST).tostring())
        f_valid = d != 255.0
        f_d = d / 4
        u = vop.duplicate(vop.arange(chipsize[0]), chipsize[1]) * factor
        v = vop.replicate(vop.arange(chipsize[1]), chipsize[0]) * factor
        i = vop.array(
            Image.fromstring("L", (640, 480), f.rawdata).resize(
                chipsize, Image.NEAREST).tostring()) / 255.
        return (f_valid, u, v, f_d, i)

    f0p, f0u, f0v, f0d, f0i = f_uvd(f0)
    f1p, f1u, f1v, f1d, f1i = f_uvd(f1)
    (f1x, f1y, f1z) = stereo_cam.pix2cam(f1u, f1v, f1d)
    (f1x0, f1y0, f1z0) = xform(RT, f1x, f1y, f1z)
    (f1u0, f1v0, f1d0) = stereo_cam.cam2pix(f1x0, f1y0, f1z0)
    addr = vop.floor(f1u0 / factor) + chipsize[0] * vop.floor(f1v0 / factor)
    ok = f1p & (0 <= f1u0) & (f1u0 < 640) & (0 <= f1v0) & (f1v0 < 480)
    addr = vop.where(ok, addr, 0)
    f1i0 = vop.take(f0i, addr)
    return (ok, f1p, f1i0, f1i)
Example #4
  def match0(self, af0kp, af0descriptors, af1kp, af1descriptors, wide=False):
    """
    Given keypoints and descriptors for af0 and af1, returns
    a list of pairs *(a,b)*, where
    *a* is an index into this *af0*'s keypoints, and
    *b* is an index into *af1*'s keypoints.
    """

    if af0kp == [] or af1kp == []:
      return []
    if wide:
      xs = self.wxs
      ys = self.wys
    else:
      xs = self.xs
      ys = self.ys

    self.calls += 1
    self.timer['Match'].start()
    Xs = vop.array([k[0] for k in af1kp])
    Ys = vop.array([k[1] for k in af1kp])
    pairs = []
    matcher = self.desc2matcher(af1descriptors)
    for (i,(ki,di)) in enumerate(zip(af0kp, af0descriptors)):
      predX = (abs(Xs - ki[0]) < xs)
      predY = (abs(Ys - ki[1]) < ys)
      hits = vop.where(predX & predY, 1, 0).tostring()
      best = self.search(di, matcher, hits)
      if best is not None:
        pairs.append((i, best[0], best[1]))
    self.timer['Match'].stop()
    return pairs
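
match0 above, and the match variants in the examples that follow, all share one pattern: gate af1's keypoints to a rectangular window around each af0 keypoint, then run a descriptor search over only the gated candidates. The NumPy sketch below illustrates that pattern with toy data; window_match and its brute-force SAD loop are hypothetical stand-ins for the vop gating plus VO.sad_search / self.search, which are not shown in this listing.

import numpy as np

def window_match(kp0, desc0, kp1, desc1, xs=64, ys=32):
    # Toy analogue of the windowed match: for each frame-0 keypoint, consider
    # only frame-1 keypoints inside an (xs, ys) window, then keep the candidate
    # whose descriptor has the smallest SAD distance.
    Xs = np.array([k[0] for k in kp1], dtype=float)
    Ys = np.array([k[1] for k in kp1], dtype=float)
    pairs = []
    for i, (ki, di) in enumerate(zip(kp0, desc0)):
        hits = (np.abs(Xs - ki[0]) < xs) & (np.abs(Ys - ki[1]) < ys)
        if not hits.any():
            continue
        cand = np.flatnonzero(hits)
        sad = np.abs(desc1[cand] - di).sum(axis=1)
        j = cand[np.argmin(sad)]
        pairs.append((i, int(j), float(sad.min())))
    return pairs

# Toy data: (u, v) keypoints with 8-element descriptors.
rng = np.random.default_rng(0)
kp0 = [(10.0, 20.0), (300.0, 200.0)]
kp1 = [(12.0, 22.0), (305.0, 198.0), (600.0, 400.0)]
desc0 = rng.random((2, 8))
desc1 = np.vstack([desc0 + 0.01 * rng.random((2, 8)), rng.random((1, 8))])
print(window_match(kp0, desc0, kp1, desc1))   # expect [(0, 0, ...), (1, 1, ...)]
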
Example #5
 def match(self, af0, af1):
     Xs = vop.array([k[0] for k in af1.kp])
     Ys = vop.array([k[1] for k in af1.kp])
     pairs = []
     for (i, (ki, di)) in enumerate(zip(af0.kp, af0.descriptors)):
         # hits = (Numeric.logical_and(Numeric.absolute(NXs - ki[0]) < 64, Numeric.absolute(NYs - ki[1]) < 32)).astype(Numeric.UnsignedInt8).tostring()
         predX = (abs(Xs - ki[0]) < 64)
         predY = (abs(Ys - ki[1]) < 32)
         hits = vop.where(predX & predY, 1, 0).tostring()
         best = VO.sad_search(di, af1.descriptors, hits)
         if best is not None:
             pairs.append((i, best, 0))
     return pairs
Example #6
 def match(self, af0, af1):
     Xs = vop.array([k[0] for k in af1.kp])
     Ys = vop.array([k[1] for k in af1.kp])
     pairs = []
     for (i, ki) in enumerate(af0.kp):
         # hits = (Numeric.logical_and(Numeric.absolute(NXs - ki[0]) < 64, Numeric.absolute(NYs - ki[1]) < 32)).astype(Numeric.UnsignedInt8).tostring()
         predX = (abs(Xs - ki[0]) < 64)
         predY = (abs(Ys - ki[1]) < 32)
         hits = vop.where(predX & predY, 1, 0).tostring()
         for (j, c) in enumerate(hits):
             if ord(c) != 0:
                 pairs.append((i, j))
     return pairs
Example #7
 def match(self, af0, af1):
   Xs = vop.array([k[0] for k in af1.kp])
   Ys = vop.array([k[1] for k in af1.kp])
   pairs = []
   for (i,(ki,di)) in enumerate(zip(af0.kp,af0.descriptors)):
     # hits = (Numeric.logical_and(Numeric.absolute(NXs - ki[0]) < 64, Numeric.absolute(NYs - ki[1]) < 32)).astype(Numeric.UnsignedInt8).tostring()
     predX = (abs(Xs - ki[0]) < 64)
     predY = (abs(Ys - ki[1]) < 32)
     hits = vop.where(predX & predY, 1, 0).tostring()
     best = VO.sad_search(di, af1.descriptors, hits)
      if best is not None:
       pairs.append((i, best, 0))
   return pairs
Example #8
 def match(self, af0, af1):
   Xs = vop.array([k[0] for k in af1.kp])
   Ys = vop.array([k[1] for k in af1.kp])
   pairs = []
   for (i,ki) in enumerate(af0.kp):
     # hits = (Numeric.logical_and(Numeric.absolute(NXs - ki[0]) < 64, Numeric.absolute(NYs - ki[1]) < 32)).astype(Numeric.UnsignedInt8).tostring()
     predX = (abs(Xs - ki[0]) < 64)
     predY = (abs(Ys - ki[1]) < 32)
     hits = vop.where(predX & predY, 1, 0).tostring()
     for (j,c) in enumerate(hits):
       if ord(c) != 0:
         pairs.append((i, j))
   return pairs
Example #9
 def match(self, af0, af1):
   if af0.kp == [] or af1.kp == []:
     return []
   Xs = vop.array([k[0] for k in af1.kp])
   Ys = vop.array([k[1] for k in af1.kp])
   pairs = []
   for (i,(ki,di)) in enumerate(zip(af0.kp,af0.descriptors)):
     predX = (abs(Xs - ki[0]) < 64)
     predY = (abs(Ys - ki[1]) < 32)
     hits = vop.where(predX & predY, 1, 0).tostring()
     best = self.search(di, af1, hits)
      if best is not None:
       pairs.append((i, best[0], best[1]))
   return pairs
Example #10
def img_err(f0, f1, RT, stereo_cam):
    # For each point in f1's point cloud, compute its xyz
    # Transform those points from f1's frame into f0's
    # Compute the uvd of each point
    # Sample the f0 image using the uv
    # Returns f0 warped into f1's frame

    def f_uvd(f):
        d_str = "".join([chr(min(x / 4, 255)) for x in f.disp_values])
        d = vop.array(
            Image.fromstring("L", (640, 480),
                             d_str).resize(chipsize, Image.NEAREST).tostring())
        f_valid = d != 255.0
        f_d = d / 4
        u = vop.duplicate(vop.arange(chipsize[0]), chipsize[1]) * factor
        v = vop.replicate(vop.arange(chipsize[1]), chipsize[0]) * factor
        i = vop.array(
            Image.fromstring("L", (640, 480), f.rawdata).resize(
                chipsize, Image.NEAREST).tostring()) / 255.
        return (f_valid, u, v, f_d, i)

    # Compute validity and (u,v,d) for both images
    f0p, f0u, f0v, f0d, f0i = f_uvd(f0)
    f1p, f1u, f1v, f1d, f1i = f_uvd(f1)

    # Lift f1's (u,v,d) into camera-frame (x,y,z)
    (f1x, f1y, f1z) = stereo_cam.pix2cam(f1u, f1v, f1d)

    # Apply the given transform to carry those points into f0's frame
    (f1x0, f1y0, f1z0) = xform(RT, f1x, f1y, f1z)

    # Now take those x,y,z and map to u,v,d
    (f1u0, f1v0, f1d0) = stereo_cam.cam2pix(f1x0, f1y0, f1z0)

    # Sample f0 using those (u,v,d) points; this gives f0 in f1's frame, called f1i0

    addr = vop.floor(f1u0 / factor) + chipsize[0] * vop.floor(f1v0 / factor)
    ok = f1p & (0 <= f1u0) & (f1u0 < 640) & (0 <= f1v0) & (f1v0 < 480)
    addr = vop.where(ok, addr, 0)
    f1i0 = vop.take(f0i, addr)

    # Return:
    #  ok: which pixels are reliable in f1i0
    #  f1p: which pixels are good in f1
    #  f1i0: as computed above
    #  f1i: the original f1 image
    return (ok, f1p, f1i0, f1i)
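
The last four lines of img_err do the actual sampling: the projected (u, v) are packed into a single row-major flat index, out-of-range pixels are masked, and the intensities are gathered in one take. The NumPy sketch below reproduces just that flat-index gather on a toy image; sample_flat is a hypothetical helper written for illustration, not part of the original code.

import numpy as np

def sample_flat(img, u, v):
    # Gather img[v, u] through a row-major flat index, masking out-of-bounds
    # samples; mirrors the addr / ok / take lines of img_err above.
    h, w = img.shape
    ok = (0 <= u) & (u < w) & (0 <= v) & (v < h)
    addr = np.floor(u).astype(int) + w * np.floor(v).astype(int)
    addr = np.where(ok, addr, 0)          # park bad samples at address 0
    samples = img.ravel()[addr]           # the vop.take equivalent
    return ok, np.where(ok, samples, 0.0)

img = np.arange(12, dtype=float).reshape(3, 4)   # toy 3x4 image
u = np.array([0.2, 3.7, 5.0, 1.9])               # projected columns
v = np.array([0.0, 2.1, 1.0, -1.0])              # projected rows
ok, vals = sample_flat(img, u, v)
print(ok)     # [ True  True False False]
print(vals)   # [ 0. 11.  0.  0.]
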
Example #11
 def scavenger(self, diff_pose, frame):
   # Re-match the keyframe's features against the current frame using the
   # predicted pose, then re-solve for the relative pose.
   af0 = self.keyframe
   af1 = frame
   Xs = vop.array([k[0] for k in af1.kp])
   Ys = vop.array([k[1] for k in af1.kp])
   pairs = []
   fwd_pose = ~diff_pose
   for (i,(ki,di)) in enumerate(zip(af0.kp,af0.descriptors)):
     (x,y,d) = self.cam.cam2pix(*fwd_pose.xform(*self.cam.pix2cam(*ki)))
     predX = (abs(Xs - x) < 4)
     predY = (abs(Ys - y) < 4)
     hits = vop.where(predX & predY, 1, 0).tostring()
     best = self.descriptor_scheme.search(di, af1, hits)
      if best is not None:
       pairs.append((i, best[0], best[1]))
   self.pairs = [(i0,i1) for (i0,i1,d) in pairs]
   solution = self.solve(af0.kp, af1.kp, self.pairs)
   return solution
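
scavenger predicts where each keyframe feature should land in the new frame by lifting its (u, v, d) measurement to 3D, applying the forward pose, and projecting back, and only then searches a tight 4-pixel window around the prediction. The stereo_cam used above is not shown, so the sketch below assumes a conventional rectified-stereo pinhole model (focal length F, principal point (CX, CY), baseline B); the constants and the predict helper are purely illustrative.

import numpy as np

# Assumed rectified-stereo pinhole model; not necessarily the real stereo_cam.
F, CX, CY, B = 500.0, 320.0, 240.0, 0.09

def pix2cam(u, v, d):
    # (u, v, disparity) -> camera-frame (x, y, z) under the assumed model.
    z = F * B / d
    return (u - CX) * z / F, (v - CY) * z / F, z

def cam2pix(x, y, z):
    # (x, y, z) -> (u, v, disparity) under the assumed model.
    return F * x / z + CX, F * y / z + CY, F * B / z

def predict(pose_R, pose_t, u, v, d):
    # Project a (u, v, d) feature through a rigid motion, as scavenger does
    # with self.cam.cam2pix(*fwd_pose.xform(*self.cam.pix2cam(*ki))).
    x, y, z = pose_R.dot(np.array(pix2cam(u, v, d))) + pose_t
    return cam2pix(x, y, z)

# Toy rigid motion: 5 cm along the optical axis, no rotation.
R = np.eye(3)
t = np.array([0.0, 0.0, 0.05])
print(predict(R, t, 400.0, 260.0, 15.0))
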
Example #12
 def scavenger(self, diff_pose, af0, af1):
     Xs = vop.array([k[0] for k in af1.features()])
     Ys = vop.array([k[1] for k in af1.features()])
     pairs = []
     fwd_pose = ~diff_pose
     ds = af1.descriptor_scheme
     matcher = ds.desc2matcher(af1.descriptors())
     for (i, (ki, di)) in enumerate(zip(af0.features(), af0.descriptors())):
         (x, y,
          d) = self.cam.cam2pix(*fwd_pose.xform(*self.cam.pix2cam(*ki)))
         predX = (abs(Xs - x) < 4)
         predY = (abs(Ys - y) < 4)
         hits = vop.where(predX & predY, 1, 0).tostring()
         best = ds.search(di, matcher, hits)
          if best is not None:
             pairs.append((i, best[0], best[1]))
     self.pairs = [(i0, i1) for (i0, i1, d) in pairs]
     if False:
         import pylab
         f0, f1 = af0, af1
         for (a, b) in self.pairs:
             pylab.plot([f0.features()[a][0],
                         f1.features()[b][0]],
                        [f0.features()[a][1],
                         f1.features()[b][1]])
         pylab.imshow(numpy.fromstring(af0.lf.tostring(),
                                       numpy.uint8).reshape(480, 640),
                      cmap=pylab.cm.gray)
         pylab.scatter([x for (x, y, d) in f0.features()],
                       [y for (x, y, d) in f0.features()],
                       label='%d kp' % f0.id,
                       c='red')
         pylab.scatter([x for (x, y, d) in f1.features()],
                       [y for (x, y, d) in f1.features()],
                       label='%d kp' % f1.id,
                       c='green')
         pylab.legend()
         pylab.show()
     solution = self.solve(af0.features(), af1.features(), self.pairs)
     return solution
Example #13
def img_img(f0, f1, RT, stereo_cam):
    # Return the masked f1 intensity and the masked warp of f0 into f1's frame.
    (ok, f1p, f1i0, f1i) = img_err(f0, f1, RT, stereo_cam)
    c_r = vop.where(ok, f1i, 0)
    c_g = vop.where(ok, f1i0, 0)
    return c_r, c_g
Example #14
    def estimateC(self, cam1, cp1, cam0, cp0, pairs, polish=True):
        """
    Pose Estimator.  Returns the relative pose between two camera views.
    *cam1* and *cam0* are the two cameras.  *cp0* and *cp1* are the
    (u,v,d) coordinates of the keypoints in the views.  *pairs* is a list
    of matched pairs of keypoints between the frames.
    """

        if len(pairs) < 3:
            return (0, None, None)

        # Compute the arrays for xyz for RANSAC sampling.
        (p0_x, p0_y, p0_z) = cam0.pix2cam(*vop3(cp0))
        (p1_x, p1_y, p1_z) = cam1.pix2cam(*vop3(cp1))

        # Compute the paired arrays (x0,y0,z0) and (u1,v1,d1) for RANSAC confirmation
        p0 = zip(p0_x, p0_y, p0_z)
        x0, y0, z0 = vop3([p0[i] for (i, j) in pairs])
        u1, v1, d1 = vop3([cp1[j] for (i, j) in pairs])

        # Generate the random triplets.  The fiddling here does the picks
        # "without replacement", so pick0[i], pick1[i] and pick2[i] are all distinct.
        np = len(pairs)
        pick0 = vop.floor(self.r0 * np)
        pick1 = vop.floor(self.r1 * (np - 1))
        pick1 = vop.where(pick1 < pick0, pick1, pick1 + 1)
        pick2 = vop.floor(self.r2 * (np - 2))
        pick2 = vop.where(pick2 < vop.minimum(pick0, pick1), pick2, pick2 + 1)
        pick2 = vop.where(pick2 < vop.maximum(pick0, pick1), pick2, pick2 + 1)

        # Keep track of current best guess
        best = (0, None, None)
        best_inl = []

        for ransac in range(self.ransac_iterations):
            #triple = self.rnd.sample(pairs, 3)
            triple = (pairs[int(pick0[ransac])], pairs[int(pick1[ransac])],
                      pairs[int(pick2[ransac])])

            # Gather the triple's xyz coordinates in each frame, then feed
            # them to SVD to produce a pose
            ((a, _), (b, _), (c, _)) = triple
            p0s = [
                p0_x[a], p0_x[b], p0_x[c], p0_y[a], p0_y[b], p0_y[c], p0_z[a],
                p0_z[b], p0_z[c]
            ]

            ((_, aa), (_, bb), (_, cc)) = triple
            p1s = [
                p1_x[aa], p1_x[bb], p1_x[cc], p1_y[aa], p1_y[bb], p1_y[cc],
                p1_z[aa], p1_z[bb], p1_z[cc]
            ]

            def toofar(d0, d1):
                return d0 == 0 or d1 == 0 or (d1 / d0) > 1.1 or (d0 / d1) > 1.1

            if False:
                # Optimization from Rufus: Check if there is any scale change between the pairs
                p0s_dist_ab = (p0_x[a] - p0_x[b])**2 + (
                    p0_y[a] - p0_y[b])**2 + (p0_z[a] - p0_z[b])**2
                p1s_dist_ab = (p1_x[aa] - p1_x[bb])**2 + (
                    p1_y[aa] - p1_y[bb])**2 + (p1_z[aa] - p1_z[bb])**2

                if toofar(p1s_dist_ab, p0s_dist_ab):
                    continue

                p0s_dist_cb = (p0_x[c] - p0_x[b])**2 + (
                    p0_y[c] - p0_y[b])**2 + (p0_z[c] - p0_z[b])**2
                p1s_dist_cb = (p1_x[cc] - p1_x[bb])**2 + (
                    p1_y[cc] - p1_y[bb])**2 + (p1_z[cc] - p1_z[bb])**2

                if toofar(p1s_dist_cb, p0s_dist_cb):
                    continue

                p0s_dist_ac = (p0_x[a] - p0_x[c])**2 + (
                    p0_y[a] - p0_y[c])**2 + (p0_z[a] - p0_z[c])**2
                p1s_dist_ac = (p1_x[aa] - p1_x[cc])**2 + (
                    p1_y[aa] - p1_y[cc])**2 + (p1_z[aa] - p1_z[cc])**2

                if toofar(p1s_dist_ac, p0s_dist_ac):
                    continue

            R, T, RT = VOLO.SVD(p0s, p1s)
            #R,T,RT = VOLO.SVDe(p0s, p1s)

            # Check inliers for RT: xyz0 -> uvd0 vs uvd1
            (u0, v0, d0) = cam0.cam2pix(*xform(RT, x0, y0, z0))
            pred_inl = vop.where(
                vop.maximum(vop.maximum(abs(u0 - u1), abs(v0 - v1)),
                            abs(d0 - d1)) > self.iet, 0.0, 1.0)
            inliers = int(pred_inl.sum())
            if inliers > best[0]:
                best = (inliers, R, T)
                best_inl = pred_inl

        self.inl = [P for (P, F) in zip(pairs, best_inl) if F]
        if polish and (best[0] > 6):
            uvds0Inlier = [cp0[a] for (a, _) in self.inl]
            uvds1Inlier = [cp1[b] for (_, b) in self.inl]
            carttodisp = cam0.cart_to_disp()
            disptocart = cam1.disp_to_cart()
            (inliers, R, T) = best
            (R, T) = VOLO.polish(uvds0Inlier, uvds1Inlier, carttodisp,
                                 disptocart, R, T)
            best = (inliers, R, T)

        return best
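
Two steps in estimateC are delegated to routines that this listing does not include: VOLO.SVD, which fits a rigid transform to the sampled triple, and VOLO.polish. The sketch below shows the standard SVD (Kabsch) construction that such a three-point rigid fit typically uses; it is an illustration under that assumption, not the actual VOLO implementation.

import numpy as np

def rigid_fit(p_src, p_dst):
    # Least-squares rigid transform (R, t) with p_dst ~= R @ p_src + t, via the
    # standard SVD (Kabsch) construction.  Illustrative only: the listing above
    # delegates this step to VOLO.SVD, which is not shown.
    p_src = np.asarray(p_src, dtype=float)   # shape (N, 3)
    p_dst = np.asarray(p_dst, dtype=float)
    c_src = p_src.mean(axis=0)
    c_dst = p_dst.mean(axis=0)
    H = (p_src - c_src).T @ (p_dst - c_dst)  # cross-covariance
    U, _, Vt = np.linalg.svd(H)
    D = np.diag([1.0, 1.0, np.sign(np.linalg.det(Vt.T @ U.T))])
    R = Vt.T @ D @ U.T                       # proper rotation (det = +1)
    t = c_dst - R @ c_src
    return R, t

# Toy check on a minimal triple, as in the RANSAC samples above: recover a
# known rotation about z plus a translation.
theta = 0.1
R_true = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                   [np.sin(theta),  np.cos(theta), 0.0],
                   [0.0,            0.0,           1.0]])
t_true = np.array([0.2, -0.1, 0.05])
src = np.random.default_rng(1).random((3, 3))    # three 3-D points
dst = src @ R_true.T + t_true
R, t = rigid_fit(src, dst)
print(np.allclose(R, R_true), np.allclose(t, t_true))   # expect True True
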