Example #1
def process_frame(img):
  img = cv2.resize(img, (W,H))
  frame = Frame(mapp, img, K)
  if frame.id == 0:
    return
  print("\n*** frame %d ***" % (frame.id,))

  f1 = mapp.frames[-1]
  f2 = mapp.frames[-2]

  idx1, idx2, Rt = match_frames(f1, f2)
  f1.pose = np.dot(Rt, f2.pose)

  for i,idx in enumerate(idx2):
    if f2.pts[idx] is not None:
      f2.pts[idx].add_observation(f1, idx1[i])

  good_pts4d = np.array([f1.pts[i] is None for i in idx1])

  # reject pts without enough "parallax": check |w| before normalizing
  pts_tri_local = triangulate(Rt, np.eye(4), f1.kps[idx1], f2.kps[idx2])
  good_pts4d &= np.abs(pts_tri_local[:, 3]) > 0.005

  # homogeneous 3-D coords; reject points not locally in front of the camera
  pts_tri_local /= pts_tri_local[:, 3:]
  good_pts4d &= pts_tri_local[:, 2] > 0

  # project into world coordinates (the local triangulation frame is
  # f2's camera frame, since f2 was given the identity pose above)
  pts4d = np.dot(np.linalg.inv(f2.pose), pts_tri_local.T).T

  print("Adding:   %d points" % np.sum(good_pts4d))

  for i,p in enumerate(pts4d):
    if not good_pts4d[i]:
      continue
    u,v = int(round(f1.kpus[idx1[i],0])), int(round(f1.kpus[idx1[i],1]))
    pt = Point(mapp, p, img[v,u])
    pt.add_observation(f1, idx1[i])
    pt.add_observation(f2, idx2[i])

  for pt1, pt2 in zip(f1.kps[idx1], f2.kps[idx2]):
    u1, v1 = denormalize(K, pt1)
    u2, v2 = denormalize(K, pt2)
    cv2.circle(img, (u1, v1), color=(0,255,0), radius=3)
    cv2.line(img, (u1, v1), (u2, v2), color=(255,0,0))

  # 2-D display
  if disp is not None:
    disp.paint(img)

  # optimize the map
  if frame.id >= 4:
    err = mapp.optimize()
    print("Optimize: %f units of error" % err)

  # 3-D display
  mapp.display()
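All of these examples call a triangulate helper that is not shown on this page. Below is a minimal sketch of linear (DLT) triangulation consistent with how it is called above, assuming pose1/pose2 are 4x4 world-to-camera matrices and pts1/pts2 are matched, normalized 2-D keypoints; the real helper in the original project may differ in details.

import numpy as np

def triangulate(pose1, pose2, pts1, pts2):
    # linear (DLT) triangulation: for each match, stack the two
    # projection constraints and take the null space via SVD
    ret = np.zeros((pts1.shape[0], 4))
    for i, (p1, p2) in enumerate(zip(pts1, pts2)):
        A = np.zeros((4, 4))
        A[0] = p1[0] * pose1[2] - pose1[0]
        A[1] = p1[1] * pose1[2] - pose1[1]
        A[2] = p2[0] * pose2[2] - pose2[0]
        A[3] = p2[1] * pose2[2] - pose2[1]
        _, _, vt = np.linalg.svd(A)
        ret[i] = vt[3]
    return ret  # homogeneous 4-D points; callers normalize by ret[:, 3]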
Example #2
def process_frame(img):
    img = cv2.resize(img, (W, H))
    frame = Frame(mapp, img, K)
    if frame.id == 0:
        return

    print("\n*** frame %d ***" % (frame.id, ))

    f1 = mapp.frames[-1]
    f2 = mapp.frames[-2]

    idx1, idx2, Rt = match_frames(f1, f2)
    #print("=============================", idx1, idx2, Rt,f1.pts,f2.pts)
    f1.pose = np.dot(Rt, f2.pose)

    for i, idx in enumerate(idx2):
        if f2.pts[idx] is not None:
            f2.pts[idx].add_observation(f1, idx1[i])

    # homogeneous 3-D coords
    pts4d = triangulate(f1.pose, f2.pose, f1.kps[idx1], f2.kps[idx2])

    # reject pts without enough "parallax" (check |w| before normalizing,
    # otherwise it is always 1), points behind the camera, and matches
    # that already have a map point
    unmatched_points = np.array([f1.pts[i] is None for i in idx1])
    good_pts4d = (np.abs(pts4d[:, 3]) > 0.005) & unmatched_points
    pts4d /= pts4d[:, 3:]
    good_pts4d &= pts4d[:, 2] > 0
    print("Adding:  %d points" % np.sum(good_pts4d))

    for i, p in enumerate(pts4d):
        if not good_pts4d[i]:
            continue
        pt = Point(mapp, p)
        pt.add_observation(f1, idx1[i])
        pt.add_observation(f2, idx2[i])

    for pt1, pt2 in zip(f1.kps[idx1], f2.kps[idx2]):
        u1, v1 = denormalize(K, pt1)
        u2, v2 = denormalize(K, pt2)
        cv2.circle(img, (u1, v1), color=(0, 255, 0), radius=3)
        cv2.line(img, (u1, v1), (u2, v2), color=(255, 0, 0))

    # 2-D display
    if disp is not None:
        disp.paint(img)

    # optimize the map
    if frame.id >= 4:
        mapp.optimize()

    # 3-D display
    mapp.display()
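The drawing loops call denormalize to map normalized keypoints back into pixel coordinates for OpenCV. A minimal version, assuming K is the 3x3 intrinsic matrix used throughout these examples:

import numpy as np

def denormalize(K, pt):
    # lift the normalized 2-D point to homogeneous coords, apply the
    # intrinsics, and round to integer pixel coordinates for cv2 drawing
    ret = np.dot(K, np.array([pt[0], pt[1], 1.0]))
    ret /= ret[2]
    return int(round(ret[0])), int(round(ret[1]))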
Example #3
def process_frame(img):
    img = cv2.resize(img, (W, H))
    frame = Frame(img, mapp, K)

    if frame.id == 0:
        return

    f1 = mapp.frames[-1]
    f2 = mapp.frames[-2]

    idx1, idx2, Rt = match_frames(f1, f2)
    f1.pose = np.dot(Rt, f2.pose)

    # triangulate into homogeneous 3-D coords
    pts4d = triangulate(f1.pose, f2.pose, f1.pts[idx1], f2.pts[idx2])

    # reject pts without good parallax (check |w| before normalizing)
    # and pts behind the camera
    good_pts4d = np.abs(pts4d[:, 3]) > 0.005
    pts4d /= pts4d[:, 3:]
    good_pts4d &= pts4d[:, 2] > 0

    #print(sum(good_pts4d), len(good_pts4d))

    for i, p in enumerate(pts4d):
        if not good_pts4d[i]:
            continue
        pt = Point(mapp, p)
        pt.add_observation(f1, idx1[i])
        pt.add_observation(f2, idx2[i])

    #print(f1.pose)
    #print(pts4d)

    for pt1, pt2 in zip(f1.pts[idx1], f2.pts[idx2]):
        #u1, v1 = map(lambda x: int(round(x)), pt1)
        #u2, v2 = map(lambda x: int(round(x)), pt2)

        u1, v1 = denormalize(K, pt1)
        u2, v2 = denormalize(K, pt2)

        cv2.circle(img, (u1, v1), color=(0, 255, 0), radius=3)
        cv2.line(img, (u1, v1), (u2, v2), color=(255, 0, 0))

    #print(img.shape)

    # 2-D display
    if disp is not None: disp.paint(img)

    #3-D display
    mapp.display()
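Every example starts with match_frames(f1, f2), which returns matched keypoint indices plus the relative pose Rt used in f1.pose = np.dot(Rt, f2.pose). A rough sketch of one way to implement it with OpenCV is shown below, assuming each frame stores ORB descriptors in des and pixel keypoints in kpus, and that the global intrinsics K are available; the exact pose convention should be checked against the triangulate and Frame code actually in use.

import numpy as np
import cv2

def match_frames(f1, f2):
    # brute-force Hamming matching on ORB descriptors with Lowe's ratio test
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    idx1, idx2 = [], []
    for pair in bf.knnMatch(f1.des, f2.des, k=2):
        if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:
            idx1.append(pair[0].queryIdx)
            idx2.append(pair[0].trainIdx)
    idx1, idx2 = np.array(idx1), np.array(idx2)

    # essential-matrix RANSAC on the pixel coordinates; the point order
    # (f2 first, f1 second) is chosen so that the recovered R, t map
    # f2's camera frame into f1's, i.e. f1.pose = Rt @ f2.pose
    pts1, pts2 = f1.kpus[idx1], f2.kpus[idx2]
    E, mask = cv2.findEssentialMat(pts2, pts1, K, method=cv2.RANSAC,
                                   prob=0.999, threshold=1.0)
    _, R, t, mask = cv2.recoverPose(E, pts2, pts1, K, mask=mask)

    Rt = np.eye(4)
    Rt[:3, :3] = R
    Rt[:3, 3] = t.ravel()
    inliers = mask.ravel() > 0
    return idx1[inliers], idx2[inliers], Rt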
Example #4
def process_frame(img):
    img = cv2.resize(img, (W, H))
    frame = Frame(mapp, img, K)
    if frame.id == 0:
        return

    f1 = mapp.frames[-1]
    f2 = mapp.frames[-2]

    idx1, idx2, Rt = match_frames(f1, f2)
    f1.pose = np.dot(Rt, f2.pose)

    # triangulate into homogeneous 3-D coords
    pts4d = triangulate(f1.pose, f2.pose, f1.pts[idx1], f2.pts[idx2])

    # reject pts without enough "parallax" (check |w| before normalizing)
    # and points behind the camera
    good_pts4d = np.abs(pts4d[:, 3]) > 0.005
    pts4d /= pts4d[:, 3:]
    good_pts4d &= pts4d[:, 2] > 0

    for i, p in enumerate(pts4d):
        if not good_pts4d[i]:
            continue
        pt = Point(mapp, p)
        pt.add_observation(f1, idx1[i])
        pt.add_observation(f2, idx2[i])

    for pt1, pt2 in zip(f1.pts[idx1], f2.pts[idx2]):
        u1, v1 = denormalize(K, pt1)
        u2, v2 = denormalize(K, pt2)
        cv2.circle(img, (u1, v1), color=(0, 255, 0), radius=3)
        cv2.line(img, (u1, v1), (u2, v2), color=(255, 0, 0))

    # 2-D display
    if disp is not None:
        disp.paint(img)

    # 3-D display
    mapp.display()
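Examples #1 through #5 all rely on the globals W, H and the intrinsic matrix K passed to Frame and denormalize. In this style of code K is typically built from a single focal-length guess; the values below are illustrative assumptions, not taken from the original source.

import numpy as np

W, H = 960, 540   # processing resolution used by cv2.resize above
F = 270           # assumed focal length in pixels

K = np.array([[F, 0, W // 2],
              [0, F, H // 2],
              [0, 0,      1]])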
Example #5
def process_frame(img):
    img = cv2.resize(img, (W, H))
    frame = Frame(mapp, img, K)
    if frame.id == 0:
        return

    f1 = mapp.frames[-1]
    f2 = mapp.frames[-2]

    idx1, idx2, Rt = match_frames(f1, f2)
    f1.pose = np.dot(Rt, f2.pose)

    pts4d = triangulate(f1.pose, f2.pose, f1.pts[idx1], f2.pts[idx2])

    # Reject points without enough "parallax" (check |w| before normalizing)
    # and points behind the camera
    good_pts4d = np.abs(pts4d[:, 3]) > 0.005
    pts4d /= pts4d[:, 3:]
    good_pts4d &= pts4d[:, 2] > 0

    for i, p in enumerate(pts4d):
        if not good_pts4d[i]:
            continue
        pt = Point(mapp, p)
        pt.add_observation(f1, idx1[i])
        pt.add_observation(f2, idx2[i])

    for pt1, pt2 in zip(f1.pts[idx1], f2.pts[idx2]):
        u1, v1 = denormalize(K, pt1)
        u2, v2 = denormalize(K, pt2)

        cv2.circle(img, (u1, v1), 3, (0, 255, 0))
        cv2.line(img, (u1, v1), (u2, v2), (255, 0, 0))

    # 2-D display
    display.paint(img)

    # 3-D display
    mapp.display()
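The Point(mapp, p) objects created above only need to remember where they live and which frames observe them. A minimal sketch consistent with how add_observation and f1.pts[idx] are used in these examples; the attribute names are assumptions where the original class is not shown.

class Point(object):
    # a triangulated 3-D landmark shared between the map and the frames
    def __init__(self, mapp, loc, color=None):
        self.pt = loc
        self.color = color
        self.frames = []   # frames observing this point
        self.idxs = []     # keypoint index in each observing frame
        self.id = len(mapp.points)
        mapp.points.append(self)

    def add_observation(self, frame, idx):
        # frame.pts is the per-keypoint back-reference the examples test
        # with "f2.pts[idx] is not None"
        frame.pts[idx] = self
        self.frames.append(frame)
        self.idxs.append(idx)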
Example #6
    def match_points(self, pts4d, good_pts4d, frame_1, frame_2, idx1, idx2,
                     img, new_pts_count):
        for i, p in enumerate(pts4d):
            if not good_pts4d[i]:
                continue

            # check parallax is large enough
            # TODO: learn what parallax means
            """
            r1 = np.dot(frame_1.pose[:3, :3], add_ones(frame_1.kps[idx1[i]]))
            r2 = np.dot(frame_2.pose[:3, :3], add_ones(frame_2.kps[idx2[i]]))
            parallax = r1.dot(r2) / (np.linalg.norm(r1) * np.linalg.norm(r2))
            if parallax >= 0.9998:
              continue
            """

            # check points are in front of both cameras
            pl1 = np.dot(frame_1.pose, p)
            pl2 = np.dot(frame_2.pose, p)
            if pl1[2] < 0 or pl2[2] < 0:
                continue

            # reproject
            pp1 = np.dot(self.K, pl1[:3])
            pp2 = np.dot(self.K, pl2[:3])

            # check reprojection error
            pp1 = (pp1[0:2] / pp1[2]) - frame_1.key_points[idx1[i]]
            pp2 = (pp2[0:2] / pp2[2]) - frame_2.key_points[idx2[i]]
            pp1 = np.sum(pp1**2)
            pp2 = np.sum(pp2**2)
            if pp1 > 2 or pp2 > 2:
                continue

            # add the point
            try:
                color = img[int(round(frame_1.key_points[idx1[i], 1])),
                            int(round(frame_1.key_points[idx1[i], 0]))]
            except IndexError:
                color = (255, 0, 0)
            pt = Point(self.mapp, p[0:3], color)
            connect_frame_point(frame_2, pt, idx2[i])
            connect_frame_point(frame_1, pt, idx1[i])
            new_pts_count += 1

        # ints are passed by value, so hand the updated tally back to the caller
        return new_pts_count
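The commented-out parallax check in Example #6 (and again in Example #7) uses an add_ones helper to lift keypoints to homogeneous coordinates. A minimal version that handles both a single point and an (N, 2) array:

import numpy as np

def add_ones(x):
    # append a homogeneous 1 to a single point, or a column of 1s to an array
    if x.ndim == 1:
        return np.concatenate([x, np.array([1.0])])
    return np.concatenate([x, np.ones((x.shape[0], 1))], axis=1)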
Example #7
    def process_frame(self, img, pose=None):
        start_time = time.time()
        assert img.shape[0:2] == (self.H, self.W)
        frame = Frame(self.mapp, img, self.K)

        if frame.id == 0:
            return

        f1 = self.mapp.frames[-1]
        f2 = self.mapp.frames[-2]

        idx1, idx2, Rt = match_frames(f1, f2)

        # add new observations if the point is already observed in the previous frame
        # TODO: consider tradeoff doing this before/after search by projection
        for i, idx in enumerate(idx2):
            if f2.pts[idx] is not None and f1.pts[idx1[i]] is None:
                f2.pts[idx].add_observation(f1, idx1[i])

        if frame.id < 5 or True:
            # get initial positions from fundamental matrix
            f1.pose = np.dot(Rt, f2.pose)
        else:
            # kinematic model (not used)
            velocity = np.dot(f2.pose,
                              np.linalg.inv(self.mapp.frames[-3].pose))
            f1.pose = np.dot(velocity, f2.pose)

        # pose optimization
        if pose is None:
            #print(f1.pose)
            pose_opt = self.mapp.optimize(local_window=1, fix_points=True)
            print("Pose:     %f" % pose_opt)
            #print(f1.pose)
        else:
            # have ground truth for pose
            f1.pose = pose

        sbp_pts_count = 0

        # search by projection
        if len(self.mapp.points) > 0:
            # project *all* the map points into the current frame
            map_points = np.array([p.homogeneous() for p in self.mapp.points])
            projs = np.dot(np.dot(self.K, f1.pose[:3]), map_points.T).T
            projs = projs[:, 0:2] / projs[:, 2:]

            # only the points that fit in the frame
            good_pts = (projs[:, 0] > 0) & (projs[:, 0] < self.W) & \
                       (projs[:, 1] > 0) & (projs[:, 1] < self.H)

            for i, p in enumerate(self.mapp.points):
                if not good_pts[i]:
                    # point not visible in frame
                    continue
                if f1 in p.frames:
                    # we already matched this map point to this frame
                    # TODO: understand this better
                    continue
                for m_idx in f1.kd.query_ball_point(projs[i], 2):
                    # if point unmatched
                    if f1.pts[m_idx] is None:
                        b_dist = p.orb_distance(f1.des[m_idx])
                        # if any descriptors within 64
                        if b_dist < 64.0:
                            p.add_observation(f1, m_idx)
                            sbp_pts_count += 1
                            break

        # triangulate the points we don't have matches for
        good_pts4d = np.array([f1.pts[i] is None for i in idx1])

        # do triangulation in global frame
        pts4d = triangulate(f1.pose, f2.pose, f1.kps[idx1], f2.kps[idx2])
        good_pts4d &= np.abs(pts4d[:, 3]) != 0
        pts4d /= pts4d[:, 3:]  # homogeneous 3-D coords

        # adding new points to the map from pairwise matches
        new_pts_count = 0
        for i, p in enumerate(pts4d):
            if not good_pts4d[i]:
                continue

            # check parallax is large enough
            # TODO: learn what parallax means
            """
      r1 = np.dot(f1.pose[:3, :3], add_ones(f1.kps[idx1[i]]))
      r2 = np.dot(f2.pose[:3, :3], add_ones(f2.kps[idx2[i]]))
      parallax = r1.dot(r2) / (np.linalg.norm(r1) * np.linalg.norm(r2))
      if parallax >= 0.9998:
        continue
      """

            # check points are in front of both cameras
            pl1 = np.dot(f1.pose, p)
            pl2 = np.dot(f2.pose, p)
            if pl1[2] < 0 or pl2[2] < 0:
                continue

            # reproject
            pp1 = np.dot(self.K, pl1[:3])
            pp2 = np.dot(self.K, pl2[:3])

            # check reprojection error
            pp1 = (pp1[0:2] / pp1[2]) - f1.kpus[idx1[i]]
            pp2 = (pp2[0:2] / pp2[2]) - f2.kpus[idx2[i]]
            pp1 = np.sum(pp1**2)
            pp2 = np.sum(pp2**2)
            if pp1 > 2 or pp2 > 2:
                continue

            # add the point
            color = img[int(round(f1.kpus[idx1[i], 1])),
                        int(round(f1.kpus[idx1[i], 0]))]
            pt = Point(self.mapp, p[0:3], color)
            pt.add_observation(f2, idx2[i])
            pt.add_observation(f1, idx1[i])
            new_pts_count += 1

        print("Adding:   %d new points, %d search by projection" %
              (new_pts_count, sbp_pts_count))

        # optimize the map
        if frame.id >= 4 and frame.id % 5 == 0:
            err = self.mapp.optimize()  # verbose=True
            print("Optimize: %f units of error" % err)

        print("Map:      %d points, %d frames" %
              (len(self.mapp.points), len(self.mapp.frames)))
        print("Time:     %.2f ms" % ((time.time() - start_time) * 1000.0))
        print(np.linalg.inv(f1.pose))
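The search-by-projection loops query f1.kd, a spatial index over the frame's un-normalized keypoints. A standalone sketch of the same pattern using SciPy's cKDTree, with stand-in data where the real frame attributes are not shown:

import numpy as np
from scipy.spatial import cKDTree

kpus = np.random.rand(500, 2) * [960, 540]   # stand-in pixel keypoints
kd = cKDTree(kpus)                           # what Frame would store as self.kd

proj = np.array([480.0, 270.0])              # a projected map point
for m_idx in kd.query_ball_point(proj, 2):   # all keypoints within 2 pixels
    print("candidate keypoint", m_idx, kpus[m_idx])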
Example #8
def process_frame(img):
  start_time = time.time()
  img = cv2.resize(img, (W,H))
  frame = Frame(mapp, img, K)
  if frame.id == 0:
    return

  f1 = mapp.frames[-1]
  f2 = mapp.frames[-2]

  idx1, idx2, Rt = match_frames(f1, f2)

  if frame.id < 5:
    # get initial positions from fundamental matrix
    f1.pose = np.dot(Rt, f2.pose)
  else:
    # kinematic model
    velocity = np.dot(f2.pose, np.linalg.inv(mapp.frames[-3].pose))
    f1.pose = np.dot(velocity, f2.pose)

  for i,idx in enumerate(idx2):
    if f2.pts[idx] is not None:
      f2.pts[idx].add_observation(f1, idx1[i])

  # pose optimization
  #print(f1.pose)
  pose_opt = mapp.optimize(local_window=1, fix_points=True)
  print("Pose:     %f" % pose_opt)
  #print(f1.pose)

  # search by projection
  sbp_pts_count = 0
  if len(mapp.points) > 0:
    map_points = np.array([p.homogeneous() for p in mapp.points])
    projs = np.dot(np.dot(K, f1.pose[:3]), map_points.T).T
    projs = projs[:, 0:2] / projs[:, 2:]
    good_pts = (projs[:, 0] > 0) & (projs[:, 0] < W) & \
               (projs[:, 1] > 0) & (projs[:, 1] < H)
    for i, p in enumerate(mapp.points):
      if not good_pts[i]:
        continue
      q = f1.kd.query_ball_point(projs[i], 5)
      for m_idx in q:
        if f1.pts[m_idx] is None:
          # if any descriptors within 32
          for o in p.orb():
            o_dist = hamming_distance(o, f1.des[m_idx])
            if o_dist < 32.0:
              p.add_observation(f1, m_idx)
              sbp_pts_count += 1
              break

  good_pts4d = np.array([f1.pts[i] is None for i in idx1])

  # reject pts without enough "parallax" (this right?)
  pts4d = triangulate(f1.pose, f2.pose, f1.kps[idx1], f2.kps[idx2])
  good_pts4d &= np.abs(pts4d[:, 3]) > 0.005

  # homogeneous 3-D coords
  pts4d /= pts4d[:, 3:]

  # locally in front of camera
  # NOTE: This check is broken and maybe unneeded
  #pts_tri_local = np.dot(f1.pose, pts4d.T).T
  #good_pts4d &= pts_tri_local[:, 2] > 0

  print("Adding:   %d new points, %d search by projection" % (np.sum(good_pts4d), sbp_pts_count))

  for i,p in enumerate(pts4d):
    if not good_pts4d[i]:
      continue
    u,v = int(round(f1.kpus[idx1[i],0])), int(round(f1.kpus[idx1[i],1]))
    pt = Point(mapp, p[0:3], img[v,u])
    pt.add_observation(f1, idx1[i])
    pt.add_observation(f2, idx2[i])

  for i1, i2 in zip(idx1, idx2):
    pt1 = f1.kps[i1]
    pt2 = f2.kps[i2]
    u1, v1 = denormalize(K, pt1)
    u2, v2 = denormalize(K, pt2)
    if f1.pts[i1] is not None:
      if len(f1.pts[i1].frames) >= 5:
        cv2.circle(img, (u1, v1), color=(0,255,0), radius=3)
      else:
        cv2.circle(img, (u1, v1), color=(0,128,0), radius=3)
    else:
      cv2.circle(img, (u1, v1), color=(0,0,0), radius=3)
    cv2.line(img, (u1, v1), (u2, v2), color=(255,0,0))

  # 2-D display
  if disp is not None:
    disp.paint(img)

  # optimize the map
  if frame.id >= 4 and frame.id%5 == 0:
    err = mapp.optimize()
    print("Optimize: %f units of error" % err)

  # 3-D display
  mapp.display()
  print("Map:      %d points, %d frames" % (len(mapp.points), len(mapp.frames)))
  print("Time:     %.2f ms" % ((time.time()-start_time)*1000.0))
Example #9
def process_frame(img, pose=None):
  start_time = time.time()
  img = cv2.resize(img, (W,H))
  frame = Frame(mapp, img, K)
  if frame.id == 0:
    return

  f1 = mapp.frames[-1]
  f2 = mapp.frames[-2]

  idx1, idx2, Rt = match_frames(f1, f2)

  # add new observations if the point is already observed in the previous frame
  # TODO: consider tradeoff doing this before/after search by projection
  for i,idx in enumerate(idx2):
    if f2.pts[idx] is not None and f1.pts[idx1[i]] is None:
      f2.pts[idx].add_observation(f1, idx1[i])

  if frame.id < 5:
    # get initial positions from fundamental matrix
    f1.pose = np.dot(Rt, f2.pose)
  else:
    # kinematic model
    velocity = np.dot(f2.pose, np.linalg.inv(mapp.frames[-3].pose))
    f1.pose = np.dot(velocity, f2.pose)

  # pose optimization
  if pose is None:
    #print(f1.pose)
    pose_opt = mapp.optimize(local_window=1, fix_points=True)
    print("Pose:     %f" % pose_opt)
    #print(f1.pose)
  else:
    # have ground truth for pose
    f1.pose = pose

  # search by projection
  sbp_pts_count = 0
  if len(mapp.points) > 0:
    map_points = np.array([p.homogeneous() for p in mapp.points])
    projs = np.dot(np.dot(K, f1.pose[:3]), map_points.T).T
    projs = projs[:, 0:2] / projs[:, 2:]
    good_pts = (projs[:, 0] > 0) & (projs[:, 0] < W) & \
               (projs[:, 1] > 0) & (projs[:, 1] < H)
    for i, p in enumerate(mapp.points):
      if not good_pts[i]:
        continue
      q = f1.kd.query_ball_point(projs[i], 5)
      for m_idx in q:
        if f1.pts[m_idx] is None:
          # if any descriptors within 32
          for o in p.orb():
            o_dist = hamming_distance(o, f1.des[m_idx])
            if o_dist < 32.0:
              p.add_observation(f1, m_idx)
              sbp_pts_count += 1
              break

  # triangulate the points we don't have matches for
  good_pts4d = np.array([f1.pts[i] is None for i in idx1])

  # do triangulation in local frame
  lpose = np.dot(f1.pose, np.linalg.inv(f2.pose))
  pts_local = triangulate(lpose, np.eye(4), f1.kps[idx1], f2.kps[idx2])
  good_pts4d &= np.abs(pts_local[:, 3]) > 0.01
  pts_local /= pts_local[:, 3:]       # homogeneous 3-D coords
  good_pts4d &= pts_local[:, 2] > 0   # locally in front of camera
  pts4d = np.dot(np.linalg.inv(f2.pose), pts_local.T).T

  print("Adding:   %d new points, %d search by projection" % (np.sum(good_pts4d), sbp_pts_count))

  # adding new points to the map from pairwise matches
  for i,p in enumerate(pts4d):
    if not good_pts4d[i]:
      continue
    u,v = int(round(f1.kpus[idx1[i],0])), int(round(f1.kpus[idx1[i],1]))
    pt = Point(mapp, p[0:3], img[v,u])
    pt.add_observation(f1, idx1[i])
    pt.add_observation(f2, idx2[i])

  # 2-D display
  if disp2d is not None:
    # paint annotations on the image
    for i1, i2 in zip(idx1, idx2):
      u1, v1 = int(round(f1.kpus[i1][0])), int(round(f1.kpus[i1][1]))
      u2, v2 = int(round(f2.kpus[i2][0])), int(round(f2.kpus[i2][1]))
      if f1.pts[i1] is not None:
        if len(f1.pts[i1].frames) >= 5:
          cv2.circle(img, (u1, v1), color=(0,255,0), radius=3)
        else:
          cv2.circle(img, (u1, v1), color=(0,128,0), radius=3)
      else:
        cv2.circle(img, (u1, v1), color=(0,0,0), radius=3)
      cv2.line(img, (u1, v1), (u2, v2), color=(255,0,0))
    disp2d.paint(img)

  # optimize the map
  if frame.id >= 4 and frame.id%5 == 0:
    err = mapp.optimize()
    print("Optimize: %f units of error" % err)

  # 3-D display
  if disp3d is not None:
    disp3d.paint(mapp)

  print("Map:      %d points, %d frames" % (len(mapp.points), len(mapp.frames)))
  print("Time:     %.2f ms" % ((time.time()-start_time)*1000.0))
  print(np.linalg.inv(f1.pose))
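None of the examples show how process_frame is driven. Below is a minimal sketch of a video loop that would feed it, assuming the globals (W, H, K, mapp, and the displays) have already been set up and that "test.mp4" is a placeholder path, not a file from the original project.

import cv2

if __name__ == "__main__":
    cap = cv2.VideoCapture("test.mp4")   # placeholder input video
    while cap.isOpened():
        ret, img = cap.read()
        if not ret:
            break
        process_frame(img)               # e.g. the version from Example #9
    cap.release()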