Example #1

import numpy as np
# nph (rigid-transform helpers) and project_points are project-local helpers, not shown here.
def getErrors(pts3d, projs, C, Rt_prev, Rt_curr, baseline, imgsz, is_pixelized=True):
   f = C[0,0]    # focal length
   cu = C[0,2]   # principal point, u
   cv = C[1,2]   # principal point, v
   pts3d_prev = Rt_prev.dot(pts3d)   # points expressed in the previous camera frame
   #pts3d_curr = Rt_curr.dot(pts3d)
   # Rt_prev * Rt_inc = Rt_curr
   #Rt_inc = nph.inv_Rt(Rt_prev).dot(Rt_curr)
   # Rt_curr * Rt_inc = Rt_prev
   #Rt_inc = nph.inv_Rt(Rt_curr).dot(Rt_prev)
   Rt_inc = nph.inv_Rt(Rt_prev.dot(nph.inv_Rt(Rt_curr)))
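   # note: inv_Rt(Rt_prev * inv_Rt(Rt_curr)) == Rt_curr * inv(Rt_prev), i.e. the transform
   # that maps point coordinates from the previous camera frame into the current one:
   #   X_curr = Rt_curr * X = (Rt_curr * inv(Rt_prev)) * (Rt_prev * X) = Rt_inc * X_prev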
   triangpt_p = np.zeros((4))
   triang_err = reproj_err = 0.0
   num_pts = pts3d.shape[1]
   for i in range(num_pts):
      # skip points with a negative (invalid) coordinate in any of the four projections
      if projs[0,0,i] < -0.5 or projs[1,0,i] < -0.5 or projs[2,0,i] < -0.5 or projs[3,0,i] < -0.5:
         num_pts -= 1
         continue
      # TODO - min disparity?
      # triangulate in previous left camera
      assert (projs[0,0,i] - projs[1,0,i]) >= 0
      if is_pixelized:
         # pixelized projections can have zero disparity, so clamp to 1 px to avoid division by zero
         d = np.max([projs[0,0,i] - projs[1,0,i], 1.0])
      else:
         d = projs[0,0,i] - projs[1,0,i]
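      # standard stereo relations behind the triangulation below: with u_l = f*X/Z + cu,
      # u_r = f*(X - baseline)/Z + cu and v_l = f*Y/Z + cv, the disparity d = u_l - u_r
      # equals f*baseline/Z, so Z = f*baseline/d, X = (u_l - cu)*baseline/d,
      # Y = (v_l - cv)*baseline/d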
      triangpt_p[0] = (projs[0,0,i] - cu) * baseline / d
      triangpt_p[1] = (projs[0,1,i] - cv) * baseline / d
      triangpt_p[2] = f * baseline / d
      triangpt_p[3] = 1.0
      # calculate triangulation error
      triang_err += np.linalg.norm(pts3d_prev[:,i] - triangpt_p)
      # calculate reprojection error
      # TODO
      # project in prev left frame - same as projs[0,:,i]
      ptproj_p = project_points(imgsz, C, np.eye(4), triangpt_p.reshape(4,1))
      # project in current left frame
      ptproj_c = project_points(imgsz, C, Rt_inc, triangpt_p.reshape(4,1))
      pt2d_lc = projs[2,:,i].reshape(2,1)
      reproj_err += np.linalg.norm(pt2d_lc - ptproj_c)      

   assert num_pts > 0
   return [triang_err/num_pts, reproj_err/num_pts, num_pts]
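
Both snippets lean on nph.inv_Rt to invert 4x4 rigid transforms without a general matrix inverse. The helper itself is not included here; the sketch below is an assumption of what such a closed-form inverse looks like, using the standard identity inv([R t; 0 1]) = [R^T, -R^T*t; 0 1].

import numpy as np

def inv_Rt(Rt):
   # closed-form inverse of a rigid transform: R is orthonormal, so inv(R) = R^T
   # and the translation becomes -R^T * t
   R = Rt[:3, :3]
   t = Rt[:3, 3]
   Rt_inv = np.eye(4)
   Rt_inv[:3, :3] = R.T
   Rt_inv[:3, 3] = -R.T.dot(t)
   return Rt_inv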
Example #3
   # Tb maps left-camera coordinates into the right-camera frame
   # (the right camera sits at +baseline along x, so points shift by -baseline)
   Tb = np.eye(4)
   Tb[0,3] = -baseline[b]
   print("Tb:\n", Tb, "\n")
   for i in range(nframes-1):
      # inputs are camera pose (camera-to-world) matrices for each frame,
      # so they are the inverse of the point-transform (world-to-camera) Rt matrices
      Rt_prev_inv = Rt_mats[i,:].reshape(4,4)
      Rt_curr_inv = Rt_mats[i+1,:].reshape(4,4)
      
      # calculate point-transform Rt matrices for the 2 frames (inverse of the camera pose matrices)
      # slower way
      #Rt_prev = np.linalg.inv(Rt_prev_inv)
      #Rt_curr =  np.linalg.inv(Rt_curr_inv)
      # faster (and better) way
      Rt_prev = nph.inv_Rt(Rt_prev_inv)
      Rt_curr = nph.inv_Rt(Rt_curr_inv)
      # project 3d point on image plane
      pts2d_leftp = project_points(imgsz, C, Rt_prev, pts3d[i,:,:])
      pts2d_rightp = project_points(imgsz, C, Tb.dot(Rt_prev), pts3d[i,:,:])
      # round them to integer pixel coordinates
      pixelize(imgsz, pts2d_leftp)
      pixelize(imgsz, pts2d_rightp)
      # do the same for current frame
      pts2d_leftc = project_points(imgsz, C, Rt_curr, pts3d[i,:,:])
      pts2d_rightc = project_points(imgsz, C, Tb.dot(Rt_curr), pts3d[i,:,:])
      pixelize(imgsz, pts2d_leftc)
      pixelize(imgsz, pts2d_rightc)
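      # hypothetical glue code (assumption): stack the four projections in the order
      # getErrors indexes them - prev-left, prev-right, curr-left, curr-right -
      # and evaluate the triangulation / reprojection errors for this frame pair
      projs = np.zeros((4, 2, pts2d_leftp.shape[1]))
      projs[0,:,:] = pts2d_leftp
      projs[1,:,:] = pts2d_rightp
      projs[2,:,:] = pts2d_leftc
      projs[3,:,:] = pts2d_rightc
      triang_err, reproj_err, used = getErrors(pts3d[i,:,:], projs, C, Rt_prev, Rt_curr,
                                               baseline[b], imgsz)
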
     print("Frame: " + str(i))
     # project 3d points onto the image plane
    Rt = nph.inv_Rt(Rt_mats[i, :].reshape(4, 4))
    pts2d_left = project_points(imgsz, C, Rt, pts3d)
    pts2d_right = project_points(imgsz, C, Tb.dot(Rt), pts3d)
     # round them to integer pixel coordinates
    pixelize(imgsz, pts2d_left)
    pixelize(imgsz, pts2d_right)
    update_age(age, pts2d_left, pts2d_right, i)

    projs_left[i, :, :] = pts2d_left
    projs_right[i, :, :] = pts2d_right
    #print("Frame " + str(i) + "\npoints visible: " + "%d / %d" % (visible_num, pts3d.shape[2]))

    #print("Plotting 3d points")
    #visible_pts = getVisibleStatus(projs_left)
    #plot_pts3d(pts3d, visible_pts)
    #plot_flow(pts2d_left, pts2d_right, imgsz)
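
The project_points and pixelize helpers are not included in these snippets. A minimal sketch of behaviour that would be consistent with how they are used above (projecting homogeneous 4xN points with C, flagging invisible points with negative coordinates so that the "< -0.5" test in getErrors skips them, then rounding to pixels) is given below; the imgsz = (width, height) ordering and the -1 flag value are assumptions.

import numpy as np

def project_points(imgsz, C, Rt, pts3d):
   # pts3d: 4xN homogeneous points; returns 2xN pixel coordinates,
   # with -1 marking points behind the camera or outside the image (assumed convention)
   w, h = imgsz                      # assumed (width, height) ordering
   pts_cam = Rt.dot(pts3d)
   pts2d = np.full((2, pts_cam.shape[1]), -1.0)
   for j in range(pts_cam.shape[1]):
      X, Y, Z = pts_cam[0, j], pts_cam[1, j], pts_cam[2, j]
      if Z <= 0.0:
         continue
      u = C[0,0] * X / Z + C[0,2]
      v = C[1,1] * Y / Z + C[1,2]
      if 0.0 <= u < w and 0.0 <= v < h:
         pts2d[0, j] = u
         pts2d[1, j] = v
   return pts2d

def pixelize(imgsz, pts2d):
   # round valid projections to integer pixel coordinates, in place
   valid = pts2d[0, :] >= 0.0
   pts2d[:, valid] = np.round(pts2d[:, valid])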