Code Example #1
File: synchronization.py  Project: moneytech/mvus
    def error(M):
        # M[:9] : flattened 3x3 fundamental matrix
        # M[-1] : candidate time offset between the two detection sets
        try:
            # Shift the timestamps of the second detection set by the candidate offset
            detect2_temp = np.vstack((detect2[0] - M[-1], detect2[1:]))
            # Keep only detections of the two cameras that overlap in time
            pts1, pts2 = util.match_overlap(detect1, detect2_temp)
            return epipolar.Sampson_error(util.homogeneous(pts1[1:]),
                                          util.homogeneous(pts2[1:]),
                                          M[:9].reshape((3, 3)))
        except Exception:
            return None
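
A minimal usage sketch for error() above (hedged; detect1, detect2, util, and epipolar are assumed to come from the enclosing scope in synchronization.py, and F0/beta0 are illustrative placeholders):

# Assumes execution inside the enclosing scope where detect1/detect2 exist.
import numpy as np

F0 = np.eye(3)                              # initial guess for the fundamental matrix
beta0 = 0.0                                 # initial guess for the time shift between cameras
M0 = np.concatenate((F0.ravel(), [beta0]))  # 10-vector: flattened F followed by the shift
residuals = error(M0)                       # Sampson errors, or None if matching fails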
Code Example #2
File: common.py  Project: moneytech/mvus
    def error_cam(self,cam_id,mode='dist',motion_prior=False,norm=False):
        '''
        Calculate the reprojection errors for a given camera

        Different modes are available: 'dist', 'xy_1D', 'xy_2D', 'each'
        '''

        tck, interval = self.spline['tck'], self.spline['int']
        if motion_prior:
            self.detection_to_global(motion_prior=motion_prior)
        else:
            self.detection_to_global(cam_id)

        _, idx = util.sampling(self.detections_global[cam_id], interval, belong=True)
        detect = np.empty([3,0])
        point_3D = np.empty([3,0])
        for i in range(interval.shape[1]):
            detect_part = self.detections_global[cam_id][:,idx==i+1]
            if detect_part.size:
                if motion_prior:
                    cam_global_traj = self.global_traj[:,self.global_traj[1] == cam_id]
                    _,traj_idx,detect_idx = np.intersect1d(cam_global_traj[3],detect_part[0],assume_unique=True,return_indices=True)
                    detect_part = detect_part[:,detect_idx]
                    detect = np.hstack((detect,detect_part))
                    point_3D = np.hstack((point_3D,cam_global_traj[4:,traj_idx]))
                else:
                    detect = np.hstack((detect,detect_part)) 
                    point_3D = np.hstack((point_3D, np.asarray(interpolate.splev(detect_part[0], tck[i]))))
                
        X = util.homogeneous(point_3D)
        x = detect[1:]
        x_cal = self.cameras[cam_id].projectPoint(X)
        
        # Normalize tracks with the inverse intrinsics (pixel -> normalized image coordinates)
        if norm:
            x_cal = np.dot(np.linalg.inv(self.cameras[cam_id].K), x_cal)
            x = np.dot(np.linalg.inv(self.cameras[cam_id].K), util.homogeneous(x))

        if mode == 'dist':
            return ep.reprojection_error(x, x_cal)
        elif mode == 'xy_1D':
            return np.concatenate((abs(x_cal[0]-x[0]),abs(x_cal[1]-x[1])))
        elif mode == 'xy_2D':
            return np.vstack((abs(x_cal[0]-x[0]),abs(x_cal[1]-x[1])))
        elif mode == 'each':
            error_x = np.zeros_like(self.detections[cam_id][0])
            error_y = np.zeros_like(self.detections[cam_id][0])
            if motion_prior:
                _,det_idx,_ = np.intersect1d(self.detections_global[cam_id][0],detect[0],assume_unique=True,return_indices=True)
                assert det_idx.shape[0] == x_cal.shape[1], '# of detections and traj. points are not equal'
                error_x[det_idx] = abs(x_cal[0]-x[0])
                error_y[det_idx] = abs(x_cal[1]-x[1])
            else:
                error_x[idx.astype(bool)] = abs(x_cal[0]-x[0])
                error_y[idx.astype(bool)] = abs(x_cal[1]-x[1])
            return np.concatenate((error_x, error_y))
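
A hedged call sketch for error_cam (illustrative only; 'flight' stands for an instance of the class in common.py that defines the method, and camera 0 is an arbitrary choice):

err = flight.error_cam(0, mode='dist')        # per-detection reprojection distances
err_xy = flight.error_cam(0, mode='xy_2D')    # 2xN array of |dx| and |dy| residuals
print('mean reprojection error:', err.mean())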
Code Example #3
    def error_fn(model,output=False):
        # model = [alpha, beta]: frame-rate ratio and time offset mapping the
        # ground-truth timeline into the reconstruction timeline
        alpha, beta = model[0], model[1]
        if gt.shape[0] == 3:
            # Ground truth carries no timestamps: use frame indices instead
            t_gt = alpha * np.arange(gt.shape[1]) + beta
        else:
            t_gt = alpha * (gt[0]-gt[0,0]) + beta
        _, idx = util.sampling(t_gt, flight.spline['int'])
        gt_part = gt[-3:,idx]
        t_part = t_gt[idx]
        traj = flight.spline_to_traj(t=t_part)

        data = np.vstack((traj[1:],gt_part))
        error = np.zeros(gt.shape[1],dtype=float)

        # result = ransac.vanillaRansac(estimate_M,error_M,data,3,2,100)
        # error[idx] = error_M(result['model'],data)

        M = transformation.affine_matrix_from_points(traj[1:],gt_part,shear=False,scale=True)
        error[idx] = error_M(M.ravel(),data)
        
        if output:
            traj_tran = np.dot(M,util.homogeneous(traj[1:]))
            traj_tran /= traj_tran[-1]
            return np.vstack((traj[0],traj_tran[:3])), gt_part, M, error[idx]
        else:
            return error
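
error_fn returns a residual vector over all ground-truth samples, so it can be fed to a least-squares solver; the sketch below uses scipy.optimize.least_squares as one plausible choice (an assumption, not necessarily what the project's optimize() does), and it assumes execution in the enclosing scope where error_fn is defined:

from scipy import optimize

alpha0, beta0 = 1.0, 0.0                                   # illustrative initial guesses
ls = optimize.least_squares(error_fn, x0=[alpha0, beta0])  # minimize the alignment residuals
reconst_tran, gt_part, M, err = error_fn(ls.x, output=True)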
Code Example #4
File: common.py  Project: moneytech/mvus
    def projectPoint(self,X):

        assert self.P is not None, 'The projection matrix P has not been calculated yet'
        # Accept Euclidean 3D points and convert them to homogeneous coordinates
        if X.shape[0] == 3:
            X = util.homogeneous(X)
        x = np.dot(self.P,X)
        # Normalize so that the homogeneous coordinate equals one
        x /= x[2]
        return x
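
A short usage sketch for projectPoint (hedged; 'cam' is a placeholder for a camera object whose projection matrix cam.P has already been computed):

import numpy as np

X = np.random.rand(3, 10)     # ten 3D points in Euclidean coordinates
x = cam.projectPoint(X)       # 3x10 homogeneous image points, scaled so that x[2] == 1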
Code Example #5
def error_M(model,data,param=None):
    # model: flattened 4x4 affine transform; data: stacked [reconstruction; ground truth]
    reconst = data[:3]
    gt = data[3:]
    M = model.reshape(4,4)

    # Apply the transform in homogeneous coordinates and normalize
    tran = np.dot(M,util.homogeneous(reconst))
    tran /= tran[-1]

    # Per-point Euclidean distance between transformed reconstruction and ground truth
    return np.sqrt((gt[0]-tran[0])**2 + (gt[1]-tran[1])**2 + (gt[2]-tran[2])**2)
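
A self-contained sanity check for error_M on synthetic data (assumes the project's util.homogeneous is importable); with an identity transform and identical point sets the residuals should vanish:

import numpy as np

pts = np.random.rand(3, 20)                   # synthetic 3D points
data = np.vstack((pts, pts))                  # reconstruction and 'ground truth' coincide
residuals = error_M(np.eye(4).ravel(), data)  # one distance per point
assert np.allclose(residuals, 0)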
Code Example #6
File: common.py  Project: moneytech/mvus
    def init_traj(self,error=10,inlier_only=False):
        '''
        Select the first two cameras in the sequence, compute the fundamental matrix, and triangulate points
        '''

        self.select_most_overlap(init=True)

        t1, t2 = self.sequence[0], self.sequence[1]
        K1, K2 = self.cameras[t1].K, self.cameras[t2].K

        # Find correspondences
        if self.cameras[t1].fps > self.cameras[t2].fps:
            d1, d2 = util.match_overlap(self.detections_global[t1], self.detections_global[t2])
        else:
            d2, d1 = util.match_overlap(self.detections_global[t2], self.detections_global[t1])
        
        # Compute fundamental matrix
        F,inlier = ep.computeFundamentalMat(d1[1:],d2[1:],error=error)
        E = np.dot(np.dot(K2.T,F),K1)

        if not inlier_only:
            inlier = np.ones(len(inlier))
        x1, x2 = util.homogeneous(d1[1:,inlier==1]), util.homogeneous(d2[1:,inlier==1])

        # Find corrected corresponding points for optimal triangulation
        N = d1[1:,inlier==1].shape[1]
        pts1=d1[1:,inlier==1].T.reshape(1,-1,2)
        pts2=d2[1:,inlier==1].T.reshape(1,-1,2)
        m1,m2 = cv2.correctMatches(F,pts1,pts2)
        x1,x2 = util.homogeneous(np.reshape(m1,(-1,2)).T), util.homogeneous(np.reshape(m2,(-1,2)).T)

        mask = np.logical_not(np.isnan(x1[0]))
        x1 = x1[:,mask]
        x2 = x2[:,mask]

        # Triangulate points
        X, P = ep.triangulate_from_E(E,K1,K2,x1,x2)
        self.traj = np.vstack((d1[0][inlier==1][mask],X[:-1]))

        # Assign the camera matrix for these two cameras
        self.cameras[t1].P = np.dot(K1,np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0]]))
        self.cameras[t2].P = np.dot(K2,P)
        self.cameras[t1].decompose()
        self.cameras[t2].decompose()
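
A hedged call sketch for init_traj ('flight' is an illustrative instance of the reconstruction class in common.py):

flight.init_traj(error=10, inlier_only=True)  # keep only inliers of the fundamental-matrix fit
print(flight.traj.shape)                      # 4xN: timestamps stacked on triangulated 3D points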
Code Example #7
File: common.py  Project: moneytech/mvus
    def undist_point(self,points):

        assert points.shape[0]==2, 'Input must be a 2xN array of pixel coordinates'

        num = points.shape[1]

        # cv2.undistortPoints expects an (N,1,2) array and returns normalized coordinates
        src = np.ascontiguousarray(points.T).reshape((num,1,2))
        dst = cv2.undistortPoints(src, self.K, self.d)
        # Map the normalized coordinates back to pixels with the intrinsic matrix K
        dst_unnorm = np.dot(self.K, util.homogeneous(dst.reshape((num,2)).T))

        return dst_unnorm[:2]
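
Usage sketch for undist_point (hedged; 'cam' is assumed to carry the intrinsics cam.K and distortion coefficients cam.d used above):

import numpy as np

pts = np.array([[320.0, 240.0],
                [100.0, 400.0]])    # 2xN pixel coordinates (row 0: x, row 1: y)
pts_undist = cam.undist_point(pts)  # 2xN undistorted pixel coordinates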
Code Example #8
def align_gt(flight, f_gt, gt_path, visualize=False):

    if not len(gt_path):
        print('No ground truth data provided\n')
        return
    else:
        try:
            gt_ori = np.loadtxt(gt_path)
        except Exception:
            print('Ground truth not correctly loaded')
            return

    if gt_ori.shape[0] == 3 or gt_ori.shape[0] == 4:
        pass
    elif gt_ori.shape[1] == 3 or gt_ori.shape[1] == 4:
        gt_ori = gt_ori.T
    else:
        raise Exception('Ground truth data has an invalid shape')

    # Pre-processing
    f_reconst = flight.cameras[flight.settings['ref_cam']].fps
    alpha = f_reconst/f_gt

    reconst = flight.spline_to_traj(sampling_rate=alpha)
    t0 = reconst[0,0]
    reconst = np.vstack(((reconst[0]-t0)/alpha,reconst[1:]))
    if gt_ori.shape[0] == 3:
        gt = np.vstack((np.arange(len(gt_ori[0])),gt_ori))
    else:
        gt = np.vstack((gt_ori[0]-gt_ori[0,0],gt_ori[1:]))

    # Coarse search
    thres = int(reconst[0,-1] / 2)
    if int(gt[0,-1]-thres) < 0:
        raise Exception('Ground truth too short!')

    error_min = np.inf
    for i in range(-thres, int(gt[0,-1]-thres)):
        reconst_i = np.vstack((reconst[0]+i,reconst[1:]))
        p1, p2 = util.match_overlap(reconst_i, gt)
        M = transformation.affine_matrix_from_points(p1[1:], p2[1:], shear=False, scale=True)

        tran = np.dot(M, util.homogeneous(p1[1:]))
        tran /= tran[-1]
        error_all = np.sqrt((p2[1]-tran[0])**2 + (p2[2]-tran[1])**2 + (p2[3]-tran[2])**2)
        error = np.mean(error_all)
        if error < error_min:
            error_min = error
            error_coarse = error_all
            j = i
    beta = t0-alpha*j

    # Fine optimization
    ls, res = optimize(alpha,beta,flight,gt_ori)

    # Remove outliers by relative thresholding
    thres = 10
    error_ = res[3]
    idx = error_ <= thres*np.mean(error_)
    reconst_, gt_, error_ = res[0][:,idx], res[1][:,idx], error_[idx]

    # Result
    out = {'align_param':ls.x, 'reconst_tran':reconst_, 'gt':gt_, 'tran_matrix':res[2], 'error':error_}
    print('The mean error (distance) is {:.5f} meters\n'.format(np.mean(out['error'])))
    print('The median error (distance) is {:.5f} meters\n'.format(np.median(out['error'])))

    print(ls.x)

    if visualize:
        # Compare the trajectories
        vis.show_trajectory_3D(out['reconst_tran'][1:], out['gt'], line=False, title='Reconstruction (left) vs Ground Truth (right)')

        # Error histogram
        vis.error_hist(out['error'])

        # Error over the trajectory
        vis.error_traj(out['reconst_tran'][1:], out['error'])

    return out
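
A hedged top-level call of align_gt (the ground-truth file name and frame rate below are placeholders, not values from the project):

out = align_gt(flight, f_gt=10, gt_path='gt.txt', visualize=False)
if out is not None:
    print(out['align_param'])       # fitted time-alignment parameters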