Example 1
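The following excerpts are methods of a larger reconstruction class (hence the `self` parameter). Judging from the call sites, the original module presumably imports along these lines; this header is a hedged guess, not the original file's:

    import numpy as np
    import cv2
    import matplotlib.pyplot as plt
    from scipy import interpolate
    from scipy.sparse import lil_matrix, vstack

    # Project-local modules assumed by the call sites:
    # util, ep (epipolar geometry), transformation, ransac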
    def select_most_overlap(self,init=False):
        '''
        Select either the initial pair of cameras or the next camera with the largest overlap
        '''

        if not self.find_order:
            return

        self.detection_to_global()
        overlap_max = 0
        
        if init:
            for i in range(self.numCam-1):
                for j in range(i+1,self.numCam):
                    x, y = util.match_overlap(self.detections_global[i],self.detections_global[j])
                    # overlap measured in seconds: matched samples divided by fps
                    overlap = x.shape[1] / self.cameras[i].fps
                    if overlap > overlap_max:
                        overlap_max = overlap
                        init_pair = [i,j]
            self.sequence = init_pair
        else:
            traj = self.spline_to_traj()
            # cameras that do not have a pose yet are candidates
            candidate = [i for i in range(self.numCam) if self.cameras[i].P is None]
            for i in candidate:
                interval = util.find_intervals(self.detections_global[i][0])
                overlap, _ = util.sampling(traj[0], interval)

                if len(overlap) > overlap_max:
                    overlap_max = len(overlap)
                    next_cam = i
            self.sequence.append(next_cam)
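A hedged usage sketch (`flight` is an illustrative instance with detections loaded and `find_order` enabled):

    flight.select_most_overlap(init=True)   # pick the initial camera pair by overlap in seconds
    flight.select_most_overlap()            # append the next-best uncalibrated camera
    print(flight.sequence)                  # e.g. [0, 3, 1]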
Example 2
    def error_fn(model,output=False):
        # model = (alpha, beta): linear mapping from ground-truth time to
        # reconstruction time, t = alpha * t_gt + beta
        alpha, beta = model[0], model[1]
        if gt.shape[0] == 3:
            # ground truth carries no timestamps: assume consecutive frames
            t_gt = alpha * np.arange(gt.shape[1]) + beta
        else:
            # use the recorded timestamps, shifted to start at zero
            t_gt = alpha * (gt[0]-gt[0,0]) + beta
        _, idx = util.sampling(t_gt, flight.spline['int'])
        gt_part = gt[-3:,idx]
        t_part = t_gt[idx]
        traj = flight.spline_to_traj(t=t_part)

        data = np.vstack((traj[1:],gt_part))
        error = np.zeros(gt.shape[1],dtype=float)

        # result = ransac.vanillaRansac(estimate_M,error_M,data,3,2,100)
        # error[idx] = error_M(result['model'],data)

        # Estimate a similarity transform (no shear) aligning the reconstructed
        # trajectory with the ground truth, then evaluate its residuals
        M = transformation.affine_matrix_from_points(traj[1:],gt_part,shear=False,scale=True)
        error[idx] = error_M(M.ravel(),data)
        
        if output:
            traj_tran = np.dot(M,util.homogeneous(traj[1:]))
            traj_tran /= traj_tran[-1]
            return np.vstack((traj[0],traj_tran[:3])), gt_part, M, error[idx]
        else:
            return error
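Since `error_fn` returns one residual per ground-truth sample, a plausible (hedged) outer call is scipy's `least_squares` over the two alignment parameters; the initial guess below assumes roughly equal time scales and zero offset:

    from scipy.optimize import least_squares

    result = least_squares(error_fn, x0=[1.0, 0.0])
    traj_aligned, gt_part, M, err = error_fn(result.x, output=True)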
Example 3
    def plot_reprojection(self,interval=np.array([[-np.inf],[np.inf]]),match=True):
        '''
        Given temporal sections of the trajectory, plot the 2D reprojection of these sections for
        each camera that has a valid pose
        '''

        assert interval.shape[0]==2

        for i in range(self.numCam):
            detect_i, _ = util.sampling(self.detections_global[i],interval)
            traj = self.spline_to_traj(t=detect_i[0])
            
            if traj.size:
                if match:
                    xy,x_ind,y_ind = np.intersect1d(detect_i[0],traj[0],assume_unique=True,return_indices=True)
                    detect_i = detect_i[:,x_ind]
                    traj = traj[:,y_ind]

                # cameras without a valid pose cannot project; skip them
                try:
                    repro = self.cameras[i].projectPoint(traj[1:])
                except Exception:
                    continue
                
                plt.figure(figsize=(12, 10))
                plt.scatter(detect_i[1],detect_i[2],c='red')
                plt.scatter(repro[0],repro[1],c='blue')
                plt.xlabel('X')
                plt.ylabel('Y')
                plt.suptitle('Camera {}: undistorted detection (red) vs reprojection (blue)'.format(i))

        plt.show()
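A hedged usage sketch, restricting the plots to one temporal window (bounds are in global time units; the values are illustrative):

    flight.plot_reprojection(interval=np.array([[1000.],[2000.]]), match=True)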
Example 4
    def get_camera_pose(self, cam_id, error=8, verbose=0):
        '''
        Get the absolute pose of a camera by solving the PnP problem.

        Take care with the DISTORTION model!
        '''
        
        tck, interval = self.spline['tck'], self.spline['int']
        self.detection_to_global(cam_id)

        _, idx = util.sampling(self.detections_global[cam_id], interval, belong=True)
        detect = np.empty([3,0])
        point_3D = np.empty([3,0])
        for i in range(interval.shape[1]):
            detect_part = self.detections_global[cam_id][:,idx==i+1]
            if detect_part.size:
                detect = np.hstack((detect,detect_part))
                point_3D = np.hstack((point_3D, np.asarray(interpolate.splev(detect_part[0], tck[i]))))

        # PnP solution from OpenCV
        N = point_3D.shape[1]
        objectPoints = np.ascontiguousarray(point_3D.T).reshape((N,1,3))
        imagePoints  = np.ascontiguousarray(detect[1:].T).reshape((N,1,2))
        distCoeffs = self.cameras[cam_id].d
        retval, rvec, tvec, inliers = cv2.solvePnPRansac(objectPoints, imagePoints, self.cameras[cam_id].K, distCoeffs, reprojectionError=error)

        self.cameras[cam_id].R = cv2.Rodrigues(rvec)[0]
        self.cameras[cam_id].t = tvec.reshape(-1,)
        self.cameras[cam_id].compose()

        if verbose:
            # inliers is None when RANSAC fails to find a consensus set
            num_inliers = 0 if inliers is None else inliers.shape[0]
            print('{} out of {} points are inliers for PnP'.format(num_inliers, N))
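For reference, a minimal sketch of what the final steps presumably amount to: `cv2.Rodrigues` converts the rotation vector to a matrix, and `compose()` is assumed to assemble the projection matrix from the parts set above:

    # Pose recovered by solvePnPRansac; K is the camera's intrinsic matrix
    R, _ = cv2.Rodrigues(rvec)
    P = np.dot(K, np.hstack((R, tvec.reshape(3, 1))))   # P = K [R | t]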
Example 5
    def error_cam(self,cam_id,mode='dist',motion_prior=False,norm=False):
        '''
        Calculate the reprojection errors for a given camera

        Different modes are available: 'dist', 'xy_1D', 'xy_2D', 'each'
        '''

        tck, interval = self.spline['tck'], self.spline['int']
        if motion_prior:
            self.detection_to_global(motion_prior=motion_prior)
        else:
            self.detection_to_global(cam_id)

        _, idx = util.sampling(self.detections_global[cam_id], interval, belong=True)
        detect = np.empty([3,0])
        point_3D = np.empty([3,0])
        for i in range(interval.shape[1]):
            detect_part = self.detections_global[cam_id][:,idx==i+1]
            if detect_part.size:
                if motion_prior:
                    cam_global_traj = self.global_traj[:,self.global_traj[1] == cam_id]
                    _,traj_idx,detect_idx = np.intersect1d(cam_global_traj[3],detect_part[0],assume_unique=True,return_indices=True)
                    detect_part = detect_part[:,detect_idx]
                    detect = np.hstack((detect,detect_part))
                    point_3D = np.hstack((point_3D,cam_global_traj[4:,traj_idx]))
                else:
                    detect = np.hstack((detect,detect_part)) 
                    point_3D = np.hstack((point_3D, np.asarray(interpolate.splev(detect_part[0], tck[i]))))
                
        X = util.homogeneous(point_3D)
        x = detect[1:]
        x_cal = self.cameras[cam_id].projectPoint(X)
        
        # Normalize tracks: compare the residuals in normalized image coordinates
        if norm:
            K_inv = np.linalg.inv(self.cameras[cam_id].K)
            x_cal = np.dot(K_inv, x_cal)
            x = np.dot(K_inv, util.homogeneous(x))

        if mode == 'dist':
            return ep.reprojection_error(x, x_cal)
        elif mode == 'xy_1D':
            return np.concatenate((abs(x_cal[0]-x[0]),abs(x_cal[1]-x[1])))
        elif mode == 'xy_2D':
            return np.vstack((abs(x_cal[0]-x[0]),abs(x_cal[1]-x[1])))
        elif mode == 'each':
            error_x = np.zeros_like(self.detections[cam_id][0])
            error_y = np.zeros_like(self.detections[cam_id][0])
            if motion_prior:
                _,det_idx,_ = np.intersect1d(self.detections_global[cam_id][0],detect[0],assume_unique=True,return_indices=True)
                assert det_idx.shape[0] == x_cal.shape[1], '# of detections and traj. points are not equal'
                error_x[det_idx] = abs(x_cal[0]-x[0])
                error_y[det_idx] = abs(x_cal[1]-x[1])
            else:
                error_x[idx.astype(bool)] = abs(x_cal[0]-x[0])
                error_y[idx.astype(bool)] = abs(x_cal[1]-x[1])
            return np.concatenate((error_x, error_y))
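A hedged sketch of consuming the 'each' mode, whose output stacks the per-detection x errors before the y errors (`flight` is illustrative):

    err = flight.error_cam(cam_id=0, mode='each')
    n = flight.detections[0].shape[1]
    err_x, err_y = err[:n], err[n:]   # one entry per raw detection, zeros where invisible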
Example 6
    def compute_visibility(self):
        '''
        Decide for each raw detection whether it is visible from the current 3D spline
        '''

        self.visible = []
        interval = self.spline['int']
        self.detection_to_global()

        for cam_id in range(self.numCam):
            _, visible = util.sampling(self.detections_global[cam_id], interval, belong=True)
            self.visible.append(visible)
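Since `sampling(..., belong=True)` returns the 1-based index of the containing interval, with 0 for detections outside every interval (as exploited by `idx.astype(bool)` in `error_cam`), a boolean visibility mask follows directly; `flight` is again illustrative:

    flight.compute_visibility()
    mask = flight.visible[0].astype(bool)   # camera 0 detections covered by the spline
    print('{} of {} detections visible'.format(mask.sum(), mask.size))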
Example 7
    def cut_detection(self,second=1):
        '''
        Truncate the beginning and the end of each continuous segment of the detections
        '''

        if not second: return

        for i in range(self.numCam):
            detect = self.detections[i]
            interval = util.find_intervals(detect[0])
            cut = int(self.cameras[i].fps * second)

            # keep only segments long enough to survive trimming on both sides
            interval_long = interval[:,interval[1]-interval[0]>cut*2]
            interval_long[0] += cut
            interval_long[1] -= cut

            assert (interval_long[1]-interval_long[0]>=0).all()

            self.detections[i], _ = util.sampling(detect,interval_long)
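A brief worked example of the trimming arithmetic: at 30 fps with `second=1`, `cut` is 30 frames, so only segments longer than 60 frames survive, and a segment spanning frames [100, 400] shrinks to [130, 370]:

    flight.cut_detection(second=1)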
Example 8
    def triangulate(self, cam_id, cams, factor_t2s, factor_s2t=0.02, thres=0, refit=True, verbose=0):
        '''
        Triangulate new points into the existing 3D spline and optionally refit it

        cam_id is the new camera
        
        cams must be an iterable that contains cameras that have been processed to build the 3D spline
        '''

        assert self.cameras[cam_id].P is not None, 'The camera pose must be computed first'
        tck, interval = self.spline['tck'], self.spline['int']
        self.detection_to_global(cam_id)

        # Find detections from this camera that haven't been triangulated yet
        _, idx_ex = util.sampling(self.detections_global[cam_id], interval)
        detect_new = self.detections_global[cam_id][:, np.logical_not(idx_ex)]

        # Match these detections with those from previous cameras and triangulate them
        X_new = np.empty([4,0])
        for i in cams:
            self.detection_to_global(i)
            detect_ex = self.detections_global[i]

            # Detections of previous cameras are interpolated, no matter the fps
            try:
                x1, x2 = util.match_overlap(detect_new, detect_ex)
            except Exception:
                # no temporal overlap with this camera
                continue
            else:
                P1, P2 = self.cameras[cam_id].P, self.cameras[i].P
                X_i = ep.triangulate_matlab(x1[1:], x2[1:], P1, P2)
                X_i = np.vstack((x1[0], X_i[:-1]))

                # Check reprojection error directly after triangulation, preserve those with small error
                if thres:
                    err_1 = ep.reprojection_error(x1[1:], self.cameras[cam_id].projectPoint(X_i[1:]))
                    err_2 = ep.reprojection_error(x2[1:], self.cameras[i].projectPoint(X_i[1:]))
                    mask = np.logical_and(err_1<thres, err_2<thres)
                    X_i = X_i[:, mask]
                    
                    if verbose:
                        print('{} out of {} points are triangulated'.format(sum(mask), len(err_1)))

                X_new = np.hstack((X_new, X_i))

                if verbose:
                    print('{} points are triangulated into the 3D spline'.format(X_i.shape[1]))

        _, idx_empty = util.sampling(X_new, interval)
        assert sum(idx_empty)==0, 'Points should not be triangulated into the existing part of the 3D spline'

        # Add these points to the discrete 3D trajectory
        self.spline_to_traj(sampling_rate=factor_s2t)
        self.traj = np.hstack((self.traj, X_new))
        _, idx = np.unique(self.traj[0], return_index=True)
        self.traj = self.traj[:, idx]

        # Refit the 3D spline if requested
        if refit:
            self.traj_to_spline(smooth_factor=factor_t2s)

        return X_new
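A hedged usage sketch (`flight` and the factor values are illustrative placeholders, not recommendations); the cameras passed in `cams` are assumed to have built the current spline:

    X_new = flight.triangulate(cam_id=flight.sequence[2], cams=flight.sequence[:2],
                               factor_t2s=0.001, thres=10, verbose=1)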
Example 9
        def jac_BA(near=3,motion_offset=10):

            num_param = len(model)
            self.compute_visibility()

            jac = lil_matrix((1, num_param),dtype=int)
            #jac = np.empty([0,num_param])

            if motion_reg:
                m_jac = lil_matrix((self.traj.shape[1], num_param),dtype=int)
            elif motion_prior:
                m_jac = lil_matrix((self.global_traj.shape[1], num_param),dtype=int)
            
            for i in range(numCam):
                cam_id = self.sequence[i]
                num_detect = self.detections[cam_id].shape[1]

                # consider only reprojection in x direction, which is the same in y direction
                jac_cam = lil_matrix((num_detect, num_param),dtype=int)
                #jac_cam = np.zeros((num_detect, num_param))

                # alpha and beta (temporal alignment) are optimized only if opt_sync is set
                try:
                    jac_cam[:,[i,i+numCam]] = 1 if self.settings['opt_sync'] else 0
                except (KeyError, TypeError):
                    jac_cam[:,[i,i+numCam]] = 1

                # rolling shutter
                if rs:
                    jac_cam[:,i+numCam*2] = 1
                else:
                    jac_cam[:,i+numCam*2] = 0

                # camera parameters
                start = 3*numCam+i*num_camParam
                jac_cam[:,start:start+num_camParam] = 1

                if motion_prior:
                    traj_start = numCam * (3+num_camParam)
                    traj_len = self.global_traj.shape[1]
                    for j in range(num_detect):
                        # Verify traj. point lies within current spline interval
                        if self.visible[cam_id][j]:
                            timestamp = self.detections_global[cam_id][0,j]
                            traj_pnt = np.where(self.global_traj[3] == timestamp)[0]
                            traj_pnt += traj_start
                            if (traj_pnt-traj_start) < motion_offset:
                                traj_idx = np.arange(traj_start,traj_pnt+motion_offset)   
                            else:
                                traj_idx = np.arange(traj_pnt-motion_offset,traj_pnt+motion_offset) 
                                
                            traj_idx = np.concatenate((traj_idx, traj_idx+traj_len, traj_idx+2*traj_len))
                            
                            if np.array(traj_idx < num_param).all():
                                jac_cam[j,traj_idx] = 1 
                            else:
                                jac_cam[j,traj_idx[traj_idx < num_param]] = 1         
                        else:
                            jac_cam[j] = 0
                        
                    jac = vstack((jac, vstack([jac_cam,jac_cam])))
                # spline parameters
                else:
                    for j in range(num_detect):
                        spline_id = self.visible[cam_id][j]

                        # Find the corresponding spline for each detection
                        if spline_id:
                            spline_id -= 1
                            knot = self.spline['tck'][spline_id][0][2:-2]
                            timestamp = self.detections_global[cam_id][0,j]
                            knot_idx = np.argsort(abs(knot-timestamp))[:near]
                            knot_idx = np.concatenate((knot_idx, knot_idx+len(knot), knot_idx+2*len(knot)))
                            jac_cam[j,idx_spline_sum[0,spline_id]+knot_idx] = 1

                        else:
                            jac_cam[j,:] = 0

                    jac = vstack((jac, vstack([jac_cam,jac_cam])))
                    #jac = np.vstack((jac, np.tile(jac_cam,(2,1))))

            if motion_reg:
                tck, interval = self.spline['tck'], self.spline['int']
                for j in range(self.traj.shape[1]):
                    _, spline_id = util.sampling(self.traj[:,j], interval, belong=True)

                    # Find the corresponding spline for each interpolated point
                    spline_id[0] -= 1
                    knot = self.spline['tck'][spline_id[0]][0][2:-2]
                    timestamp = self.traj[0,j]
                    knot_idx = np.argsort(abs(knot-timestamp))[:near]
                    knot_idx = np.concatenate((knot_idx, knot_idx+len(knot), knot_idx+2*len(knot)))
                    m_jac[j,idx_spline_sum[0,spline_id[0]]+knot_idx] = 1
                jac = vstack((jac, m_jac))
            
            elif motion_prior:
                m_jac = lil_matrix((self.global_traj.shape[1], num_param),dtype=int)
                traj_start = numCam * (3+num_camParam)
                traj_len = self.global_traj.shape[1]
                for j in range(self.global_traj.shape[1]):
                    # each trajectory point interacts with its neighbours within motion_offset
                    if j < motion_offset:
                        m_traj_idx = np.arange(0,j+motion_offset) + traj_start
                    else:
                        m_traj_idx = np.arange(j-motion_offset,j+motion_offset) + traj_start
                    m_traj_idx = np.concatenate((m_traj_idx, m_traj_idx+traj_len, m_traj_idx+2*traj_len))

                    if np.array(m_traj_idx < num_param).all():
                        m_jac[j,m_traj_idx] = 1
                    else:
                        m_jac[j,m_traj_idx[m_traj_idx < num_param]] = 1

                jac = vstack((jac, m_jac))
                
            # fix the first camera
            # jac[:,[0,numCam]], jac[:,2*numCam+4:2*numCam+10] = 0, 0
            #return jac
            return jac.toarray()[1:]
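The pattern returned by `jac_BA` lends itself to sparsity-aware finite differencing; a hedged sketch of the outer call, where `model` is the parameter vector from the enclosing scope and `error_BA` is the matching residual function (name assumed here). Passing the pattern as `jac_sparsity` restricts finite differencing to the non-zero entries and makes scipy use its sparse 'lsmr' trust-region solver:

    from scipy.optimize import least_squares

    sparsity = jac_BA()
    res = least_squares(error_BA, model, jac_sparsity=sparsity, verbose=2)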
Example 10
    def error_motion(self,cams,mode='dist',norm=False,motion_weights=0,motion_reg=False,motion_prior=False):
        '''
        Calculate the motion-prior residuals of the trajectory for a multi_spline object.

        - Accounts for the motion prior

        - Motion priors available: 'F', 'KE'

        - Computes the error for the motion-prior regularization terms
        '''

        interval = self.spline['int']
        
        # Update global_detections and global_traj timestamps
        self.detection_to_global(cams,motion_prior=True)
        # Update global_traj for motion_reg
        if motion_reg:
            self.spline_to_traj()
            _, idx = util.sampling(self.traj[0], interval, belong=True)
        if motion_prior:
            _, idx = util.sampling(self.global_traj[3], interval, belong=True)

        mot_err_res = np.array([])
        # defined up front so an empty vector is returned if neither flag is set
        motion_error = np.array([])

        if motion_prior:
            global_traj_ts = np.array([])
            for i in range(interval.shape[1]):
                traj_part = self.global_traj[:,idx==i+1]
                if traj_part.size:
                    weights = np.ones(traj_part.shape[1]) * motion_weights
                    mot_err = self.motion_prior(traj_part[3:],weights,prior=self.settings['motion_type'])
                    mot_err_res = np.concatenate((mot_err_res, mot_err))
                    if self.settings['motion_type'] == 'F':
                        global_traj_ts = np.concatenate((global_traj_ts, traj_part[3,1:-1]))
                    else:
                        global_traj_ts = np.concatenate((global_traj_ts, traj_part[3,1:]))
            motion_error = np.zeros((self.global_traj.shape[1]))
            _,traj_idx,_ = np.intersect1d(self.global_traj[3],global_traj_ts,assume_unique=True,return_indices=True)
            assert traj_idx.shape[0] == mot_err_res.shape[0], 'wrong number of global_traj points'
            motion_error[traj_idx] = mot_err_res
        
        elif motion_reg:
            traj_ts = np.array([]) 
            motion_error = np.zeros((self.traj.shape[1]))
            for i in range(interval.shape[1]):
                traj_part = self.traj[:,idx==i+1]
                if traj_part.size:
                    weights = np.ones(traj_part.shape[1]) * motion_weights
                    mot_err = self.motion_prior(traj_part,weights,prior=self.settings['motion_type'])
                    mot_err_res = np.concatenate((mot_err_res, mot_err))
                    assert self.settings['motion_type'] == 'F' or self.settings['motion_type'] == 'KE','Motion type must be either F or KE' 
                    if self.settings['motion_type'] == 'F':
                        traj_ts = np.concatenate((traj_ts, traj_part[0,1:-1]))  
                    elif self.settings['motion_type'] == 'KE':
                        traj_ts = np.concatenate((traj_ts, traj_part[0,1:])) 
            _,traj_idx,_ = np.intersect1d(self.traj[0],traj_ts,assume_unique=True,return_indices=True)
            motion_error[traj_idx] = mot_err_res
            
        return motion_error
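A hedged usage sketch: the returned vector holds one residual per trajectory point (zeros where the prior does not apply), so it can be stacked with reprojection residuals in a joint cost; `reproj_err` is assumed to come from `error_cam`:

    mot_err = flight.error_motion(cams=[0,1,2], motion_reg=True, motion_weights=1.0)
    residuals = np.concatenate((reproj_err, mot_err))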