Code example #1
    def initializeGraphFromObsDb(self, obs_db):
        t0 = time.time()

        #create graph and label the vertices
        G = igraph.Graph(self.numCams)
        for id, vert in enumerate(G.vs):
            vert["label"] = "cam{0}".format(id)

        #go through all times
        for timestamp in self.obs_db.getAllViewTimestamps():
            #cameras that have a target view at this timestamp instant
            cam_ids_at_timestamp = set(obs_db.getCamIdsAtTimestamp(timestamp))

            #go through all edges of the graph and check if we have common corners
            possible_edges = itertools.combinations(cam_ids_at_timestamp, 2)

            for edge in possible_edges:
                cam_id_A = edge[0]
                cam_id_B = edge[1]

                #check this camera pair for commonly observed target corners
                corners_A = self.obs_db.getCornerIdsAtTime(timestamp, cam_id_A)
                obs_id_A = self.obs_db.getObsIdForCamAtTime(
                    timestamp, cam_id_A)
                corners_B = self.obs_db.getCornerIdsAtTime(timestamp, cam_id_B)
                obs_id_B = self.obs_db.getObsIdForCamAtTime(
                    timestamp, cam_id_B)

                common_corners = corners_A & corners_B

                #add graph edge if we found common corners
                if common_corners:
                    #add the edge if it doesn't exist yet
                    try:
                        edge_idx = G.get_eid(cam_id_A, cam_id_B)
                    except:
                        G.add_edges([(cam_id_A, cam_id_B)])
                        edge_idx = G.get_eid(cam_id_A, cam_id_B)
                        G.es[edge_idx]["obs_ids"] = list()
                        G.es[edge_idx]["weight"] = 0

                    #store the observation ids with the lower camera id first on the edge
                    G.es[edge_idx]["weight"] += len(common_corners)
                    G.es[edge_idx]["obs_ids"].append((
                        obs_id_A,
                        obs_id_B) if cam_id_A < cam_id_B else (obs_id_B,
                                                               obs_id_A))

        #store the graph
        self.G = G

        #timing
        t1 = time.time()
        total = t1 - t0
        sm.logDebug("It took {0}s to build the graph.".format(total))
Code example #2
File: MulticamGraph.py  Project: AliAlawieh/kalibr
    def initializeGraphFromObsDb(self, obs_db):        
        t0 = time.time()

        #create graph and label the vertices
        G = igraph.Graph(self.numCams)
        for id, vert in enumerate(G.vs):
            vert["label"] = "cam{0}".format(id)
        
        #go through all times
        for timestamp in self.obs_db.getAllViewTimestamps():
            #cameras that have a target view at this timestamp instant
            cam_ids_at_timestamp = set( obs_db.getCamIdsAtTimestamp(timestamp) )
            
            #go through all edges of the graph and check if we have common corners
            possible_edges = itertools.combinations(cam_ids_at_timestamp, 2)
            
            for edge in possible_edges:
                cam_id_A = edge[0]
                cam_id_B = edge[1]
                
                #check this camera pair for commonly observed target corners
                corners_A = self.obs_db.getCornerIdsAtTime(timestamp, cam_id_A)
                obs_id_A = self.obs_db.getObsIdForCamAtTime(timestamp, cam_id_A)
                corners_B = self.obs_db.getCornerIdsAtTime(timestamp, cam_id_B)
                obs_id_B = self.obs_db.getObsIdForCamAtTime(timestamp, cam_id_B)
                
                common_corners = corners_A & corners_B
                
                #add graph edge if we found common corners
                if common_corners:
                    #add the edge if it doesn't exist yet
                    try:
                        edge_idx = G.get_eid(cam_id_A, cam_id_B)
                    except:
                        G.add_edges([(cam_id_A, cam_id_B)])
                        edge_idx = G.get_eid(cam_id_A, cam_id_B)
                        G.es[edge_idx]["obs_ids"] = list()
                        G.es[edge_idx]["weight"] = 0
                    
                    #store the observation ids with the lower camera id first on the edge
                    G.es[edge_idx]["weight"] += len(common_corners)
                    G.es[edge_idx]["obs_ids"].append( (obs_id_A, obs_id_B) if cam_id_A<cam_id_B else (obs_id_B, obs_id_A) )
        
        #store the graph  
        self.G = G
        
        #timing
        t1 = time.time()
        total = t1-t0
        sm.logDebug("It took {0}s to build the graph.".format(total))
Code example #3
    def addTargetView(self, rig_observations, T_tc_guess, force=False):
        #create the problem for this batch and try to add it
        batch_problem = CalibrationTargetOptimizationProblem.fromTargetViewObservations(self.cameras, self.target, self.baselines, T_tc_guess, rig_observations, useBlakeZissermanMest=self.useBlakeZissermanMest)
        self.estimator_return_value = self.estimator.addBatch(batch_problem, force)

        if self.estimator_return_value.numIterations >= self.optimizerOptions.maxIterations:
            sm.logError("Did not converge in maxIterations... restarting...")
            raise OptimizationDiverged

        success = self.estimator_return_value.batchAccepted
        if success:
            sm.logDebug("The estimator accepted this batch")
            self.views.append(batch_problem)
        else:
            sm.logDebug("The estimator did not accept this batch")
        return success
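addTargetView is designed to be fed one target view at a time, and it signals a non-converging batch by raising OptimizationDiverged (the exception used in the method above, assumed importable from the calibrator module). A hypothetical driver loop illustrating that calling pattern; the calibrator object, the list of (rig_observations, T_tc_guess) pairs and the restart policy are assumptions, not the actual kalibr driver:

def add_views(calibrator, views_with_guesses, max_restarts=3):
    #feed the views to the incremental estimator, restarting if it diverges
    for attempt in range(max_restarts):
        try:
            accepted = 0
            for rig_observations, T_tc_guess in views_with_guesses:
                if calibrator.addTargetView(rig_observations, T_tc_guess):
                    accepted += 1
            return accepted
        except OptimizationDiverged:
            #did not converge within maxIterations; start over
            continue
    raise RuntimeError("calibration diverged {0} times".format(max_restarts))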
Code example #4
    def addTargetView(self, rig_observations, T_tc_guess, force=False):
        #create the problem for this batch and try to add it
        batch_problem = CalibrationTargetOptimizationProblem.fromTargetViewObservations(self.cameras, self.target, self.baselines, T_tc_guess, rig_observations, useBlakeZissermanMest=self.useBlakeZissermanMest)
        self.estimator_return_value = self.estimator.addBatch(batch_problem, force)

        if self.estimator_return_value.numIterations >= self.optimizerOptions.maxIterations:
            sm.logError("Did not converge in maxIterations... restarting...")
            raise OptimizationDiverged

        success = self.estimator_return_value.batchAccepted
        if success:
            sm.logDebug("The estimator accepted this batch")
            self.views.append(batch_problem)
        else:
            sm.logDebug("The estimator did not accept this batch")
        return success
Code example #5
File: ToyLearner.py  Project: uschwes/planning_tools
def adaptNumberOfSamples(iteration,
                         learner,
                         nlow=50,
                         nhigh=500,
                         cntLow=10,
                         cntHigh=20):
    errorTermOptions = learner.options.etOptions
    if iteration <= cntLow:
        errorTermOptions.nMcmcSamplesForMean = nlow
    elif iteration < cntHigh and iteration > cntLow:
        errorTermOptions.nMcmcSamplesForMean = nlow + (iteration - cntLow) * (
            nhigh - nlow) / (cntHigh - cntLow)
    else:
        errorTermOptions.nMcmcSamplesForMean = nhigh
    learner.setErrorTermOptions(errorTermOptions)
    sm.logDebug("Setting number of samples for expectation to {0}".format(
        learner.options.etOptions.nMcmcSamplesForMean))
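adaptNumberOfSamples ramps the MCMC sample count linearly from nlow to nhigh between iterations cntLow and cntHigh. A small sketch of just that schedule without the learner object, using explicit floor division to match the integer arithmetic above under Python 2:

def sample_schedule(iteration, nlow=50, nhigh=500, cntLow=10, cntHigh=20):
    #piecewise-linear ramp: nlow up to cntLow, nhigh from cntHigh on
    if iteration <= cntLow:
        return nlow
    elif cntLow < iteration < cntHigh:
        return nlow + (iteration - cntLow) * (nhigh - nlow) // (cntHigh - cntLow)
    return nhigh

print([sample_schedule(i) for i in (5, 10, 12, 15, 20, 30)])
# [50, 50, 140, 275, 500, 500]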
Code example #6
    def findTimeshiftCameraImuPrior(self, imu, verbose=False):
        print "Estimating time shift camera to imu:"

        #fit a spline to the camera observations
        poseSpline = self.initPoseSplineFromCamera(timeOffsetPadding=0.0)

        #predict time shift prior
        t = []
        omega_measured_norm = []
        omega_predicted_norm = []

        for im in imu.imuData:
            tk = im.stamp.toSec()
            if tk > poseSpline.t_min() and tk < poseSpline.t_max():

                #get imu measurements and spline from camera
                omega_measured = im.omega
                omega_predicted = aopt.EuclideanExpression(
                    np.matrix(
                        poseSpline.angularVelocityBodyFrame(tk)).transpose())

                #calc norm
                t = np.hstack((t, tk))
                omega_measured_norm = np.hstack(
                    (omega_measured_norm, np.linalg.norm(omega_measured)))
                omega_predicted_norm = np.hstack(
                    (omega_predicted_norm,
                     np.linalg.norm(omega_predicted.toEuclidean())))

        #get the time shift
        corr = np.correlate(omega_predicted_norm, omega_measured_norm, "full")
        discrete_shift = corr.argmax() - (np.size(omega_measured_norm) - 1)

        #get cont. time shift
        times = [im.stamp.toSec() for im in imu.imuData]
        dT = np.mean(np.diff(times))
        shift = -discrete_shift * dT

        #Create plots
        if verbose:
            pl.plot(t, omega_measured_norm, label="measured_raw")
            pl.plot(t, omega_predicted_norm, label="predicted")
            pl.plot(t - shift, omega_measured_norm, label="measured_corrected")
            pl.legend()
            pl.title("Time shift prior camera-imu estimation")
            pl.figure()
            pl.plot(corr)
            pl.title(
                "Cross-correlation ||omega_predicted||, ||omega_measured||")
            pl.show()
            sm.logDebug("discrete time shift: {0}".format(discrete_shift))
            sm.logDebug("cont. time shift: {0}".format(shift))
            sm.logDebug("dT: {0}".format(dT))

        #store the timeshift (t_imu = t_cam + timeshiftCamToImuPrior)
        self.timeshiftCamToImuPrior = shift

        print "  Time shift camera to imu (t_imu = t_cam + shift):"
        print self.timeshiftCamToImuPrior
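The time-shift prior comes from cross-correlating the norm of the measured angular velocity with the norm predicted by the camera pose spline, then converting the peak lag into seconds. A self-contained numpy sketch of that estimator on a synthetic signal (the sample period, offset and signal shape are made up):

import numpy as np

dT = 0.005                     #assumed IMU period [s]
true_shift = 0.12              #assumed camera-to-IMU time offset [s]
t = np.arange(0.0, 10.0, dT)

def omega_norm(time):
    #synthetic angular-rate magnitude with two distinct motion bumps
    return np.exp(-(time - 3.0)**2) + 0.5 * np.exp(-2.0 * (time - 7.0)**2)

omega_predicted_norm = omega_norm(t)                #stands in for the spline prediction
omega_measured_norm = omega_norm(t - true_shift)    #stands in for the gyro measurements

corr = np.correlate(omega_predicted_norm, omega_measured_norm, "full")
discrete_shift = corr.argmax() - (np.size(omega_measured_norm) - 1)
shift = -discrete_shift * dT    #t_imu = t_cam + shift

print(shift)                    #~0.12: the assumed offset, recovered up to one sample period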
Code example #7
File: IccSensors.py  Project: AliAlawieh/kalibr
    def findTimeshiftCameraImuPrior(self, imu, verbose=False):
        print "Estimating time shift camera to imu:"
        
        #fit a spline to the camera observations
        poseSpline = self.initPoseSplineFromCamera( timeOffsetPadding=0.0 )
        
        #predict time shift prior 
        t=[]
        omega_measured_norm = []
        omega_predicted_norm = []
        
        for im in imu.imuData:
            tk = im.stamp.toSec()
            if tk > poseSpline.t_min() and tk < poseSpline.t_max():
                
                #get imu measurements and spline from camera
                omega_measured = im.omega
                omega_predicted = aopt.EuclideanExpression( np.matrix( poseSpline.angularVelocityBodyFrame( tk ) ).transpose() )

                #calc norm
                t = np.hstack( (t, tk) )
                omega_measured_norm = np.hstack( (omega_measured_norm, np.linalg.norm( omega_measured ) ))
                omega_predicted_norm = np.hstack( (omega_predicted_norm, np.linalg.norm( omega_predicted.toEuclidean() )) )
        
        #get the time shift
        corr = np.correlate(omega_predicted_norm, omega_measured_norm, "full")
        discrete_shift = corr.argmax() - (np.size(omega_measured_norm) - 1)
        
        #get cont. time shift
        times = [im.stamp.toSec() for im in imu.imuData]
        dT = np.mean(np.diff( times ))
        shift = -discrete_shift*dT
        
        #Create plots
        if verbose:
            pl.plot(t, omega_measured_norm, label="measured_raw")
            pl.plot(t, omega_predicted_norm, label="predicted")
            pl.plot(t-shift, omega_measured_norm, label="measured_corrected")
            pl.legend()
            pl.title("Time shift prior camera-imu estimation")
            pl.figure()
            pl.plot(corr)
            pl.title("Cross-correlation ||omega_predicted||, ||omega_measured||")
            pl.show()
            sm.logDebug("discrete time shift: {0}".format(discrete_shift))
            sm.logDebug("cont. time shift: {0}".format(shift))
            sm.logDebug("dT: {0}".format(dT))
        
        #store the timeshift (t_imu = t_cam + timeshiftCamToImuPrior)
        self.timeshiftCamToImuPrior = shift
        
        print "  Time shift camera to imu (t_imu = t_cam + shift):"
        print self.timeshiftCamToImuPrior
Code example #8
    def fromTargetViewObservations(cls,
                                   cameras,
                                   target,
                                   baselines,
                                   T_tc_guess,
                                   rig_observations,
                                   useBlakeZissermanMest=True):
        rval = CalibrationTargetOptimizationProblem()

        #store the arguments in case we want to rebuild a modified problem
        rval.cameras = cameras
        rval.target = target
        rval.baselines = baselines
        rval.T_tc_guess = T_tc_guess
        rval.rig_observations = rig_observations

        # 1. Create a design variable for this pose
        T_target_camera = T_tc_guess

        rval.dv_T_target_camera = aopt.TransformationDv(T_target_camera)
        for i in range(0, rval.dv_T_target_camera.numDesignVariables()):
            rval.addDesignVariable(
                rval.dv_T_target_camera.getDesignVariable(i),
                TRANSFORMATION_GROUP_ID)

        #2. add all baselines DVs
        for baseline_dv in baselines:
            for i in range(0, baseline_dv.numDesignVariables()):
                rval.addDesignVariable(baseline_dv.getDesignVariable(i),
                                       CALIBRATION_GROUP_ID)

        #3. add landmark DVs
        for p in target.P_t_dv:
            rval.addDesignVariable(p, LANDMARK_GROUP_ID)

        #4. add camera DVs
        for camera in cameras:
            if not camera.isGeometryInitialized:
                raise RuntimeError(
                    'The camera geometry is not initialized. Please initialize with initGeometry() or initGeometryFromDataset()'
                )
            camera.setDvActiveStatus(True, True, False)
            rval.addDesignVariable(camera.dv.distortionDesignVariable(),
                                   CALIBRATION_GROUP_ID)
            rval.addDesignVariable(camera.dv.projectionDesignVariable(),
                                   CALIBRATION_GROUP_ID)
            rval.addDesignVariable(camera.dv.shutterDesignVariable(),
                                   CALIBRATION_GROUP_ID)

        #5. add all observations for this view
        cams_in_view = set()
        rval.rerrs = dict()
        rerr_cnt = 0
        for cam_id, obs in rig_observations:
            camera = cameras[cam_id]
            cams_in_view.add(cam_id)

            #add reprojection errors
            #build baseline chain (target->cam0->baselines->camN)
            T_cam0_target = rval.dv_T_target_camera.expression.inverse()
            T_camN_calib = T_cam0_target
            for idx in range(0, cam_id):
                T_camN_calib = baselines[idx].toExpression() * T_camN_calib

            # \todo pass in the detector uncertainty somehow.
            cornerUncertainty = 1.0
            R = np.eye(2) * cornerUncertainty * cornerUncertainty
            invR = np.linalg.inv(R)

            rval.rerrs[cam_id] = list()
            for i in range(0, len(target.P_t_ex)):
                p_target = target.P_t_ex[i]
                valid, y = obs.imagePoint(i)
                if valid:
                    rerr_cnt += 1
                    # Create an error term.
                    rerr = camera.model.reprojectionError(
                        y, invR, T_camN_calib * p_target, camera.dv)
                    rerr.idx = i

                    #add blake-zisserman mest
                    if useBlakeZissermanMest:
                        mest = aopt.BlakeZissermanMEstimator(2.0)
                        rerr.setMEstimatorPolicy(mest)
                    rval.addErrorTerm(rerr)
                    rval.rerrs[cam_id].append(rerr)
                else:
                    rval.rerrs[cam_id].append(None)

        sm.logDebug(
            "Adding a view with {0} cameras and {1} error terms".format(
                len(cams_in_view), rerr_cnt))
        return rval
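The reprojection terms above rely on chaining homogeneous transforms: a target point is mapped into cam0 via T_cam0_target and then pushed through the baselines T_cam(i+1)_cam(i) up to camN. A plain numpy sketch of that chaining with invented transforms (no aslam expression types involved):

import numpy as np

def make_T(yaw_deg, t):
    #homogeneous transform: rotation about z plus a translation
    a = np.deg2rad(yaw_deg)
    T = np.eye(4)
    T[:3, :3] = np.array([[np.cos(a), -np.sin(a), 0.0],
                          [np.sin(a),  np.cos(a), 0.0],
                          [0.0,        0.0,       1.0]])
    T[:3, 3] = t
    return T

T_cam0_target = make_T(5.0, [0.0, 0.0, 1.5])      #maps target coordinates into cam0
baselines = [make_T(0.0, [0.12, 0.0, 0.0]),       #T_cam1_cam0
             make_T(0.0, [0.11, 0.0, 0.0])]       #T_cam2_cam1

p_target = np.array([0.1, 0.2, 0.0, 1.0])         #homogeneous target corner

cam_id = 2
T_camN_target = T_cam0_target.copy()
for idx in range(cam_id):                         #same chain as in the code above
    T_camN_target = baselines[idx].dot(T_camN_target)

print(T_camN_target.dot(p_target)[:3])            #the corner in camN coordinates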
Code example #9
File: MulticamGraph.py  Project: AliAlawieh/kalibr
    def getInitialGuesses(self, cameras):
        
        if not self.G:
            raise RuntimeError("Graph is uninitialized!")
        
        #################################################################
        ## STEP 0: check if all cameras in the chain are connected
        ##         through common target point observations
        ##         (=all vertices connected?)
        #################################################################
        if not self.isGraphConnected():
            sm.logError("The cameras are not connected through mutual target observations! " 
                        "Please provide another dataset...")
            
            self.plotGraph()
            sys.exit(0)
        
        #################################################################
        ## STEP 1: get baseline initial guesses by calibrating good 
        ##         camera pairs using a stereo calibration
        ## 
        #################################################################

        #first we need to find the best camera pairs to obtain the initial guesses
        #--> use the pairs that share the most common observed target corners
        #The graph is built with weighted edges that represent the number of common
        #target corners, so we can use Dijkstra's algorithm to get the best pair
        #configuration for the initial pair calibrations
        weights = [1.0/commonPoints for commonPoints in self.G.es["weight"]]

        #choose the cam with the fewest edges as base_cam
        outdegrees = self.G.vs.outdegree()
        base_cam_id = outdegrees.index(min(outdegrees))

        #solve for shortest path  (=optimal transformation chaining)
        edges_on_path = self.G.get_shortest_paths(0, weights=weights, output="epath")
        
        self.optimal_baseline_edges = set([item for sublist in edges_on_path for item in sublist])
        
        
        #################################################################
        ## STEP 2: solve stereo calibration problem for the baselines
        ##         (baselines are always from lower_id to higher_id cams!)
        #################################################################
        
        #calibrate all cameras in pairs
        for baseline_edge_id in self.optimal_baseline_edges:

            #get the cam_nrs from the graph edge (calibrate from low to high id)
            vertices = self.G.es[baseline_edge_id].tuple
            if vertices[0]<vertices[1]:
                camL_nr = vertices[0]
                camH_nr = vertices[1]
            else:
                camL_nr = vertices[1]
                camH_nr = vertices[0]
            
            print "\t initializing camera pair ({0},{1})...  ".format(camL_nr, camH_nr)          

            #run the pair extrinsic calibration
            obs_list = self.obs_db.getAllObsTwoCams(camL_nr, camH_nr)
            success, baseline_HL = kcc.stereoCalibrate(cameras[camL_nr], 
                                                       cameras[camH_nr], 
                                                       obs_list,
                                                       distortionActive=False)
            
            if success:
                sm.logDebug("baseline_{0}_{1}={2}".format(camL_nr, camH_nr, baseline_HL.T()))
            else:
                sm.logError("initialization of camera pair ({0},{1}) failed  ".format(camL_nr, camH_nr))
                sm.logError("estimated baseline_{0}_{1}={2}".format(camL_nr, camH_nr, baseline_HL.T()))
        
            #store the baseline in the graph
            self.G.es[ self.G.get_eid(camL_nr, camH_nr) ]["baseline_HL"] = baseline_HL
        
        #################################################################
        ## STEP 3: transform from the "optimal" baseline chain to camera chain ordering
        ##         (=> baseline_0 = T_c1_c0 | 
        #################################################################
        
        #construct the optimal path graph
        G_optimal_baselines = self.G.copy()
        
        eid_not_optimal_path = set(range(0,len(G_optimal_baselines.es)))
        for eid in self.optimal_baseline_edges:
            eid_not_optimal_path.remove(eid)
        G_optimal_baselines.delete_edges( eid_not_optimal_path )
        
        #now we convert the arbitrary baseline graph to baselines starting from 
        # cam0 and traverse the chain (cam0->cam1->cam2->camN)
        baselines = []
        for baseline_id in range(0, self.numCams-1):
            #find the shortest path on the graph
            path = G_optimal_baselines.get_shortest_paths(baseline_id, baseline_id+1)[0]
            
            #get the baseline from cam with id baseline_id to baseline_id+1
            baseline_HL = sm.Transformation()
            for path_idx in range(0, len(path)-1):
                source_vert = path[path_idx]
                target_vert = path[path_idx+1]
                T_edge = self.G.es[ self.G.get_eid(source_vert, target_vert) ]["baseline_HL"]
            
                #correct the direction (baselines always from low to high cam id!)
                T_edge = T_edge if source_vert<target_vert else T_edge.inverse()
            
                #chain up
                baseline_HL = T_edge * baseline_HL
            
            #store in graph
            baselines.append(baseline_HL)
 
        #################################################################
        ## STEP 4: refine guess in full batch
        #################################################################
        success, baselines = kcc.solveFullBatch(cameras, baselines, self)
        
        if not success:
            sm.logWarn("Full batch refinement failed!")
    
        return baselines
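STEP 1 above turns the covisibility weights into traversal costs (1/commonPoints) and keeps only the edges that lie on shortest paths, so well-observed camera pairs are the ones that get stereo-calibrated. A standalone python-igraph sketch of that selection step, with an invented graph and weights:

import igraph

#covisibility graph: 4 cameras, edge weight = number of shared target corners
G = igraph.Graph(4)
G.add_edges([(0, 1), (1, 2), (0, 2), (2, 3)])
G.es["weight"] = [800, 50, 600, 400]

#many shared corners -> low traversal cost
costs = [1.0 / w for w in G.es["weight"]]

#edge ids on the shortest paths from cam0 to every other camera
edges_on_path = G.get_shortest_paths(0, weights=costs, output="epath")
optimal_edges = set(e for path in edges_on_path for e in path)

for eid in sorted(optimal_edges):
    print(G.es[eid].tuple)    #pairs to calibrate: (0, 1), (0, 2), (2, 3)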
Code example #10
    def getInitialGuesses(self, cameras):

        if not self.G:
            raise RuntimeError("Graph is uninitialized!")

        #################################################################
        ## STEP 0: check if all cameras in the chain are connected
        ##         through common target point observations
        ##         (=all vertices connected?)
        #################################################################
        if not self.isGraphConnected():
            sm.logError(
                "The cameras are not connected through mutual target observations! "
                "Please provide another dataset...")

            self.plotGraph()
            sys.exit(0)

        #################################################################
        ## STEP 1: get baseline initial guesses by calibrating good
        ##         camera pairs using a stereo calibration
        ##
        #################################################################

        #first we need to find the best camera pairs to obtain the initial guesses
        #--> use the pairs that share the most common observed target corners
        #The graph is built with weighted edges that represent the number of common
        #target corners, so we can use Dijkstra's algorithm to get the best pair
        #configuration for the initial pair calibrations
        weights = [1.0 / commonPoints for commonPoints in self.G.es["weight"]]

        #choose the cam with the fewest edges as base_cam
        outdegrees = self.G.vs.outdegree()
        base_cam_id = outdegrees.index(min(outdegrees))

        #solve for shortest path  (=optimal transformation chaining)
        edges_on_path = self.G.get_shortest_paths(0,
                                                  weights=weights,
                                                  output="epath")

        self.optimal_baseline_edges = set(
            [item for sublist in edges_on_path for item in sublist])

        #################################################################
        ## STEP 2: solve stereo calibration problem for the baselines
        ##         (baselines are always from lower_id to higher_id cams!)
        #################################################################

        #calibrate all cameras in pairs
        for baseline_edge_id in self.optimal_baseline_edges:

            #get the cam_nrs from the graph edge (calibrate from low to high id)
            vertices = self.G.es[baseline_edge_id].tuple
            if vertices[0] < vertices[1]:
                camL_nr = vertices[0]
                camH_nr = vertices[1]
            else:
                camL_nr = vertices[1]
                camH_nr = vertices[0]

            print "\t initializing camera pair ({0},{1})...  ".format(
                camL_nr, camH_nr)

            #run the pair extrinsic calibration
            obs_list = self.obs_db.getAllObsTwoCams(camL_nr, camH_nr)
            success, baseline_HL = kcc.stereoCalibrate(cameras[camL_nr],
                                                       cameras[camH_nr],
                                                       obs_list,
                                                       distortionActive=False)

            if success:
                sm.logDebug("baseline_{0}_{1}={2}".format(
                    camL_nr, camH_nr, baseline_HL.T()))
            else:
                sm.logError(
                    "initialization of camera pair ({0},{1}) failed  ".format(
                        camL_nr, camH_nr))
                sm.logError("estimated baseline_{0}_{1}={2}".format(
                    camL_nr, camH_nr, baseline_HL.T()))

            #store the baseline in the graph
            self.G.es[self.G.get_eid(camL_nr,
                                     camH_nr)]["baseline_HL"] = baseline_HL

        #################################################################
        ## STEP 3: transform from the "optimal" baseline chain to camera chain ordering
        ##         (=> baseline_0 = T_c1_c0 |
        #################################################################

        #construct the optimal path graph
        G_optimal_baselines = self.G.copy()

        eid_not_optimal_path = set(range(0, len(G_optimal_baselines.es)))
        for eid in self.optimal_baseline_edges:
            eid_not_optimal_path.remove(eid)
        G_optimal_baselines.delete_edges(eid_not_optimal_path)

        #now we convert the arbitrary baseline graph to baselines starting from
        # cam0 and traverse the chain (cam0->cam1->cam2->camN)
        baselines = []
        for baseline_id in range(0, self.numCams - 1):
            #find the shortest path on the graph
            path = G_optimal_baselines.get_shortest_paths(
                baseline_id, baseline_id + 1)[0]

            #get the baseline from cam with id baseline_id to baseline_id+1
            baseline_HL = sm.Transformation()
            for path_idx in range(0, len(path) - 1):
                source_vert = path[path_idx]
                target_vert = path[path_idx + 1]
                T_edge = self.G.es[self.G.get_eid(source_vert,
                                                  target_vert)]["baseline_HL"]

                #correct the direction (baselines always from low to high cam id!)
                T_edge = T_edge if source_vert < target_vert else T_edge.inverse()

                #chain up
                baseline_HL = T_edge * baseline_HL

            #store in graph
            baselines.append(baseline_HL)

        #################################################################
        ## STEP 4: refine guess in full batch
        #################################################################
        success, baselines = kcc.solveFullBatch(cameras, baselines, self)

        if not success:
            sm.logWarn("Full batch refinement failed!")

        return baselines
Code example #11
def solveFullBatch(cameras, baseline_guesses, graph):
    ############################################
    ## solve the bundle adjustment
    ############################################
    problem = aopt.OptimizationProblem()

    #add camera dvs
    for cam in cameras:
        cam.setDvActiveStatus(True, True, False)
        problem.addDesignVariable(cam.dv.distortionDesignVariable())
        problem.addDesignVariable(cam.dv.projectionDesignVariable())
        problem.addDesignVariable(cam.dv.shutterDesignVariable())

    baseline_dvs = list()
    for baseline_idx in range(0, len(cameras) - 1):
        baseline_dv = aopt.TransformationDv(baseline_guesses[baseline_idx])

        for i in range(0, baseline_dv.numDesignVariables()):
            problem.addDesignVariable(baseline_dv.getDesignVariable(i))

        baseline_dvs.append(baseline_dv)

    #corner uncertainty
    cornerUncertainty = 1.0
    R = np.eye(2) * cornerUncertainty * cornerUncertainty
    invR = np.linalg.inv(R)

    #get the target
    target = cameras[0].ctarget.detector.target()

    #Add calibration target reprojection error terms for all cameras in the chain
    target_pose_dvs = list()

    #iterate over all views
    reprojectionErrors = []
    timestamps = graph.obs_db.getAllViewTimestamps()
    for view_id, timestamp in enumerate(timestamps):

        #get all observations for all cams at this time
        obs_tuple = graph.obs_db.getAllObsAtTimestamp(timestamp)

        #create a target pose dv for all target views (= T_cam0_w)
        T0 = graph.getTargetPoseGuess(timestamp, cameras, baseline_guesses)
        target_pose_dv = addPoseDesignVariable(problem, T0)
        target_pose_dvs.append(target_pose_dv)

        for cidx, obs in obs_tuple:
            cam = cameras[cidx]

            #calibration target coords to camera X coords
            T_cam0_calib = target_pose_dv.toExpression().inverse()

            #build pose chain (target->cam0->baselines->camN)
            T_camN_calib = T_cam0_calib
            for idx in range(0, cidx):
                T_camN_calib = baseline_dvs[idx].toExpression() * T_camN_calib

            ## add error terms
            for i in range(0, target.size()):
                p_target = aopt.HomogeneousExpression(
                    sm.toHomogeneous(target.point(i)))
                valid, y = obs.imagePoint(i)
                if valid:
                    rerr = cameras[cidx].model.reprojectionError(
                        y, invR, T_camN_calib * p_target, cameras[cidx].dv)
                    problem.addErrorTerm(rerr)
                    reprojectionErrors.append(rerr)

    sm.logDebug("solveFullBatch: added {0} camera error terms".format(
        len(reprojectionErrors)))

    ############################################
    ## solve
    ############################################
    options = aopt.Optimizer2Options()
    options.verbose = sm.getLoggingLevel() == sm.LoggingLevel.Debug
    options.nThreads = 4
    options.convergenceDeltaX = 1e-3
    options.convergenceDeltaJ = 1
    options.maxIterations = 250
    options.trustRegionPolicy = aopt.LevenbergMarquardtTrustRegionPolicy(10)

    optimizer = aopt.Optimizer2(options)
    optimizer.setProblem(problem)

    #verbose output
    if sm.getLoggingLevel() == sm.LoggingLevel.Debug:
        sm.logDebug("Before optimization:")
        e2 = np.array([e.evaluateError() for e in reprojectionErrors])
        sm.logDebug(
            " Reprojection error squarred (camL):  mean {0}, median {1}, std: {2}"
            .format(np.mean(e2), np.median(e2), np.std(e2)))

    #run intrinsic calibration
    try:
        retval = optimizer.optimize()
        if retval.linearSolverFailure:
            sm.logError("calibrateIntrinsics: Optimization failed!")
        success = not retval.linearSolverFailure

    except:
        sm.logError("calibrateIntrinsics: Optimization failed!")
        success = False

    baselines = list()
    for baseline_dv in baseline_dvs:
        baselines.append(sm.Transformation(baseline_dv.T()))

    return success, baselines
Code example #12
def calibrateIntrinsics(cam_geometry,
                        obslist,
                        distortionActive=True,
                        intrinsicsActive=True):
    #verbose output
    if sm.getLoggingLevel() == sm.LoggingLevel.Debug:
        d = cam_geometry.geometry.projection().distortion().getParameters(
        ).flatten()
        p = cam_geometry.geometry.projection().getParameters().flatten()
        sm.logDebug("calibrateIntrinsics: intrinsics guess: {0}".format(p))
        sm.logDebug("calibrateIntrinsics: distortion guess: {0}".format(d))

    ############################################
    ## solve the bundle adjustment
    ############################################
    problem = aopt.OptimizationProblem()

    #add camera dvs
    cam_geometry.setDvActiveStatus(intrinsicsActive, distortionActive, False)
    problem.addDesignVariable(cam_geometry.dv.distortionDesignVariable())
    problem.addDesignVariable(cam_geometry.dv.projectionDesignVariable())
    problem.addDesignVariable(cam_geometry.dv.shutterDesignVariable())

    #corner uncertainty
    cornerUncertainty = 1.0
    R = np.eye(2) * cornerUncertainty * cornerUncertainty
    invR = np.linalg.inv(R)

    #get the image and target points corresponding to the frame
    target = cam_geometry.ctarget.detector.target()

    #target pose dv for all target views (=T_camL_w)
    reprojectionErrors = []
    sm.logDebug(
        "calibrateIntrinsics: adding camera error terms for {0} calibration targets"
        .format(len(obslist)))
    target_pose_dvs = list()
    for obs in obslist:
        success, T_t_c = cam_geometry.geometry.estimateTransformation(obs)
        target_pose_dv = addPoseDesignVariable(problem, T_t_c)
        target_pose_dvs.append(target_pose_dv)

        T_cam_w = target_pose_dv.toExpression().inverse()

        ## add error terms
        for i in range(0, target.size()):
            p_target = aopt.HomogeneousExpression(
                sm.toHomogeneous(target.point(i)))
            valid, y = obs.imagePoint(i)
            if valid:
                rerr = cam_geometry.model.reprojectionError(
                    y, invR, T_cam_w * p_target, cam_geometry.dv)
                problem.addErrorTerm(rerr)
                reprojectionErrors.append(rerr)

    sm.logDebug("calibrateIntrinsics: added {0} camera error terms".format(
        len(reprojectionErrors)))

    ############################################
    ## solve
    ############################################
    options = aopt.Optimizer2Options()
    options.verbose = sm.getLoggingLevel() == sm.LoggingLevel.Debug
    options.nThreads = 4
    options.convergenceDeltaX = 1e-3
    options.convergenceDeltaJ = 1
    options.maxIterations = 200
    options.trustRegionPolicy = aopt.LevenbergMarquardtTrustRegionPolicy(10)

    optimizer = aopt.Optimizer2(options)
    optimizer.setProblem(problem)

    #verbose output
    if sm.getLoggingLevel() == sm.LoggingLevel.Debug:
        sm.logDebug("Before optimization:")
        e2 = np.array([e.evaluateError() for e in reprojectionErrors])
        sm.logDebug(
            " Reprojection error squarred (camL):  mean {0}, median {1}, std: {2}"
            .format(np.mean(e2), np.median(e2), np.std(e2)))

    #run intrinsic calibration
    try:
        retval = optimizer.optimize()
        if retval.linearSolverFailure:
            sm.logError("calibrateIntrinsics: Optimization failed!")
        success = not retval.linearSolverFailure

    except:
        sm.logError("calibrateIntrinsics: Optimization failed!")
        success = False

    #verbose output
    if sm.getLoggingLevel() == sm.LoggingLevel.Debug:
        d = cam_geometry.geometry.projection().distortion().getParameters(
        ).flatten()
        p = cam_geometry.geometry.projection().getParameters().flatten()
        sm.logDebug(
            "calibrateIntrinsics: guess for intrinsics cam: {0}".format(p))
        sm.logDebug(
            "calibrateIntrinsics: guess for distortion cam: {0}".format(d))

    return success
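Both solveFullBatch and calibrateIntrinsics report the same mean/median/std statistics over the squared reprojection errors. A tiny numpy helper capturing that reporting pattern; the example error values are placeholders for whatever evaluateError() returns:

import numpy as np

def reprojection_stats(squared_errors):
    #mean / median / std of the squared reprojection errors, as in the debug output
    e2 = np.array(squared_errors, dtype=float)
    return np.mean(e2), np.median(e2), np.std(e2)

mean, median, std = reprojection_stats([0.8, 1.1, 0.4, 2.5, 0.9])
print(" Reprojection error squared:  mean {0}, median {1}, std: {2}".format(mean, median, std))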
Code example #13
def stereoCalibrate(camL_geometry,
                    camH_geometry,
                    obslist,
                    distortionActive=False,
                    baseline=None):
    #####################################################
    ## find the initial guess as the median of all PnP solutions
    #####################################################
    if baseline is None:
        r = []
        t = []
        for obsL, obsH in obslist:
            #if we have observations for both cams
            if obsL is not None and obsH is not None:
                success, T_L = camL_geometry.geometry.estimateTransformation(
                    obsL)
                success, T_H = camH_geometry.geometry.estimateTransformation(
                    obsH)

                baseline = T_H.inverse() * T_L
                t.append(baseline.t())
                rv = sm.RotationVector()
                r.append(rv.rotationMatrixToParameters(baseline.C()))

        r_median = np.median(np.asmatrix(r), axis=0).flatten().T
        R_median = rv.parametersToRotationMatrix(r_median)
        t_median = np.median(np.asmatrix(t), axis=0).flatten().T

        baseline_HL = sm.Transformation(sm.rt2Transform(R_median, t_median))
    else:
        baseline_HL = baseline

    #verbose output
    if sm.getLoggingLevel() == sm.LoggingLevel.Debug:
        dL = camL_geometry.geometry.projection().distortion().getParameters(
        ).flatten()
        pL = camL_geometry.geometry.projection().getParameters().flatten()
        dH = camH_geometry.geometry.projection().distortion().getParameters(
        ).flatten()
        pH = camH_geometry.geometry.projection().getParameters().flatten()
        sm.logDebug("initial guess for stereo calib: {0}".format(
            baseline_HL.T()))
        sm.logDebug("initial guess for intrinsics camL: {0}".format(pL))
        sm.logDebug("initial guess for intrinsics camH: {0}".format(pH))
        sm.logDebug("initial guess for distortion camL: {0}".format(dL))
        sm.logDebug("initial guess for distortion camH: {0}".format(dH))

    ############################################
    ## solve the bundle adjustment
    ############################################
    problem = aopt.OptimizationProblem()

    #baseline design variable
    baseline_dv = addPoseDesignVariable(problem, baseline_HL)

    #target pose dv for all target views (=T_camL_w)
    target_pose_dvs = list()
    for obsL, obsH in obslist:
        if obsL is not None:  #use camL if we have an obs for this one
            success, T_t_cL = camL_geometry.geometry.estimateTransformation(
                obsL)
        else:
            success, T_t_cH = camH_geometry.geometry.estimateTransformation(
                obsH)
            T_t_cL = T_t_cH * baseline_HL  #apply baseline for the second camera

        target_pose_dv = addPoseDesignVariable(problem, T_t_cL)
        target_pose_dvs.append(target_pose_dv)

    #add camera dvs
    camL_geometry.setDvActiveStatus(True, distortionActive, False)
    camH_geometry.setDvActiveStatus(True, distortionActive, False)
    problem.addDesignVariable(camL_geometry.dv.distortionDesignVariable())
    problem.addDesignVariable(camL_geometry.dv.projectionDesignVariable())
    problem.addDesignVariable(camL_geometry.dv.shutterDesignVariable())
    problem.addDesignVariable(camH_geometry.dv.distortionDesignVariable())
    problem.addDesignVariable(camH_geometry.dv.projectionDesignVariable())
    problem.addDesignVariable(camH_geometry.dv.shutterDesignVariable())

    ############################################
    ## add error terms
    ############################################

    #corner uncertainty
    # \todo pass in the detector uncertainty somehow.
    cornerUncertainty = 1.0
    R = np.eye(2) * cornerUncertainty * cornerUncertainty
    invR = np.linalg.inv(R)

    #Add reprojection error terms for both cameras
    reprojectionErrors0 = []
    reprojectionErrors1 = []

    for cidx, cam in enumerate([camL_geometry, camH_geometry]):
        sm.logDebug(
            "stereoCalibration: adding camera error terms for {0} calibration targets"
            .format(len(obslist)))

        #get the image and target points corresponding to the frame
        target = cam.ctarget.detector.target()

        #add error terms for all observations
        for view_id, obstuple in enumerate(obslist):

            #add error terms if we have an observation for this cam
            obs = obstuple[cidx]
            if obs is not None:
                T_cam_w = target_pose_dvs[view_id].toExpression().inverse()

                #add the baseline for the second camera
                if cidx != 0:
                    T_cam_w = baseline_dv.toExpression() * T_cam_w

                for i in range(0, target.size()):
                    p_target = aopt.HomogeneousExpression(
                        sm.toHomogeneous(target.point(i)))
                    valid, y = obs.imagePoint(i)
                    if valid:
                        # Create an error term.
                        rerr = cam.model.reprojectionError(
                            y, invR, T_cam_w * p_target, cam.dv)
                        rerr.idx = i
                        problem.addErrorTerm(rerr)

                        if cidx == 0:
                            reprojectionErrors0.append(rerr)
                        else:
                            reprojectionErrors1.append(rerr)

        sm.logDebug("stereoCalibrate: added {0} camera error terms".format(
            len(reprojectionErrors0) + len(reprojectionErrors1)))

    ############################################
    ## solve
    ############################################
    options = aopt.Optimizer2Options()
    options.verbose = sm.getLoggingLevel() == sm.LoggingLevel.Debug
    options.nThreads = 4
    options.convergenceDeltaX = 1e-3
    options.convergenceDeltaJ = 1
    options.maxIterations = 200
    options.trustRegionPolicy = aopt.LevenbergMarquardtTrustRegionPolicy(10)

    optimizer = aopt.Optimizer2(options)
    optimizer.setProblem(problem)

    #verbose output
    if sm.getLoggingLevel() == sm.LoggingLevel.Debug:
        sm.logDebug("Before optimization:")
        e2 = np.array([e.evaluateError() for e in reprojectionErrors0])
        sm.logDebug(
            " Reprojection error squarred (camL):  mean {0}, median {1}, std: {2}"
            .format(np.mean(e2), np.median(e2), np.std(e2)))
        e2 = np.array([e.evaluateError() for e in reprojectionErrors1])
        sm.logDebug(
            " Reprojection error squarred (camH):  mean {0}, median {1}, std: {2}"
            .format(np.mean(e2), np.median(e2), np.std(e2)))

        sm.logDebug("baseline={0}".format(
            baseline_dv.toTransformationMatrix()))

    try:
        retval = optimizer.optimize()
        if retval.linearSolverFailure:
            sm.logError("stereoCalibrate: Optimization failed!")
        success = not retval.linearSolverFailure
    except:
        sm.logError("stereoCalibrate: Optimization failed!")
        success = False

    if sm.getLoggingLevel() == sm.LoggingLevel.Debug:
        sm.logDebug("After optimization:")
        e2 = np.array([e.evaluateError() for e in reprojectionErrors0])
        sm.logDebug(
            " Reprojection error squarred (camL):  mean {0}, median {1}, std: {2}"
            .format(np.mean(e2), np.median(e2), np.std(e2)))
        e2 = np.array([e.evaluateError() for e in reprojectionErrors1])
        sm.logDebug(
            " Reprojection error squarred (camH):  mean {0}, median {1}, std: {2}"
            .format(np.mean(e2), np.median(e2), np.std(e2)))

    #verbose output
    if sm.getLoggingLevel() == sm.LoggingLevel.Debug:
        dL = camL_geometry.geometry.projection().distortion().getParameters(
        ).flatten()
        pL = camL_geometry.geometry.projection().getParameters().flatten()
        dH = camH_geometry.geometry.projection().distortion().getParameters(
        ).flatten()
        pH = camH_geometry.geometry.projection().getParameters().flatten()
        sm.logDebug("guess for intrinsics camL: {0}".format(pL))
        sm.logDebug("guess for intrinsics camH: {0}".format(pH))
        sm.logDebug("guess for distortion camL: {0}".format(dL))
        sm.logDebug("guess for distortion camH: {0}".format(dH))

    if success:
        baseline_HL = sm.Transformation(baseline_dv.toTransformationMatrix())
        return success, baseline_HL
    else:
        #return the initial guess if we fail
        return success, baseline_HL
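When no prior baseline is given, stereoCalibrate seeds the optimization with the per-view relative pose T_H^-1 * T_L, averaged robustly as the median of the translations and of a rotation-vector parameterization. A numpy/SciPy sketch of that averaging, using scipy.spatial.transform.Rotation (SciPy >= 1.4) in place of the sm.RotationVector helper; the per-view poses would come from a PnP solver:

import numpy as np
from scipy.spatial.transform import Rotation

def median_relative_pose(T_L_list, T_H_list):
    #median of T_H^-1 * T_L over all views, returned as a 4x4 homogeneous matrix
    rvecs, ts = [], []
    for T_L, T_H in zip(T_L_list, T_H_list):
        T_HL = np.linalg.inv(T_H).dot(T_L)       #relative pose for this view
        rvecs.append(Rotation.from_matrix(T_HL[:3, :3]).as_rotvec())
        ts.append(T_HL[:3, 3])
    T = np.eye(4)
    T[:3, :3] = Rotation.from_rotvec(np.median(rvecs, axis=0)).as_matrix()
    T[:3, 3] = np.median(ts, axis=0)
    return T

Component-wise medians of rotation vectors are only a rough average, but that is enough for an initial guess that the subsequent bundle adjustment refines.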
Code example #14
    def fromTargetViewObservations(cls, cameras, target, baselines, T_tc_guess, rig_observations, useBlakeZissermanMest=True):
        rval = CalibrationTargetOptimizationProblem()

        #store the arguments in case we want to rebuild a modified problem
        rval.cameras = cameras
        rval.target = target
        rval.baselines = baselines
        rval.T_tc_guess = T_tc_guess
        rval.rig_observations = rig_observations

        # 1. Create a design variable for this pose
        T_target_camera = T_tc_guess

        rval.dv_T_target_camera = aopt.TransformationDv(T_target_camera)
        for i in range(0, rval.dv_T_target_camera.numDesignVariables()):
            rval.addDesignVariable( rval.dv_T_target_camera.getDesignVariable(i), TRANSFORMATION_GROUP_ID)

        #2. add all baselines DVs
        for baseline_dv in baselines:
            for i in range(0, baseline_dv.numDesignVariables()):
                rval.addDesignVariable(baseline_dv.getDesignVariable(i), CALIBRATION_GROUP_ID)

        #3. add landmark DVs
        for p in target.P_t_dv:
            rval.addDesignVariable(p,LANDMARK_GROUP_ID)

        #4. add camera DVs
        for camera in cameras:
            if not camera.isGeometryInitialized:
                raise RuntimeError('The camera geometry is not initialized. Please initialize with initGeometry() or initGeometryFromDataset()')
            camera.setDvActiveStatus(True, True, False)
            rval.addDesignVariable(camera.dv.distortionDesignVariable(), CALIBRATION_GROUP_ID)
            rval.addDesignVariable(camera.dv.projectionDesignVariable(), CALIBRATION_GROUP_ID)
            rval.addDesignVariable(camera.dv.shutterDesignVariable(), CALIBRATION_GROUP_ID)

        #5. add all observations for this view
        cams_in_view = set()
        rval.rerrs=dict()
        rerr_cnt=0
        for cam_id, obs in rig_observations:
            camera = cameras[cam_id]
            cams_in_view.add(cam_id)

            #add reprojection errors
            #build baseline chain (target->cam0->baselines->camN)
            T_cam0_target = rval.dv_T_target_camera.expression.inverse()
            T_camN_calib = T_cam0_target
            for idx in range(0, cam_id):
                T_camN_calib =  baselines[idx].toExpression() * T_camN_calib

            # \todo pass in the detector uncertainty somehow.
            cornerUncertainty = 1.0
            R = np.eye(2) * cornerUncertainty * cornerUncertainty
            invR = np.linalg.inv(R)

            rval.rerrs[cam_id] = list()
            for i in range(0,len(target.P_t_ex)):
                p_target = target.P_t_ex[i]
                valid, y = obs.imagePoint(i)
                if valid:
                    rerr_cnt+=1
                    # Create an error term.
                    rerr = camera.model.reprojectionError(y, invR, T_camN_calib * p_target, camera.dv)
                    rerr.idx = i

                    #add blake-zisserman mest
                    if useBlakeZissermanMest:
                        mest = aopt.BlakeZissermanMEstimator( 2.0 )
                        rerr.setMEstimatorPolicy(mest)
                    rval.addErrorTerm(rerr)
                    rval.rerrs[cam_id].append(rerr)
                else:
                    rval.rerrs[cam_id].append(None)

        sm.logDebug("Adding a view with {0} cameras and {1} error terms".format(len(cams_in_view), rerr_cnt))
        return rval
Code example #15
def solveFullBatch(cameras, baseline_guesses, graph):    
    ############################################
    ## solve the bundle adjustment
    ############################################
    problem = aopt.OptimizationProblem()
    
    #add camera dvs
    for cam in cameras:
        cam.setDvActiveStatus(True, True, False)
        problem.addDesignVariable(cam.dv.distortionDesignVariable())
        problem.addDesignVariable(cam.dv.projectionDesignVariable())
        problem.addDesignVariable(cam.dv.shutterDesignVariable())
    
    baseline_dvs = list()
    for baseline_idx in range(0, len(cameras)-1): 
        baseline_dv = aopt.TransformationDv(baseline_guesses[baseline_idx])
        
        for i in range(0, baseline_dv.numDesignVariables()):
            problem.addDesignVariable(baseline_dv.getDesignVariable(i))
        
        baseline_dvs.append( baseline_dv )
    
    #corner uncertainty
    cornerUncertainty = 1.0
    R = np.eye(2) * cornerUncertainty * cornerUncertainty
    invR = np.linalg.inv(R)
    
    #get the target
    target = cameras[0].ctarget.detector.target()

    #Add calibration target reprojection error terms for all cameras in the chain
    target_pose_dvs = list()
      
    #iterate over all views
    reprojectionErrors = []
    timestamps = graph.obs_db.getAllViewTimestamps()
    for view_id, timestamp in enumerate(timestamps):
        
        #get all observations for all cams at this time
        obs_tuple = graph.obs_db.getAllObsAtTimestamp(timestamp)

        #create a target pose dv for all target views (= T_cam0_w)
        T0 = graph.getTargetPoseGuess(timestamp, cameras, baseline_guesses)
        target_pose_dv = addPoseDesignVariable(problem, T0)
        target_pose_dvs.append(target_pose_dv)
        

        for cidx, obs in obs_tuple:
            cam = cameras[cidx]
              
            #calibration target coords to camera X coords
            T_cam0_calib = target_pose_dv.toExpression().inverse()

            #build pose chain (target->cam0->baselines->camN)
            T_camN_calib = T_cam0_calib
            for idx in range(0, cidx):
                T_camN_calib = baseline_dvs[idx].toExpression() * T_camN_calib
                
        
            ## add error terms
            for i in range(0, target.size()):
                p_target = aopt.HomogeneousExpression(sm.toHomogeneous(target.point(i)));
                valid, y = obs.imagePoint(i)
                if valid:
                    rerr = cameras[cidx].model.reprojectionError(y, invR, T_camN_calib * p_target, cameras[cidx].dv)
                    problem.addErrorTerm(rerr)
                    reprojectionErrors.append(rerr)
                                                    
    sm.logDebug("solveFullBatch: added {0} camera error terms".format(len(reprojectionErrors)))
    
    ############################################
    ## solve
    ############################################       
    options = aopt.Optimizer2Options()
    options.verbose = sm.getLoggingLevel() == sm.LoggingLevel.Debug
    options.nThreads = 4
    options.convergenceDeltaX = 1e-3
    options.convergenceDeltaJ = 1
    options.maxIterations = 250
    options.trustRegionPolicy = aopt.LevenbergMarquardtTrustRegionPolicy(10)

    optimizer = aopt.Optimizer2(options)
    optimizer.setProblem(problem)

    #verbose output
    if sm.getLoggingLevel()==sm.LoggingLevel.Debug:
        sm.logDebug("Before optimization:")
        e2 = np.array([ e.evaluateError() for e in reprojectionErrors ])
        sm.logDebug( " Reprojection error squarred (camL):  mean {0}, median {1}, std: {2}".format(np.mean(e2), np.median(e2), np.std(e2) ) )
    
    #run intrinsic calibration
    try:
        retval = optimizer.optimize()
        if retval.linearSolverFailure:
            sm.logError("calibrateIntrinsics: Optimization failed!")
        success = not retval.linearSolverFailure

    except:
        sm.logError("calibrateIntrinsics: Optimization failed!")
        success = False

    baselines=list()
    for baseline_dv in baseline_dvs:
        baselines.append( sm.Transformation(baseline_dv.T()) )
    
    return success, baselines
Code example #16
def calibrateIntrinsics(cam_geometry, obslist, distortionActive=True, intrinsicsActive=True):
    #verbose output
    if sm.getLoggingLevel()==sm.LoggingLevel.Debug:
        d = cam_geometry.geometry.projection().distortion().getParameters().flatten()
        p = cam_geometry.geometry.projection().getParameters().flatten()
        sm.logDebug("calibrateIntrinsics: intrinsics guess: {0}".format(p))
        sm.logDebug("calibrateIntrinsics: distortion guess: {0}".format(d))
    
    ############################################
    ## solve the bundle adjustment
    ############################################
    problem = aopt.OptimizationProblem()
    
    #add camera dvs
    cam_geometry.setDvActiveStatus(intrinsicsActive, distortionActive, False)
    problem.addDesignVariable(cam_geometry.dv.distortionDesignVariable())
    problem.addDesignVariable(cam_geometry.dv.projectionDesignVariable())
    problem.addDesignVariable(cam_geometry.dv.shutterDesignVariable())
    
    #corner uncertainty
    cornerUncertainty = 1.0
    R = np.eye(2) * cornerUncertainty * cornerUncertainty
    invR = np.linalg.inv(R)
    
    #get the image and target points corresponding to the frame
    target = cam_geometry.ctarget.detector.target()
    
    #target pose dv for all target views (=T_camL_w)
    reprojectionErrors = [];    
    sm.logDebug("calibrateIntrinsics: adding camera error terms for {0} calibration targets".format(len(obslist)))
    target_pose_dvs=list()
    for obs in obslist: 
        success, T_t_c = cam_geometry.geometry.estimateTransformation(obs)
        target_pose_dv = addPoseDesignVariable(problem, T_t_c)
        target_pose_dvs.append(target_pose_dv)
        
        T_cam_w = target_pose_dv.toExpression().inverse()
    
        ## add error terms
        for i in range(0, target.size()):
            p_target = aopt.HomogeneousExpression(sm.toHomogeneous(target.point(i)));
            valid, y = obs.imagePoint(i)
            if valid:
                rerr = cam_geometry.model.reprojectionError(y, invR, T_cam_w * p_target, cam_geometry.dv)
                problem.addErrorTerm(rerr)
                reprojectionErrors.append(rerr)
                                                    
    sm.logDebug("calibrateIntrinsics: added {0} camera error terms".format(len(reprojectionErrors)))
    
    ############################################
    ## solve
    ############################################       
    options = aopt.Optimizer2Options()
    options.verbose = sm.getLoggingLevel() == sm.LoggingLevel.Debug
    options.nThreads = 4
    options.convergenceDeltaX = 1e-3
    options.convergenceDeltaJ = 1
    options.maxIterations = 200
    options.trustRegionPolicy = aopt.LevenbergMarquardtTrustRegionPolicy(10)

    optimizer = aopt.Optimizer2(options)
    optimizer.setProblem(problem)

    #verbose output
    if sm.getLoggingLevel()==sm.LoggingLevel.Debug:
        sm.logDebug("Before optimization:")
        e2 = np.array([ e.evaluateError() for e in reprojectionErrors ])
        sm.logDebug( " Reprojection error squarred (camL):  mean {0}, median {1}, std: {2}".format(np.mean(e2), np.median(e2), np.std(e2) ) )
    
    #run intrinsic calibration
    try: 
        retval = optimizer.optimize()
        if retval.linearSolverFailure:
            sm.logError("calibrateIntrinsics: Optimization failed!")
        success = not retval.linearSolverFailure

    except:
        sm.logError("calibrateIntrinsics: Optimization failed!")
        success = False
    
    #verbose output
    if sm.getLoggingLevel()==sm.LoggingLevel.Debug:
        d = cam_geometry.geometry.projection().distortion().getParameters().flatten()
        p = cam_geometry.geometry.projection().getParameters().flatten()
        sm.logDebug("calibrateIntrinsics: guess for intrinsics cam: {0}".format(p))
        sm.logDebug("calibrateIntrinsics: guess for distortion cam: {0}".format(d))
    
    return success
Code example #17
def stereoCalibrate(camL_geometry, camH_geometry, obslist, distortionActive=False, baseline=None):
    #####################################################
    ## find the initial guess as the median of all PnP solutions
    #####################################################
    if baseline is None:
        r=[]; t=[]
        for obsL, obsH in obslist:
            #if we have observations for both cams
            if obsL is not None and obsH is not None:
                success, T_L = camL_geometry.geometry.estimateTransformation(obsL)
                success, T_H = camH_geometry.geometry.estimateTransformation(obsH)
                
                baseline = T_H.inverse()*T_L
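                # per-view estimate of camL's pose expressed in the camH frame (T_camH_camL)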
                t.append(baseline.t())
                rv=sm.RotationVector()
                r.append(rv.rotationMatrixToParameters( baseline.C() ))
        
        r_median = np.median(np.asmatrix(r), axis=0).flatten().T
        R_median = rv.parametersToRotationMatrix(r_median)
        t_median = np.median(np.asmatrix(t), axis=0).flatten().T
        
        baseline_HL = sm.Transformation( sm.rt2Transform(R_median, t_median) )
    else:
        baseline_HL = baseline
    
    #verbose output
    if sm.getLoggingLevel()==sm.LoggingLevel.Debug:
        dL = camL_geometry.geometry.projection().distortion().getParameters().flatten()
        pL = camL_geometry.geometry.projection().getParameters().flatten()
        dH = camH_geometry.geometry.projection().distortion().getParameters().flatten()
        pH = camH_geometry.geometry.projection().getParameters().flatten()
        sm.logDebug("initial guess for stereo calib: {0}".format(baseline_HL.T()))
        sm.logDebug("initial guess for intrinsics camL: {0}".format(pL))
        sm.logDebug("initial guess for intrinsics camH: {0}".format(pH))
        sm.logDebug("initial guess for distortion camL: {0}".format(dL))
        sm.logDebug("initial guess for distortion camH: {0}".format(dH))    
    
    ############################################
    ## solve the bundle adjustment
    ############################################
    problem = aopt.OptimizationProblem()

    #baseline design variable        
    baseline_dv = addPoseDesignVariable(problem, baseline_HL)
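    # the baseline pose is a design variable and gets refined together with the
    # target poses and the active camera parameters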
        
    #target pose dvs for all target views (each dv stores T_t_camL; inverted to T_camL_w when the error terms are built)
    target_pose_dvs = list()
    for obsL, obsH in obslist:
        if obsL is not None: #use camL if we have an obs for this one
            success, T_t_cL = camL_geometry.geometry.estimateTransformation(obsL)
        else:
            success, T_t_cH = camH_geometry.geometry.estimateTransformation(obsH)
            T_t_cL = T_t_cH*baseline_HL #apply baseline for the second camera
            
        target_pose_dv = addPoseDesignVariable(problem, T_t_cL)
        target_pose_dvs.append(target_pose_dv)
    
    #add camera dvs
    camL_geometry.setDvActiveStatus(True, distortionActive, False)
    camH_geometry.setDvActiveStatus(True, distortionActive, False)
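    # (assumed flag order: projection, distortion, shutter) -> projection parameters
    # are optimized, distortion only if distortionActive, the shutter stays fixed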
    problem.addDesignVariable(camL_geometry.dv.distortionDesignVariable())
    problem.addDesignVariable(camL_geometry.dv.projectionDesignVariable())
    problem.addDesignVariable(camL_geometry.dv.shutterDesignVariable())
    problem.addDesignVariable(camH_geometry.dv.distortionDesignVariable())
    problem.addDesignVariable(camH_geometry.dv.projectionDesignVariable())
    problem.addDesignVariable(camH_geometry.dv.shutterDesignVariable())
    
    ############################################
    ## add error terms
    ############################################
    
    #corner uncertainty
    # \todo pass in the detector uncertainty somehow.
    cornerUncertainty = 1.0
    R = np.eye(2) * cornerUncertainty * cornerUncertainty
    invR = np.linalg.inv(R)
        
    #Add reprojection error terms for both cameras
    reprojectionErrors0 = []; reprojectionErrors1 = []
            
    for cidx, cam in enumerate([camL_geometry, camH_geometry]):
        sm.logDebug("stereoCalibration: adding camera error terms for {0} calibration targets".format(len(obslist)))

        #get the image and target points corresponding to the frame
        target = cam.ctarget.detector.target()
        
        #add error terms for all observations
        for view_id, obstuple in enumerate(obslist):
            
            #add error terms if we have an observation for this cam
            obs=obstuple[cidx]
            if obs is not None:
                T_cam_w = target_pose_dvs[view_id].toExpression().inverse()
            
                #add the baseline for the second camera
                if cidx!=0:
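                    # chain the baseline for the second camera: T_camH_w = T_camH_camL * T_camL_w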
                    T_cam_w =  baseline_dv.toExpression() * T_cam_w
                    
                for i in range(0, target.size()):
                    p_target = aopt.HomogeneousExpression(sm.toHomogeneous(target.point(i)))
                    valid, y = obs.imagePoint(i)
                    if valid:
                        # Create an error term.
                        rerr = cam.model.reprojectionError(y, invR, T_cam_w * p_target, cam.dv)
                        rerr.idx = i
                        problem.addErrorTerm(rerr)
                    
                        if cidx==0:
                            reprojectionErrors0.append(rerr)
                        else:
                            reprojectionErrors1.append(rerr)
                                                        
        sm.logDebug("stereoCalibrate: added {0} camera error terms".format( len(reprojectionErrors0)+len(reprojectionErrors1) ))
        
    ############################################
    ## solve
    ############################################       
    options = aopt.Optimizer2Options()
    options.verbose = (sm.getLoggingLevel() == sm.LoggingLevel.Debug)
    options.nThreads = 4
    options.convergenceDeltaX = 1e-3
    options.convergenceDeltaJ = 1
    options.maxIterations = 200
    options.trustRegionPolicy = aopt.LevenbergMarquardtTrustRegionPolicy(10)

    optimizer = aopt.Optimizer2(options)
    optimizer.setProblem(problem)

    #verbose output
    if sm.getLoggingLevel()==sm.LoggingLevel.Debug:
        sm.logDebug("Before optimization:")
        e2 = np.array([ e.evaluateError() for e in reprojectionErrors0 ])
        sm.logDebug( " Reprojection error squarred (camL):  mean {0}, median {1}, std: {2}".format(np.mean(e2), np.median(e2), np.std(e2) ) )
        e2 = np.array([ e.evaluateError() for e in reprojectionErrors1 ])
        sm.logDebug( " Reprojection error squarred (camH):  mean {0}, median {1}, std: {2}".format(np.mean(e2), np.median(e2), np.std(e2) ) )
    
        sm.logDebug("baseline={0}".format(baseline_dv.toTransformationMatrix()))
    
    try: 
        retval = optimizer.optimize()
        if retval.linearSolverFailure:
            sm.logError("stereoCalibrate: Optimization failed!")
        success = not retval.linearSolverFailure
    except Exception:
        sm.logError("stereoCalibrate: Optimization failed!")
        success = False
    
    if sm.getLoggingLevel()==sm.LoggingLevel.Debug:
        sm.logDebug("After optimization:")
        e2 = np.array([ e.evaluateError() for e in reprojectionErrors0 ])
        sm.logDebug( " Reprojection error squarred (camL):  mean {0}, median {1}, std: {2}".format(np.mean(e2), np.median(e2), np.std(e2) ) )
        e2 = np.array([ e.evaluateError() for e in reprojectionErrors1 ])
        sm.logDebug( " Reprojection error squarred (camH):  mean {0}, median {1}, std: {2}".format(np.mean(e2), np.median(e2), np.std(e2) ) )
    
    #verbose output
    if sm.getLoggingLevel()==sm.LoggingLevel.Debug:
        dL = camL_geometry.geometry.projection().distortion().getParameters().flatten()
        pL = camL_geometry.geometry.projection().getParameters().flatten()
        dH = camH_geometry.geometry.projection().distortion().getParameters().flatten()
        pH = camH_geometry.geometry.projection().getParameters().flatten()
        sm.logDebug("guess for intrinsics camL: {0}".format(pL))
        sm.logDebug("guess for intrinsics camH: {0}".format(pH))
        sm.logDebug("guess for distortion camL: {0}".format(dL))
        sm.logDebug("guess for distortion camH: {0}".format(dH))    
    
    if success:
        baseline_HL = sm.Transformation(baseline_dv.toTransformationMatrix())
        return success, baseline_HL
    else:
        #return the initial guess if we failed
        return success, baseline_HL
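
# ------------------------------------------------------------------
# Hedged usage sketch (not part of the original source): assuming the
# per-camera intrinsics were refined beforehand with calibrateIntrinsics,
# the extrinsics of a camera pair could be estimated roughly like:
#
#   success, T_HL = stereoCalibrate(camL_geometry, camH_geometry, obslist)
#   if success:
#       sm.logDebug("estimated baseline T_HL:\n{0}".format(T_HL.T()))
# ------------------------------------------------------------------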