# Example 1
def printDebugEnd(cself):
    """Print a raw, script-friendly dump of the calibration results.

    For every camera: per-axis reprojection-error mean/std, projection and
    distortion parameters; for every camera except the first, the baseline
    translation and quaternion to its predecessor.

    cself: calibrator object exposing .cameras and .baselines (project type).
    """
    # Python 3 print() calls (the original used Python 2 print statements,
    # inconsistent with the rest of this file)
    print()
    print()

    for cidx, cam in enumerate(cself.cameras):
        print("cam{0}".format(cidx))
        print("----------")
        print()
        print()

        # per-axis reprojection error statistics (mean/std for u and v)
        corners, reprojs, rerrs = getReprojectionErrors(cself, cidx)
        if len(rerrs) > 0:
            me, se = getReprojectionErrorStatistics(rerrs)
            print(me[0])
            print(me[1])
            print(se[0])
            print(se[1])

        print()
        # flatten('F') keeps the ordering of the legacy flatten(1) call;
        # modern NumPy rejects integer order arguments
        p = cam.geometry.projection().getParameters().flatten('F')
        for temp in p:
            print(temp)

        print()
        d = cam.geometry.projection().distortion().getParameters().flatten('F')
        for temp in d:
            print(temp)

        # baselines are stored between consecutive cameras (cam0 has none)
        if cidx > 0:
            bidx = cidx - 1
            T = sm.Transformation(cself.baselines[bidx].T())
            for temp in T.t():
                print(temp)

            for temp in T.q():
                print(temp)

    print()
    print()
# Example 2
    def registration(self, target, w=0.0,
                     objective_type='pt2pt',
                     maxiter=50, tol=0.001,
                     feature_fn=lambda x: x):
        """Iteratively estimate the laser-to-body transformation T_b_l.

        EM-style loop: de-skew the source cloud with the current guess and
        the per-point sensor poses, run an expectation step against
        *target*, then a maximization step; stop once the objective value
        q changes by less than *tol*.

        target: target point cloud (N x 3) -- assumed shape, TODO confirm.
        w: outlier weight forwarded to the maximization step.
        objective_type: objective identifier (e.g. 'pt2pt'), forwarded on.
        maxiter: maximum number of EM iterations (must be >= 1).
        tol: convergence threshold on the change of the objective value.
        feature_fn: optional feature extractor applied to both clouds.
        Returns the transformation produced by the last maximization step.
        """
        if maxiter < 1:
            # previously maxiter=0 crashed with UnboundLocalError on `res`
            raise ValueError("maxiter must be >= 1")
        q = None
        ftarget = feature_fn(target)

        # build the problem: the transformation is the only design variable
        self.problem = aopt.OptimizationProblem()
        self.T_b_l_Dv = aopt.TransformationDv(sm.Transformation(), rotationActive=True, translationActive=True)
        for i in range(0, self.T_b_l_Dv.numDesignVariables()):
            self.problem.addDesignVariable(self.T_b_l_Dv.getDesignVariable(i))

        for _ in range(maxiter):
            T_b_l = self.T_b_l_Dv.toExpression().toTransformationMatrix()
            print("Inital laser to body transformation: T_b_l ")
            print(T_b_l)
            T_l_b = np.linalg.inv(T_b_l)

            # de-skew: map each homogeneous point through its own sensor pose
            # (range() here -- the original used Python-2 xrange in an
            # otherwise Python-3 method)
            numPoints = self._source.shape[0]
            source = np.hstack([self._source, np.ones((numPoints, 1), dtype=self._source.dtype)])
            t_source = np.array([np.dot(T_l_b, np.dot(self._sensor_tfs[i], np.dot(T_b_l, source[i]))) for i in range(numPoints)])
            t_source = t_source[:, :3]
            util.showPointCloud([t_source, target])
            fsource = feature_fn(t_source)
            estep_res = self.expectation_step(fsource, ftarget, target, objective_type)

            res = self.maximization_step(self._source, target, estep_res, w=w,
                                         objective_type=objective_type)

            # converged once the objective value stops changing
            if q is not None and abs(res.q - q) < tol:
                break
            q = res.q
        return res.transformation
# Example 3
def printParameters(cself, dest=sys.stdout):
    """Print calibrated camera-system parameters with 1-sigma bounds.

    For every camera: geometry type, distortion and projection parameters
    with their standard deviations, plus reprojection-error statistics;
    then every chain baseline as quaternion/translation with stds.

    cself: calibrator object exposing .cameras and .baselines (project type).
    dest: writable text stream (defaults to stdout).
    """
    #get the covariances
    std_baselines, std_cameras = recoverCovariance(cself)

    #print cameras
    print("Camera-system parameters:", file=dest)
    for cidx, cam in enumerate(cself.cameras):
        d = cam.geometry.projection().distortion().getParameters().flatten()
        p = cam.geometry.projection().getParameters().flatten()
        # std_cameras packs [distortion stds | projection stds] per camera
        dd = std_cameras[cidx][0:d.shape[0]]
        dp = std_cameras[cidx][d.shape[0]:]
        print("\tcam{0} ({1}):".format(cidx, cam.dataset.topic), file=dest)
        print("\t type: %s" % (type(cam.geometry)), file=dest)
        print("\t distortion: %s +- %s" % (d, np.array(dd)), file=dest)
        print("\t projection: %s +- %s" % (p, np.array(dp)), file=dest)

        #reproj error statistics
        corners, reprojs, rerrs = getReprojectionErrors(cself, cidx)
        if len(rerrs) > 0:
            me, se = getReprojectionErrorStatistics(rerrs)
            try:
                print("\t reprojection error: [%f, %f] +- [%f, %f]" %
                      (me[0], me[1], se[0], se[1]),
                      file=dest)
            # best-effort: formatting can fail for degenerate statistics;
            # narrowed from a bare except so SystemExit/KeyboardInterrupt
            # are no longer swallowed
            except Exception:
                print("\t Failed printing the reprojection error.", file=dest)
            print(file=dest)

    #print baselines
    for bidx, baseline in enumerate(cself.baselines):
        T = sm.Transformation(baseline.T())
        # std_baselines packs [rotation stds (3) | translation stds (3)]
        dq = std_baselines[bidx][0:3]
        dt = std_baselines[bidx][3:6]
        print("\tbaseline T_{1}_{0}:".format(bidx, bidx + 1), file=dest)
        print("\t q: %s +- %s" % (T.q(), np.array(dq)), file=dest)
        print("\t t: %s +- %s" % (T.t(), np.array(dt)), file=dest)
        print(file=dest)
# Example 4
    def __init__(self, imuConfig, parsed):
        """Set up an IMU wrapper: open the bag dataset, pull the noise
        statistics from the configuration, and initialize the gyro-bias
        prior and the IMU pose before loading all measurements."""
        # keep the configuration around for later queries
        self.imuConfig = imuConfig

        # open the ROS bag on the configured IMU topic (optionally cropped
        # by the --bag-from-to command-line range)
        self.dataset = initImuBagDataset(parsed.bagfile[0],
                                         imuConfig.getRosTopic(),
                                         parsed.bag_from_to)

        # noise statistics: (discrete uncertainty, random walk, continuous
        # uncertainty) for the accelerometer and the gyroscope
        accel_stats = self.imuConfig.getAccelerometerStatistics()
        self.accelUncertaintyDiscrete, self.accelRandomWalk, self.accelUncertainty = accel_stats
        gyro_stats = self.imuConfig.getGyroStatistics()
        self.gyroUncertaintyDiscrete, self.gyroRandomWalk, self.gyroUncertainty = gyro_stats

        # gyro-bias prior; the count supports recursive averaging when more
        # than one camera contributes a measurement
        self.GyroBiasPrior = np.array([0, 0, 0])
        self.GyroBiasPriorCount = 0

        # IMU pose (identity until estimated)
        self.T_ib = sm.Transformation()

        # read all IMU measurements from the bag
        self.loadImuData()
# Example 5
def saveChainParametersYaml(cself, resultFile, graph):
    """Write the calibrated camera chain to a YAML file.

    Stores per-camera intrinsics, distortion and resolution, the camera
    overlap lists from *graph*, and the chain baselines (cam N-1 -> cam N).

    cself: calibrator exposing .cameras (with .model/.geometry/.dataset)
           and .baselines (project types).
    resultFile: output YAML path.
    graph: provides getCamOverlaps(cam_id).
    Raises RuntimeError for an unknown camera model.
    """
    # map project camera classes to YAML model / distortion identifiers
    cameraModels = {
        acvb.DistortedPinhole: 'pinhole',
        acvb.EquidistantPinhole: 'pinhole',
        acvb.FovPinhole: 'pinhole',
        acvb.Omni: 'omni',
        acvb.DistortedOmni: 'omni',
        acvb.ExtendedUnified: 'eucm',
        acvb.DoubleSphere: 'ds'
    }
    distortionModels = {
        acvb.DistortedPinhole: 'radtan',
        acvb.EquidistantPinhole: 'equidistant',
        acvb.FovPinhole: 'fov',
        acvb.Omni: 'none',
        acvb.DistortedOmni: 'radtan',
        acvb.ExtendedUnified: 'none',
        acvb.DoubleSphere: 'none'
    }

    chain = cr.CameraChainParameters(resultFile, createYaml=True)
    for cam_id, cam in enumerate(cself.cameras):
        cameraModel = cameraModels[cam.model]
        distortionModel = distortionModels[cam.model]

        #create new config file
        camParams = cr.CameraParameters(resultFile, createYaml=True)
        camParams.setRosTopic(cam.dataset.topic)

        #set the data (intrinsics ordering depends on the model)
        P = cam.geometry.projection()
        if cameraModel == 'omni':
            camParams.setIntrinsics(
                cameraModel,
                [P.xi(), P.fu(), P.fv(),
                 P.cu(), P.cv()])
        elif cameraModel == 'pinhole':
            camParams.setIntrinsics(
                cameraModel,
                [P.fu(), P.fv(), P.cu(), P.cv()])
        elif cameraModel == 'eucm':
            camParams.setIntrinsics(
                cameraModel,
                [P.alpha(),
                 P.beta(), P.fu(),
                 P.fv(), P.cu(),
                 P.cv()])
        elif cameraModel == 'ds':
            camParams.setIntrinsics(
                cameraModel,
                [P.xi(), P.alpha(),
                 P.fu(), P.fv(),
                 P.cu(), P.cv()])
        else:
            raise RuntimeError("Invalid camera model {}.".format(cameraModel))
        camParams.setResolution([P.ru(), P.rv()])
        # flatten('F') keeps the ordering of the legacy flatten(1) call;
        # modern NumPy rejects integer order arguments
        dist_coeffs = P.distortion().getParameters().flatten('F')
        camParams.setDistortion(distortionModel, dist_coeffs)

        chain.addCameraAtEnd(camParams)

    for cam_id, cam in enumerate(cself.cameras):
        overlaps = graph.getCamOverlaps(cam_id)
        chain.setCamOverlaps(cam_id, overlaps)

    #store all baselines in the camera chain (baseline bidx: cam bidx -> bidx+1)
    for bidx, baseline_dv in enumerate(cself.baselines):
        baseline = sm.Transformation(baseline_dv.T())
        camNr = bidx + 1
        chain.setExtrinsicsLastCamToHere(camNr, baseline)

    chain.writeYaml()
# Example 6
    def findOrientationPriorCameraToImu(self, imu):
        """Estimate a camera-to-IMU rotation prior (plus a gyro-bias prior).

        Fits the rotation by matching the pose-spline-predicted angular
        velocity against the measured gyro rates, then overwrites the
        rotation part of self.T_extrinsic (translation prior is kept) and
        updates imu.GyroBiasPrior by recursive averaging.

        imu: IMU object exposing .imuData, .GyroBiasPrior and
             .GyroBiasPriorCount (project type).
        """
        print()
        print("Estimating imu-camera rotation prior")

        # build the problem
        problem = aopt.OptimizationProblem()

        # Add the rotation as design variable
        q_i_c_Dv = aopt.RotationQuaternionDv(self.T_extrinsic.q())
        q_i_c_Dv.setActive(True)
        problem.addDesignVariable(q_i_c_Dv)

        # Add the gyro bias as design variable
        gyroBiasDv = aopt.EuclideanPointDv(np.zeros(3))
        gyroBiasDv.setActive(True)
        problem.addDesignVariable(gyroBiasDv)

        # DV expressions, hoisted out of the loop: previously these were
        # (re)created per measurement, and `bias` was undefined at the end
        # of the function whenever no IMU sample fell inside the spline
        # support
        R_i_c = q_i_c_Dv.toExpression()
        bias = gyroBiasDv.toExpression()

        #initialize a pose spline using the camera poses
        poseSpline = self.initPoseSplineFromCamera(timeOffsetPadding=0.0)

        # one gyroscope error term per IMU sample inside the spline support
        for im in imu.imuData:
            tk = im.stamp.toSec()
            if tk > poseSpline.t_min() and tk < poseSpline.t_max():
                #get the vision predicted omega and measured omega (IMU)
                omega_predicted = R_i_c * aopt.EuclideanExpression(
                    np.matrix(
                        poseSpline.angularVelocityBodyFrame(tk)).transpose())
                omega_measured = im.omega

                #error term
                gerr = ket.GyroscopeError(omega_measured, im.omegaInvR,
                                          omega_predicted, bias)
                problem.addErrorTerm(gerr)

        #define the optimization
        options = aopt.Optimizer2Options()
        options.verbose = False
        options.linearSolver = aopt.BlockCholeskyLinearSystemSolver()
        options.nThreads = 2
        options.convergenceDeltaX = 1e-4
        options.convergenceDeltaJ = 1
        options.maxIterations = 50

        #run the optimization
        optimizer = aopt.Optimizer2(options)
        optimizer.setProblem(problem)

        #get the prior; narrowed from a bare except so SystemExit and
        #KeyboardInterrupt are no longer swallowed
        try:
            optimizer.optimize()
        except Exception:
            sm.logFatal("Failed to obtain orientation prior!")
            sys.exit(-1)

        #overwrite the external rotation prior (keep the external translation prior)
        R_i_c = q_i_c_Dv.toRotationMatrix().transpose()
        self.T_extrinsic = sm.Transformation(
            sm.rt2Transform(R_i_c, self.T_extrinsic.t()))

        #set the gyro bias prior (if we have more than 1 cameras use recursive average)
        b_gyro = bias.toEuclidean()
        imu.GyroBiasPriorCount += 1
        imu.GyroBiasPrior = (
            imu.GyroBiasPriorCount - 1.0
        ) / imu.GyroBiasPriorCount * imu.GyroBiasPrior + 1.0 / imu.GyroBiasPriorCount * b_gyro

        #print result (Python 3 print() -- original used py2 print statements)
        print("  Orientation prior camera-imu found as: (T_i_c)")
        print(R_i_c)
        print("  Gyro bias prior found as: (b_gyro)")
        print(b_gyro)
# Example 7
    def getInitialGuesses(self, cameras):
        """Compute initial baseline guesses for the whole camera chain.

        Pipeline: (0) verify every camera shares target observations with
        the rest of the graph; (1) pick the best camera pairs via shortest
        paths on an inverse-common-corner-weighted graph; (2) stereo-
        calibrate each chosen pair; (3) chain the pairwise baselines into
        consecutive-camera baselines cam0->cam1->...->camN; (4) refine all
        baselines in a full-batch optimization.

        cameras: list of camera objects, indexed by graph vertex id.
        Returns the list of refined baselines (project transformations).
        Raises RuntimeError if the graph was never initialized; exits the
        process if the observation graph is disconnected.
        """
        if not self.G:
            raise RuntimeError("Graph is uninitialized!")

        #################################################################
        ## STEP 0: check if all cameras in the chain are connected
        ##         through common target point observations
        ##         (=all vertices connected?)
        #################################################################
        if not self.isGraphConnected():
            sm.logError(
                "The cameras are not connected through mutual target observations! "
                "Please provide another dataset...")

            self.plotGraph()
            sys.exit(0)

        #################################################################
        ## STEP 1: get baseline initial guesses by calibrating good
        ##         camera pairs using a stereo calibration
        ##
        #################################################################

        #first we need to find the best camera pairs to obtain the initial guesses
        #--> use the pairs that share the most common observed target corners
        #The graph is built with weighted edges that represent the number of common
        #target corners, so we can use dijkstras algorithm to get the best pair
        #configuration for the initial pair calibrations
        weights = [1.0 / commonPoints for commonPoints in self.G.es["weight"]]

        #choose the cam with the least edges as base_cam
        outdegrees = self.G.vs.outdegree()
        base_cam_id = outdegrees.index(min(outdegrees))

        #solve for shortest path  (=optimal transformation chaining)
        # NOTE(review): base_cam_id is computed above but the search below
        # starts at vertex 0 -- confirm whether it should start at base_cam_id
        edges_on_path = self.G.get_shortest_paths(0,
                                                  weights=weights,
                                                  output="epath")

        self.optimal_baseline_edges = set(
            [item for sublist in edges_on_path for item in sublist])

        #################################################################
        ## STEP 2: solve stereo calibration problem for the baselines
        ##         (baselines are always from lower_id to higher_id cams!)
        #################################################################

        #calibrate all cameras in pairs
        for baseline_edge_id in self.optimal_baseline_edges:

            #get the cam_nrs from the graph edge (calibrate from low to high id)
            vertices = self.G.es[baseline_edge_id].tuple
            if vertices[0] < vertices[1]:
                camL_nr = vertices[0]
                camH_nr = vertices[1]
            else:
                camL_nr = vertices[1]
                camH_nr = vertices[0]

            # Python 3 print() (original used a py2 print statement)
            print("\t initializing camera pair ({0},{1})...  ".format(
                camL_nr, camH_nr))

            #run the pair extrinsic calibration
            obs_list = self.obs_db.getAllObsTwoCams(camL_nr, camH_nr)
            success, baseline_HL = kcc.stereoCalibrate(cameras[camL_nr],
                                                       cameras[camH_nr],
                                                       obs_list,
                                                       distortionActive=False)

            if success:
                sm.logDebug("baseline_{0}_{1}={2}".format(
                    camL_nr, camH_nr, baseline_HL.T()))
            else:
                sm.logError(
                    "initialization of camera pair ({0},{1}) failed  ".format(
                        camL_nr, camH_nr))
                sm.logError("estimated baseline_{0}_{1}={2}".format(
                    camL_nr, camH_nr, baseline_HL.T()))

            #store the baseline in the graph
            self.G.es[self.G.get_eid(camL_nr,
                                     camH_nr)]["baseline_HL"] = baseline_HL

        #################################################################
        ## STEP 3: transform from the "optimal" baseline chain to camera chain ordering
        ##         (=> baseline_0 = T_c1_c0 |
        #################################################################

        #construct the optimal path graph (drop all non-optimal edges)
        G_optimal_baselines = self.G.copy()

        eid_not_optimal_path = set(range(0, len(G_optimal_baselines.es)))
        for eid in self.optimal_baseline_edges:
            eid_not_optimal_path.remove(eid)
        G_optimal_baselines.delete_edges(eid_not_optimal_path)

        #now we convert the arbitary baseline graph to baselines starting from
        # cam0 and traverse the chain (cam0->cam1->cam2->camN)
        baselines = []
        for baseline_id in range(0, self.numCams - 1):
            #find the shortest path on the graph
            path = G_optimal_baselines.get_shortest_paths(
                baseline_id, baseline_id + 1)[0]

            #get the baseline from cam with id baseline_id to baseline_id+1
            baseline_HL = sm.Transformation()
            for path_idx in range(0, len(path) - 1):
                source_vert = path[path_idx]
                target_vert = path[path_idx + 1]
                T_edge = self.G.es[self.G.get_eid(source_vert,
                                                  target_vert)]["baseline_HL"]

                #correct the direction (baselines always from low to high cam id!)
                T_edge = T_edge if source_vert < target_vert else T_edge.inverse(
                )

                #chain up
                baseline_HL = T_edge * baseline_HL

            #store in graph
            baselines.append(baseline_HL)

        #################################################################
        ## STEP 4: refine guess in full batch
        #################################################################
        success, baselines = kcc.solveFullBatch(cameras, baselines, self)

        if not success:
            sm.logWarn("Full batch refinement failed!")

        return baselines
# Example 8
def solveFullBatch(cameras, baseline_guesses, graph):
    """Refine all chain baselines in one full-batch bundle adjustment.

    Builds an optimization problem with camera projection DVs, one
    transformation DV per consecutive-camera baseline, and one target-pose
    DV per view timestamp; adds reprojection error terms for every valid
    corner observation and optimizes.

    cameras: list of camera objects (project type).
    baseline_guesses: initial baselines, one per consecutive camera pair.
    graph: observation database/graph providing timestamps, observations
           and target-pose guesses.
    Returns (success, baselines) where baselines are the refined
    transformations (the guesses if optimization failed).
    """
    ############################################
    ## solve the bundle adjustment
    ############################################
    problem = aopt.OptimizationProblem()

    #add camera dvs (projection active, distortion/shutter fixed)
    for cam in cameras:
        cam.setDvActiveStatus(True, True, False)
        problem.addDesignVariable(cam.dv.distortionDesignVariable())
        problem.addDesignVariable(cam.dv.projectionDesignVariable())
        problem.addDesignVariable(cam.dv.shutterDesignVariable())

    #one transformation DV per consecutive-camera baseline
    baseline_dvs = list()
    for baseline_idx in range(0, len(cameras) - 1):
        baseline_dv = aopt.TransformationDv(baseline_guesses[baseline_idx])

        for i in range(0, baseline_dv.numDesignVariables()):
            problem.addDesignVariable(baseline_dv.getDesignVariable(i))

        baseline_dvs.append(baseline_dv)

    #corner uncertainty
    cornerUncertainty = 1.0
    R = np.eye(2) * cornerUncertainty * cornerUncertainty
    invR = np.linalg.inv(R)

    #get the target
    target = cameras[0].ctarget.detector.target()

    #Add calibration target reprojection error terms for all camera in chain
    target_pose_dvs = list()

    #shuffle the views
    reprojectionErrors = []
    timestamps = graph.obs_db.getAllViewTimestamps()
    for view_id, timestamp in enumerate(timestamps):

        #get all observations for all cams at this time
        obs_tuple = graph.obs_db.getAllObsAtTimestamp(timestamp)

        #create a target pose dv for all target views (= T_cam0_w)
        T0 = graph.getTargetPoseGuess(timestamp, cameras, baseline_guesses)
        target_pose_dv = addPoseDesignVariable(problem, T0)
        target_pose_dvs.append(target_pose_dv)

        for cidx, obs in obs_tuple:
            cam = cameras[cidx]

            #calibration target coords to camera X coords
            T_cam0_calib = target_pose_dv.toExpression().inverse()

            #build pose chain (target->cam0->baselines->camN)
            T_camN_calib = T_cam0_calib
            for idx in range(0, cidx):
                T_camN_calib = baseline_dvs[idx].toExpression() * T_camN_calib

            ## add error terms for every valid corner of this observation
            for i in range(0, target.size()):
                p_target = aopt.HomogeneousExpression(
                    sm.toHomogeneous(target.point(i)))
                valid, y = obs.imagePoint(i)
                if valid:
                    rerr = cameras[cidx].model.reprojectionError(
                        y, invR, T_camN_calib * p_target, cameras[cidx].dv)
                    problem.addErrorTerm(rerr)
                    reprojectionErrors.append(rerr)

    sm.logDebug("solveFullBatch: added {0} camera error terms".format(
        len(reprojectionErrors)))

    ############################################
    ## solve
    ############################################
    options = aopt.Optimizer2Options()
    # plain boolean (replaces redundant `True if ... else False` ternary)
    options.verbose = sm.getLoggingLevel() == sm.LoggingLevel.Debug
    options.nThreads = 4
    options.convergenceDeltaX = 1e-3
    options.convergenceDeltaJ = 1
    options.maxIterations = 250
    options.trustRegionPolicy = aopt.LevenbergMarquardtTrustRegionPolicy(10)

    optimizer = aopt.Optimizer2(options)
    optimizer.setProblem(problem)

    #verbose output
    if sm.getLoggingLevel() == sm.LoggingLevel.Debug:
        sm.logDebug("Before optimization:")
        e2 = np.array([e.evaluateError() for e in reprojectionErrors])
        sm.logDebug(
            " Reprojection error squarred (camL):  mean {0}, median {1}, std: {2}"
            .format(np.mean(e2), np.median(e2), np.std(e2)))

    #run the optimization; narrowed from a bare except so SystemExit and
    #KeyboardInterrupt propagate. Error messages now name this function
    #(previously mislabeled "calibrateIntrinsics").
    try:
        retval = optimizer.optimize()
        if retval.linearSolverFailure:
            sm.logError("solveFullBatch: Optimization failed!")
        success = not retval.linearSolverFailure

    except Exception:
        sm.logError("solveFullBatch: Optimization failed!")
        success = False

    #extract the refined baselines from the design variables
    baselines = list()
    for baseline_dv in baseline_dvs:
        baselines.append(sm.Transformation(baseline_dv.T()))

    return success, baselines
# Example 9
def stereoCalibrate(camL_geometry,
                    camH_geometry,
                    obslist,
                    distortionActive=False,
                    baseline=None):
    """Stereo-calibrate the extrinsics between two cameras.

    Finds the baseline T_camH_camL by bundle adjustment over all shared
    target views. If *baseline* is None, an initial guess is taken as the
    element-wise median of all pairwise PnP solutions.

    camL_geometry, camH_geometry: camera objects (lower/higher cam id).
    obslist: list of (obsL, obsH) observation tuples; an entry may be None
             when only one camera saw the target in that view.
    distortionActive: whether distortion DVs are optimized as well.
    baseline: optional initial guess for T_camH_camL.
    Returns (success, baseline_HL); the initial guess is returned on
    failure.
    """
    #####################################################
    ## find initial guess as median of  all pnp solutions
    #####################################################
    if baseline is None:
        r = []
        t = []
        for obsL, obsH in obslist:
            #if we have observations for both camss
            if obsL is not None and obsH is not None:
                success, T_L = camL_geometry.geometry.estimateTransformation(
                    obsL)
                success, T_H = camH_geometry.geometry.estimateTransformation(
                    obsH)

                baseline = T_H.inverse() * T_L
                t.append(baseline.t())
                rv = sm.RotationVector()
                r.append(rv.rotationMatrixToParameters(baseline.C()))

        # NOTE(review): if obslist contains no view seen by both cameras,
        # `rv` is unbound here and this raises -- confirm callers guarantee
        # at least one common view
        r_median = np.median(np.asmatrix(r), axis=0).flatten().T
        R_median = rv.parametersToRotationMatrix(r_median)
        t_median = np.median(np.asmatrix(t), axis=0).flatten().T

        baseline_HL = sm.Transformation(sm.rt2Transform(R_median, t_median))
    else:
        baseline_HL = baseline

    #verbose output
    if sm.getLoggingLevel() == sm.LoggingLevel.Debug:
        dL = camL_geometry.geometry.projection().distortion().getParameters(
        ).flatten()
        pL = camL_geometry.geometry.projection().getParameters().flatten()
        dH = camH_geometry.geometry.projection().distortion().getParameters(
        ).flatten()
        pH = camH_geometry.geometry.projection().getParameters().flatten()
        sm.logDebug("initial guess for stereo calib: {0}".format(
            baseline_HL.T()))
        sm.logDebug("initial guess for intrinsics camL: {0}".format(pL))
        sm.logDebug("initial guess for intrinsics camH: {0}".format(pH))
        sm.logDebug("initial guess for distortion camL: {0}".format(dL))
        sm.logDebug("initial guess for distortion camH: {0}".format(dH))

    ############################################
    ## solve the bundle adjustment
    ############################################
    problem = aopt.OptimizationProblem()

    #baseline design variable
    baseline_dv = addPoseDesignVariable(problem, baseline_HL)

    #target pose dv for all target views (=T_camL_w)
    target_pose_dvs = list()
    for obsL, obsH in obslist:
        if obsL is not None:  #use camL if we have an obs for this one
            success, T_t_cL = camL_geometry.geometry.estimateTransformation(
                obsL)
        else:
            success, T_t_cH = camH_geometry.geometry.estimateTransformation(
                obsH)
            T_t_cL = T_t_cH * baseline_HL  #apply baseline for the second camera

        target_pose_dv = addPoseDesignVariable(problem, T_t_cL)
        target_pose_dvs.append(target_pose_dv)

    #add camera dvs (projection active, shutter fixed)
    camL_geometry.setDvActiveStatus(True, distortionActive, False)
    camH_geometry.setDvActiveStatus(True, distortionActive, False)
    problem.addDesignVariable(camL_geometry.dv.distortionDesignVariable())
    problem.addDesignVariable(camL_geometry.dv.projectionDesignVariable())
    problem.addDesignVariable(camL_geometry.dv.shutterDesignVariable())
    problem.addDesignVariable(camH_geometry.dv.distortionDesignVariable())
    problem.addDesignVariable(camH_geometry.dv.projectionDesignVariable())
    problem.addDesignVariable(camH_geometry.dv.shutterDesignVariable())

    ############################################
    ## add error terms
    ############################################

    #corner uncertainty
    # \todo pass in the detector uncertainty somehow.
    cornerUncertainty = 1.0
    R = np.eye(2) * cornerUncertainty * cornerUncertainty
    invR = np.linalg.inv(R)

    #Add reprojection error terms for both cameras
    reprojectionErrors0 = []
    reprojectionErrors1 = []

    for cidx, cam in enumerate([camL_geometry, camH_geometry]):
        sm.logDebug(
            "stereoCalibration: adding camera error terms for {0} calibration targets"
            .format(len(obslist)))

        #get the image and target points corresponding to the frame
        target = cam.ctarget.detector.target()

        #add error terms for all observations
        for view_id, obstuple in enumerate(obslist):

            #add error terms if we have an observation for this cam
            obs = obstuple[cidx]
            if obs is not None:
                T_cam_w = target_pose_dvs[view_id].toExpression().inverse()

                #add the baseline for the second camera
                if cidx != 0:
                    T_cam_w = baseline_dv.toExpression() * T_cam_w

                for i in range(0, target.size()):
                    p_target = aopt.HomogeneousExpression(
                        sm.toHomogeneous(target.point(i)))
                    valid, y = obs.imagePoint(i)
                    if valid:
                        # Create an error term.
                        rerr = cam.model.reprojectionError(
                            y, invR, T_cam_w * p_target, cam.dv)
                        rerr.idx = i
                        problem.addErrorTerm(rerr)

                        if cidx == 0:
                            reprojectionErrors0.append(rerr)
                        else:
                            reprojectionErrors1.append(rerr)

        sm.logDebug("stereoCalibrate: added {0} camera error terms".format(
            len(reprojectionErrors0) + len(reprojectionErrors1)))

    ############################################
    ## solve
    ############################################
    options = aopt.Optimizer2Options()
    # plain boolean (replaces redundant `True if ... else False` ternary)
    options.verbose = sm.getLoggingLevel() == sm.LoggingLevel.Debug
    options.nThreads = 4
    options.convergenceDeltaX = 1e-3
    options.convergenceDeltaJ = 1
    options.maxIterations = 200
    options.trustRegionPolicy = aopt.LevenbergMarquardtTrustRegionPolicy(10)

    optimizer = aopt.Optimizer2(options)
    optimizer.setProblem(problem)

    #verbose output
    if sm.getLoggingLevel() == sm.LoggingLevel.Debug:
        sm.logDebug("Before optimization:")
        e2 = np.array([e.evaluateError() for e in reprojectionErrors0])
        sm.logDebug(
            " Reprojection error squarred (camL):  mean {0}, median {1}, std: {2}"
            .format(np.mean(e2), np.median(e2), np.std(e2)))
        e2 = np.array([e.evaluateError() for e in reprojectionErrors1])
        sm.logDebug(
            " Reprojection error squarred (camH):  mean {0}, median {1}, std: {2}"
            .format(np.mean(e2), np.median(e2), np.std(e2)))

        sm.logDebug("baseline={0}".format(
            baseline_dv.toTransformationMatrix()))

    #run the optimization; narrowed from a bare except so SystemExit and
    #KeyboardInterrupt propagate
    try:
        retval = optimizer.optimize()
        if retval.linearSolverFailure:
            sm.logError("stereoCalibrate: Optimization failed!")
        success = not retval.linearSolverFailure
    except Exception:
        sm.logError("stereoCalibrate: Optimization failed!")
        success = False

    if sm.getLoggingLevel() == sm.LoggingLevel.Debug:
        sm.logDebug("After optimization:")
        e2 = np.array([e.evaluateError() for e in reprojectionErrors0])
        sm.logDebug(
            " Reprojection error squarred (camL):  mean {0}, median {1}, std: {2}"
            .format(np.mean(e2), np.median(e2), np.std(e2)))
        e2 = np.array([e.evaluateError() for e in reprojectionErrors1])
        sm.logDebug(
            " Reprojection error squarred (camH):  mean {0}, median {1}, std: {2}"
            .format(np.mean(e2), np.median(e2), np.std(e2)))

    #verbose output
    if sm.getLoggingLevel() == sm.LoggingLevel.Debug:
        dL = camL_geometry.geometry.projection().distortion().getParameters(
        ).flatten()
        pL = camL_geometry.geometry.projection().getParameters().flatten()
        dH = camH_geometry.geometry.projection().distortion().getParameters(
        ).flatten()
        pH = camH_geometry.geometry.projection().getParameters().flatten()
        sm.logDebug("guess for intrinsics camL: {0}".format(pL))
        sm.logDebug("guess for intrinsics camH: {0}".format(pH))
        sm.logDebug("guess for distortion camL: {0}".format(dL))
        sm.logDebug("guess for distortion camH: {0}".format(dH))

    if success:
        baseline_HL = sm.Transformation(baseline_dv.toTransformationMatrix())
        return success, baseline_HL
    else:
        #return the initial guess if we fail
        return success, baseline_HL
# Example 10
def saveChainParametersYaml(cself, resultFile, graph, oldcamchain=None, cam_replace=None):
    """Write the calibrated camera chain to YAML, optionally merging the
    result into an existing chain.

    With oldcamchain=None a fresh chain YAML is written to *resultFile*.
    Otherwise, for every topic in *cam_replace*, the intrinsics/distortion
    and the adjacent baseline are copied into *oldcamchain*, which is then
    written to its own yamlFile.

    cself: calibrator exposing .cameras and .baselines (project types).
    graph: provides getCamOverlaps(cam_id).
    Raises RuntimeError for an unknown camera model.
    """
    # map project camera classes to YAML model / distortion identifiers
    cameraModels = {acvb.DistortedPinhole: 'pinhole',
                    acvb.EquidistantPinhole: 'pinhole',
                    acvb.FovPinhole: 'pinhole',
                    acvb.Omni: 'omni',
                    acvb.DistortedOmni: 'omni',
                    acvb.ExtendedUnified: 'eucm',
                    acvb.DoubleSphere: 'ds'}
    distortionModels = {acvb.DistortedPinhole: 'radtan',
                        acvb.EquidistantPinhole: 'equidistant',
                        acvb.FovPinhole: 'fov',
                        acvb.Omni: 'none',
                        acvb.DistortedOmni: 'radtan',
                        acvb.ExtendedUnified: 'none',
                        acvb.DoubleSphere: 'none'}
    chain = cr.CameraChainParameters(resultFile, createYaml=True)
    for cam_id, cam in enumerate(cself.cameras):
        cameraModel = cameraModels[cam.model]
        distortionModel = distortionModels[cam.model]

        #create new config file
        camParams = cr.CameraParameters(resultFile, createYaml=True)
        camParams.setRosTopic(cam.dataset.topic)

        #set the data (intrinsics ordering depends on the model)
        P = cam.geometry.projection()
        if cameraModel == 'omni':
            camParams.setIntrinsics(cameraModel, [P.xi(), P.fu(), P.fv(), P.cu(), P.cv()] )
        elif cameraModel == 'pinhole':
            camParams.setIntrinsics(cameraModel, [P.fu(), P.fv(), P.cu(), P.cv()] )
        elif cameraModel == 'eucm':
            camParams.setIntrinsics(cameraModel, [P.alpha(), P.beta(), P.fu(), P.fv(), P.cu(), P.cv()] )
        elif cameraModel == 'ds':
            camParams.setIntrinsics(cameraModel, [P.xi(), P.alpha(), P.fu(), P.fv(), P.cu(), P.cv()] )
        else:
            raise RuntimeError("Invalid camera model {}.".format(cameraModel))
        camParams.setResolution( [P.ru(), P.rv()] )
        # flatten('F') keeps the ordering of the legacy flatten(1) call;
        # modern NumPy rejects integer order arguments
        dist_coeffs = P.distortion().getParameters().flatten('F')
        camParams.setDistortion( distortionModel, dist_coeffs)

        chain.addCameraAtEnd(camParams)

    for cam_id, cam in enumerate(cself.cameras):
        overlaps = graph.getCamOverlaps(cam_id)
        chain.setCamOverlaps(cam_id, overlaps)

    #store all baselines in the camera chain (baseline bidx: cam bidx -> bidx+1)
    for bidx, baseline_dv in enumerate(cself.baselines):
        baseline = sm.Transformation( baseline_dv.T() )
        camNr = bidx+1
        chain.setExtrinsicsLastCamToHere(camNr, baseline)

    # write yaml
    if oldcamchain is None:
        chain.writeYaml()
    else:
        # copy the newly calibrated intrinsics into the old chain
        for topic in cam_replace:
            camConfig,camNr = chain.getCameraParametersforTopic(topic)
            camConfig_old, camNr_old = oldcamchain.getCameraParametersforTopic(topic)
            camConfig_old.data["intrinsics"] = camConfig.data["intrinsics"]
            camConfig_old.data["distortion_coeffs"] = camConfig.data["distortion_coeffs"]
            oldcamchain.data["cam{0}".format(camNr_old)]=camConfig_old.getYamlDict()


        # copy the adjacent baseline: for cam0 use the (inverted) baseline
        # to its successor, otherwise the baseline from its predecessor
        for topic in cam_replace:
            camConfig,camNr = chain.getCameraParametersforTopic(topic)
            camConfig_old, camNr_old = oldcamchain.getCameraParametersforTopic(topic)
            if camNr == 0 :
                # Python 3 print() (original used a py2 print statement)
                print(camNr)
                camH_topic = chain.getCameraParameters(camNr+1).getRosTopic()
                camConfig_old_H, camNr_old_H = oldcamchain.getCameraParametersforTopic(camH_topic)
                baseline = chain.getExtrinsicsLastCamToHere(camNr+1).inverse()
                oldcamchain.setExtrinsicsSomeCamToHere(camNr_old, baseline, camNr_old_H)
            else :
                camL_topic = chain.getCameraParameters(camNr-1).getRosTopic()
                camConfig_old_L, camNr_old_L = oldcamchain.getCameraParametersforTopic(camL_topic)
                baseline = chain.getExtrinsicsLastCamToHere(camNr)
                oldcamchain.setExtrinsicsSomeCamToHere(camNr_old, baseline, camNr_old_L)

        oldcamchain.writeYaml(chain.yamlFile)