Example #1
    def initDesignVariables(self, problem, poseSpline, noTimeCalibration, \
                            estimateGravityLength=False, initialGravityEstimate=np.array([0.0, 9.81, 0.0])):
        # Initialize the system pose spline
        self.poseDv = asp.BSplinePoseDesignVariable(poseSpline)
        addSplineDesignVariables(problem, self.poseDv)

        if self.ImuList:
            # Add the calibration target orientation design variable. (expressed as gravity vector in target frame)
            if estimateGravityLength:
                self.gravityDv = aopt.EuclideanPointDv(initialGravityEstimate)
            else:
                self.gravityDv = aopt.EuclideanDirection(
                    initialGravityEstimate)
            self.gravityExpression = self.gravityDv.toExpression()
            self.gravityDv.setActive(True)
            problem.addDesignVariable(self.gravityDv, HELPER_GROUP_ID)

        # Add targets, fix the first target
        self.targets[0].addDesignVariables(problem, fixed=True)
        for target in self.targets[1:]:
            target.addDesignVariables(problem)

        # Add all DVs for all IMUs
        for imu in self.ImuList:
            imu.addDesignVariables(problem)

        # Add all DVs for the camera chain
        self.CameraChain.addDesignVariables(problem, noTimeCalibration)

        for lidar in self.LiDARList:
            lidar.addDesignVariables(problem, noTimeCalibration)
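The helper addSplineDesignVariables used above is not part of this snippet. Judging from how it is called, it activates every design variable held by the pose-spline container and registers it with the problem under a group id. A minimal sketch of such a helper follows; the HELPER_GROUP_ID value, the setActive default, and the container methods numDesignVariables()/designVariable(i) are assumptions, not the library's confirmed interface.

# Minimal sketch of a helper like addSplineDesignVariables; the group id value
# and the container interface (numDesignVariables/designVariable) are assumptions.
HELPER_GROUP_ID = 1  # hypothetical group id for helper design variables

def addSplineDesignVariables(problem, dvc, setActive=True, group_id=HELPER_GROUP_ID):
    # Activate each spline design variable and add it to the optimization problem
    for i in range(dvc.numDesignVariables()):
        dv = dvc.designVariable(i)
        dv.setActive(setActive)
        problem.addDesignVariable(dv, group_id)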
Example #2
def addPoseDesignVariable(problem, T0=sm.Transformation()):
    # Rotation part as a quaternion design variable
    q_Dv = aopt.RotationQuaternionDv(T0.q())
    q_Dv.setActive(True)
    problem.addDesignVariable(q_Dv)

    # Translation part as a Euclidean point design variable
    t_Dv = aopt.EuclideanPointDv(T0.t())
    t_Dv.setActive(True)
    problem.addDesignVariable(t_Dv)

    # Combine both into a single transformation design variable
    return aopt.TransformationBasicDv(q_Dv.toExpression(), t_Dv.toExpression())
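A minimal usage sketch for this helper. The module aliases (aopt for aslam_backend, sm for the sm Python module) follow the convention of the other examples, and the toExpression() call on the returned design variable is an assumption here.

# Usage sketch; aliases and toExpression() on the returned DV are assumptions.
problem = aopt.OptimizationProblem()
T0 = sm.Transformation()                   # identity pose as the initial guess
T_dv = addPoseDesignVariable(problem, T0)  # adds rotation and translation DVs to the problem
T_expr = T_dv.toExpression()               # transformation expression for use in error terms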
Example #3
    def initDesignVariables(self, problem, poseSpline, noTimeCalibration, estimateGravityLength=False):
        # Initialize the system pose spline (always attached to imu0)
        self.poseDv = asp.BSplinePoseDesignVariable(poseSpline)
        addSplineDesignVariables(problem, self.poseDv)

        # Add the calibration target orientation design variable. (expressed as gravity vector in target frame)
        if estimateGravityLength:
            self.gravityDv = aopt.EuclideanPointDv(np.array([0.0, -9.81, 0.0]))
        else:
            self.gravityDv = aopt.EuclideanDirection(np.array([0.0, -9.81, 0.0]))
        self.gravityExpression = self.gravityDv.toExpression()
        self.gravityDv.setActive(True)
        problem.addDesignVariable(self.gravityDv, HELPER_GROUP_ID)

        # Add all DVs for all IMUs
        for imu in self.ImuList:
            imu.addDesignVariables(problem)

        # Add all DVs for the camera chain
        self.CameraChain.addDesignVariables(problem, noTimeCalibration)
Example #4
    def findOrientationPriorCameraToImu(self, imu):
        print()
        print("Estimating imu-camera rotation prior")

        # build the problem
        problem = aopt.OptimizationProblem()

        # Add the rotation as design variable
        q_i_c_Dv = aopt.RotationQuaternionDv(self.T_extrinsic.q())
        q_i_c_Dv.setActive(True)
        problem.addDesignVariable(q_i_c_Dv)

        # Add the gyro bias as design variable
        gyroBiasDv = aopt.EuclideanPointDv(np.zeros(3))
        gyroBiasDv.setActive(True)
        problem.addDesignVariable(gyroBiasDv)

        # Initialize a pose spline using the camera poses
        poseSpline = self.initPoseSplineFromCamera(timeOffsetPadding=0.0)

        for im in imu.imuData:
            tk = im.stamp.toSec()
            if tk > poseSpline.t_min() and tk < poseSpline.t_max():
                # DV expressions
                R_i_c = q_i_c_Dv.toExpression()
                bias = gyroBiasDv.toExpression()

                # Get the vision predicted omega and measured omega (IMU)
                omega_predicted = R_i_c * aopt.EuclideanExpression(
                    np.matrix(
                        poseSpline.angularVelocityBodyFrame(tk)).transpose())
                omega_measured = im.omega

                # Error term
                gerr = ket.GyroscopeError(omega_measured, im.omegaInvR,
                                          omega_predicted, bias)
                problem.addErrorTerm(gerr)

        # Define the optimization
        options = aopt.Optimizer2Options()
        options.verbose = False
        options.linearSolver = aopt.BlockCholeskyLinearSystemSolver()
        options.nThreads = 2
        options.convergenceDeltaX = 1e-4
        options.convergenceDeltaJ = 1
        options.maxIterations = 50

        # Run the optimization
        optimizer = aopt.Optimizer2(options)
        optimizer.setProblem(problem)

        # Get the prior
        try:
            optimizer.optimize()
        except Exception:
            sm.logFatal("Failed to obtain orientation prior!")
            sys.exit(-1)

        # Overwrite the external rotation prior (keep the external translation prior)
        R_i_c = q_i_c_Dv.toRotationMatrix().transpose()
        self.T_extrinsic = sm.Transformation(
            sm.rt2Transform(R_i_c, self.T_extrinsic.t()))

        # Set the gyro bias prior (if there is more than one camera, use a recursive average)
        b_gyro = gyroBiasDv.toExpression().toEuclidean()  # read the bias from the DV, not the loop-local expression
        imu.GyroBiasPriorCount += 1
        imu.GyroBiasPrior = (imu.GyroBiasPriorCount - 1.0) / imu.GyroBiasPriorCount * imu.GyroBiasPrior \
                            + 1.0 / imu.GyroBiasPriorCount * b_gyro

        # Print result
        print("  Orientation prior camera-imu found as: (T_i_c)")
        print(R_i_c)
        print("  Gyro bias prior found as: (b_gyro)")
        print(b_gyro)
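The gyro bias prior above is folded in with a recursive average, so after n calls it equals the plain mean of the n bias estimates. A small self-contained numpy check of that update rule, using made-up bias values and independent of the kalibr modules:

import numpy as np

# Hypothetical bias estimates from three successive calls
samples = [np.array([0.01, -0.02, 0.005]),
           np.array([0.02, -0.01, 0.000]),
           np.array([0.03, -0.03, 0.010])]

prior = np.zeros(3)
# Same update as above: prior_n = (n - 1)/n * prior_{n-1} + 1/n * b_n
for count, b_gyro in enumerate(samples, start=1):
    prior = (count - 1.0) / count * prior + 1.0 / count * b_gyro

assert np.allclose(prior, np.mean(samples, axis=0))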