Example #1
0
    def loadImuData(self):
        print "Reading IMU data ({0})".format(self.dataset.topic)

        # prepare progess bar
        iProgress = sm.Progress2(self.dataset.numMessages())
        iProgress.sample()

        Rgyro = np.eye(
            3) * self.gyroUncertaintyDiscrete * self.gyroUncertaintyDiscrete
        Raccel = np.eye(
            3) * self.accelUncertaintyDiscrete * self.accelUncertaintyDiscrete

        # Now read the imu measurements.
        imu = []
        for timestamp, omega, alpha in self.dataset:
            timestamp = acv.Time(timestamp.toSec())
            imu.append(
                self.ImuMeasurement(timestamp, omega, alpha, Rgyro, Raccel))
            iProgress.sample()

        self.imuData = imu

        if len(self.imuData) > 1:
            print "\r  Read %d imu readings over %.1f seconds                   " % (
                len(imu), imu[-1].stamp.toSec() - imu[0].stamp.toSec())
        else:
            sm.logFatal(
                "Could not find any IMU messages. Please check the dataset.")
            sys.exit(-1)
Example #2
0
    def maximization_step(self, t_source, target, estep_res, w=0.0,
                          objective_type='pt2pt'):
        """Run the registration maximization step.

        Builds one Euclidean error term per source point from the E-step
        sufficient statistics and optimizes the extrinsic transformation
        design variable self.T_b_l_Dv.

        Args:
            t_source: (m, 3) array of transformed source points.
            target: target point array; only its row count n is used here.
            estep_res: tuple (m0, m1, nx) of E-step sufficient statistics.
            w: outlier weight in [0, 1); w=1 would divide by zero.
            objective_type: only 'pt2pt' is supported.

        Returns:
            MstepResult with the optimized transformation matrix and the
            summed per-term residual norm q.

        Raises:
            ValueError: if objective_type is not 'pt2pt'.
        """
        m, ndim = t_source.shape
        n = target.shape[0]
        assert ndim == 3, "ndim must be 3."
        m0, m1, nx = estep_res
        c = w / (1.0 - w) * n / m
        # Guard the division below. NOTE: mutates the caller's m0 in place.
        m0[m0 == 0] = np.finfo(np.float32).eps
        m1m0 = np.divide(m1.T, m0).T
        m0m0 = m0 / (m0 + c)
        drxdx = m0m0
        errs = []
        self.problem.clearAllErrorTerms()
        if objective_type == 'pt2pt':
            for i in xrange(m):
                # Relative transform in the lidar frame:
                # T_l_l = T_b_l^-1 * T_sensor * T_b_l
                T_l_l = self.T_b_l_Dv.toExpression().inverse() * \
                        aopt.TransformationExpression(self._sensor_tfs[i]) * \
                        self.T_b_l_Dv.toExpression()
                predicted = T_l_l.toRotationExpression() * t_source[i] + T_l_l.toEuclideanExpression()
                # responsibility-weighted isotropic information matrix
                err = ket.EuclideanError(m1m0[i], drxdx[i]*np.eye(3, dtype=np.float64), predicted)
                errs.append(err)
                self.problem.addErrorTerm(err)
        else:
            raise ValueError('Unknown objective_type: %s.' % objective_type)

        # define the optimization
        options = aopt.Optimizer2Options()
        options.verbose = False
        options.linearSolver = aopt.BlockCholeskyLinearSystemSolver()
        options.nThreads = 2
        options.convergenceDeltaX = 1e-4
        options.convergenceJDescentRatioThreshold = 1e-6
        options.maxIterations = 50

        # run the optimization
        optimizer = aopt.Optimizer2(options)
        optimizer.setProblem(self.problem)

        # Fix: was a bare `except:` (also swallows SystemExit/KeyboardInterrupt)
        # with a log message copy-pasted from the orientation-prior routine.
        try:
            optimizer.optimize()
        except Exception:
            sm.logFatal("Failed to run maximization-step optimization!")
            sys.exit(-1)

        # total residual: sum of per-term error norms
        q = np.array([np.linalg.norm(e.error()) for e in errs]).sum()
        return MstepResult(self.T_b_l_Dv.toExpression().toTransformationMatrix(), q)
Example #3
0
 def loadImuData(self):
     """Read every IMU measurement from the dataset into self.imuData.

     Exits fatally when fewer than two IMU readings are found.
     """
     print "Reading IMU data ({0})".format(self.dataset.topic)

     # prepare progress bar
     iProgress = sm.Progress2( self.dataset.numMessages() )
     iProgress.sample()

     # isotropic 3x3 discrete-time measurement covariances
     Rgyro = np.eye(3) * self.gyroUncertaintyDiscrete * self.gyroUncertaintyDiscrete
     Raccel = np.eye(3) * self.accelUncertaintyDiscrete * self.accelUncertaintyDiscrete

     # Now read the imu measurements.
     imu = []
     for timestamp, omega, alpha in self.dataset:
         # convert the dataset stamp to an acv.Time
         timestamp = acv.Time( timestamp.toSec() )
         imu.append( self.ImuMeasurement(timestamp, omega, alpha, Rgyro, Raccel) )
         iProgress.sample()

     self.imuData = imu

     if len(self.imuData)>1:
         # trailing spaces overwrite the progress-bar line
         print "\r  Read %d imu readings over %.1f seconds                   " % (len(imu), imu[-1].stamp.toSec() - imu[0].stamp.toSec())
     else:
         sm.logFatal("Could not find any IMU messages. Please check the dataset.")
         sys.exit(-1)
Example #4
0
    def findOrientationPriorCameraToImu(self, imu):
        print
        print "Estimating imu-camera rotation prior"

        # build the problem
        problem = aopt.OptimizationProblem()

        # Add the rotation as design variable
        q_i_c_Dv = aopt.RotationQuaternionDv(self.T_extrinsic.q())
        q_i_c_Dv.setActive(True)
        problem.addDesignVariable(q_i_c_Dv)

        # Add the gyro bias as design variable
        gyroBiasDv = aopt.EuclideanPointDv(np.zeros(3))
        gyroBiasDv.setActive(True)
        problem.addDesignVariable(gyroBiasDv)

        #initialize a pose spline using the camera poses
        poseSpline = self.initPoseSplineFromCamera(timeOffsetPadding=0.0)

        for im in imu.imuData:
            tk = im.stamp.toSec()
            if tk > poseSpline.t_min() and tk < poseSpline.t_max():
                #DV expressions
                R_i_c = q_i_c_Dv.toExpression()
                bias = gyroBiasDv.toExpression()

                #get the vision predicted omega and measured omega (IMU)
                omega_predicted = R_i_c * aopt.EuclideanExpression(
                    np.matrix(
                        poseSpline.angularVelocityBodyFrame(tk)).transpose())
                omega_measured = im.omega

                #error term
                gerr = ket.GyroscopeError(omega_measured, im.omegaInvR,
                                          omega_predicted, bias)
                problem.addErrorTerm(gerr)

        #define the optimization
        options = aopt.Optimizer2Options()
        options.verbose = False
        options.linearSolver = aopt.BlockCholeskyLinearSystemSolver()
        options.nThreads = 2
        options.convergenceDeltaX = 1e-4
        options.convergenceDeltaJ = 1
        options.maxIterations = 50

        #run the optimization
        optimizer = aopt.Optimizer2(options)
        optimizer.setProblem(problem)

        #get the prior
        try:
            optimizer.optimize()
        except:
            sm.logFatal("Failed to obtain orientation prior!")
            sys.exit(-1)

        #overwrite the external rotation prior (keep the external translation prior)
        R_i_c = q_i_c_Dv.toRotationMatrix().transpose()
        self.T_extrinsic = sm.Transformation(
            sm.rt2Transform(R_i_c, self.T_extrinsic.t()))

        #set the gyro bias prior (if we have more than 1 cameras use recursive average)
        b_gyro = bias.toEuclidean()
        imu.GyroBiasPriorCount += 1
        imu.GyroBiasPrior = (
            imu.GyroBiasPriorCount - 1.0
        ) / imu.GyroBiasPriorCount * imu.GyroBiasPrior + 1.0 / imu.GyroBiasPriorCount * b_gyro

        #print result
        print "  Orientation prior camera-imu found as: (T_i_c)"
        print R_i_c
        print "  Gyro bias prior found as: (b_gyro)"
        print b_gyro
Example #5
0
def extractCornersFromDataset(dataset,
                              detector,
                              multithreading=False,
                              numProcesses=None,
                              clearImages=True,
                              noTransformation=False):
    """Extract calibration-target corner observations from every image.

    Args:
        dataset: image source providing numImages(), readDataset(), topic.
        detector: calibration-target detector; shallow-copied per worker
            in the multiprocessing path.
        multithreading: if True, fan extraction out over worker processes
            via multicoreExtractionWrapper.
        numProcesses: worker count; defaults to cpu_count() - 1 (min 1).
        noTransformation: use findTargetNoTransformation instead of
            findTarget.

    Returns:
        List of successful observations. In the multiprocessing path they
        are sorted by image index; the single-threaded path keeps dataset
        order.
    """
    print("Extracting calibration target corners")
    targetObservations = []
    numImages = dataset.numImages()

    # prepare progress bar
    iProgress = sm.Progress2(numImages)
    iProgress.sample()

    if multithreading:
        if not numProcesses:
            numProcesses = max(1, multiprocessing.cpu_count() - 1)
        try:
            # manager-backed queues: one for tasks, one for results
            manager = multiprocessing.Manager()
            resultq = manager.Queue()
            manager2 = multiprocessing.Manager()
            taskq = manager2.Queue()

            # enqueue every image; idx records the original time order
            for idx, (timestamp, image) in enumerate(dataset.readDataset()):
                taskq.put((idx, timestamp, image))

            plist = list()
            for pidx in range(0, numProcesses):
                # each worker gets its own (shallow) detector copy
                detector_copy = copy.copy(detector)
                p = multiprocessing.Process(target=multicoreExtractionWrapper,
                                            args=(
                                                detector_copy,
                                                taskq,
                                                resultq,
                                                clearImages,
                                                noTransformation,
                                            ))
                p.start()
                plist.append(p)

            # wait for workers; estimate progress from the task-queue drain
            last_done = 0
            while 1:
                if all([not p.is_alive() for p in plist]):
                    time.sleep(0.1)
                    break
                done = numImages - taskq.qsize()
                sys.stdout.flush()
                if (done - last_done) > 0:
                    iProgress.sample(done - last_done)
                last_done = done
                time.sleep(0.5)
            # sentinel terminates the iter(resultq.get, 'STOP') loop below
            resultq.put('STOP')
        except Exception as e:
            raise RuntimeError(
                "Exception during multithreaded extraction: {0}".format(e))

        # get result sorted by time (=idx)
        if resultq.qsize() > 1:
            targetObservations = [[]] * (resultq.qsize() - 1)
            for lidx, data in enumerate(iter(resultq.get, 'STOP')):
                obs = data[0]
                time_idx = data[1]
                targetObservations[lidx] = (time_idx, obs)
            # sort (idx, obs) pairs by idx, then keep only the obs column
            targetObservations = list(
                zip(*sorted(targetObservations, key=lambda tup: tup[0])))[1]
        else:
            targetObservations = []

    # single threaded implementation
    else:
        for timestamp, image in dataset.readDataset():
            if noTransformation:
                success, observation = detector.findTargetNoTransformation(
                    timestamp, np.array(image))
            else:
                success, observation = detector.findTarget(
                    timestamp, np.array(image))
            if clearImages:
                # free the image data once corners are extracted
                observation.clearImage()
            if success == 1:
                targetObservations.append(observation)
            iProgress.sample()

    if len(targetObservations) == 0:
        print("\r")
        sm.logFatal(
            "No corners could be extracted for camera {0}! Check the calibration target configuration and dataset."
            .format(dataset.topic))
    else:
        print(
            "\r  Extracted corners for %d images (of %d images)                              "
            % (len(targetObservations), numImages))

    # close all opencv windows that might be open
    cv2.destroyAllWindows()

    return targetObservations
Example #6
0
            for lidx, data in enumerate(iter(resultq.get, 'STOP')):
                obs=data[0]; time_idx = data[1]
                targetObservations[lidx] = (time_idx, obs)
            targetObservations = list(zip(*sorted(targetObservations, key=lambda tup: tup[0]))[1])
        else:
            targetObservations=[]
    
    #single threaded implementation
    else:
        for timestamp, image in dataset.readDataset():
            if noTransformation:
                success, observation = detector.findTargetNoTransformation(timestamp, np.array(image))
            else:
                success, observation = detector.findTarget(timestamp, np.array(image))
            if clearImages:
                observation.clearImage()
            if success == 1:
                targetObservations.append(observation)
            iProgress.sample()

    if len(targetObservations) == 0:
        print "\r"
        sm.logFatal("No corners could be extracted for camera {0}! Check the calibration target configuration and dataset.".format(dataset.topic))
    else:    
        print "\r  Extracted corners for %d images (of %d images)                              " % (len(targetObservations), numImages)

    #close all opencv windows that might be open
    cv2.destroyAllWindows()
    
    return targetObservations
Example #7
0
            targetObservations = []

    #single threaded implementation
    else:
        for timestamp, image in dataset.readDataset():
            if noTransformation:
                success, observation = detector.findTargetNoTransformation(
                    timestamp, np.array(image))
            else:
                success, observation = detector.findTarget(
                    timestamp, np.array(image))
            if clearImages:
                observation.clearImage()
            if success == 1:
                targetObservations.append(observation)
            iProgress.sample()

    if len(targetObservations) == 0:
        print "\r"
        sm.logFatal(
            "No corners could be extracted for camera {0}! Check the calibration target configuration and dataset."
            .format(dataset.topic))
    else:
        print "\r  Extracted corners for %d images (of %d images)                              " % (
            len(targetObservations), numImages)

    #close all opencv windows that might be open
    cv2.destroyAllWindows()

    return targetObservations
Example #8
0
    def findOrientationPriorCameraToImu(self, imu):
        """Estimate a camera-IMU rotation prior.

        Optimizes the camera-IMU rotation (plus a constant gyro bias) so the
        camera pose-spline angular velocity matches the gyroscope
        measurements, then overwrites the rotation part of self.T_extrinsic
        (keeping its translation) and updates the running average stored in
        imu.GyroBiasPrior.
        """
        print
        print "Estimating imu-camera rotation prior"

        # build the problem
        problem = aopt.OptimizationProblem()

        # Add the rotation as design variable
        q_i_c_Dv = aopt.RotationQuaternionDv(  self.T_extrinsic.q() )
        q_i_c_Dv.setActive( True )
        problem.addDesignVariable(q_i_c_Dv)

        # Add the gyro bias as design variable
        gyroBiasDv = aopt.EuclideanPointDv( np.zeros(3) )
        gyroBiasDv.setActive( True )
        problem.addDesignVariable(gyroBiasDv)

        # initialize a pose spline using the camera poses
        poseSpline = self.initPoseSplineFromCamera( timeOffsetPadding=0.0 )

        for im in imu.imuData:
            tk = im.stamp.toSec()
            # only use measurements covered by the spline's time support
            if tk > poseSpline.t_min() and tk < poseSpline.t_max():
                # DV expressions
                # NOTE(review): loop-invariant, could be hoisted; `bias` is
                # also read after the loop, so it would be undefined if no
                # measurement fell inside the spline support.
                R_i_c = q_i_c_Dv.toExpression()
                bias = gyroBiasDv.toExpression()

                # get the vision predicted omega and measured omega (IMU)
                omega_predicted = R_i_c * aopt.EuclideanExpression( np.matrix( poseSpline.angularVelocityBodyFrame( tk ) ).transpose() )
                omega_measured = im.omega

                # error term
                gerr = ket.GyroscopeError(omega_measured, im.omegaInvR, omega_predicted, bias)
                problem.addErrorTerm(gerr)

        # define the optimization
        options = aopt.Optimizer2Options()
        options.verbose = False
        options.linearSolver = aopt.BlockCholeskyLinearSystemSolver()
        options.nThreads = 2
        options.convergenceDeltaX = 1e-4
        options.convergenceDeltaJ = 1
        options.maxIterations = 50

        # run the optimization
        optimizer = aopt.Optimizer2(options)
        optimizer.setProblem(problem)

        # get the prior
        try:
            optimizer.optimize()
        except:  # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt
            sm.logFatal("Failed to obtain orientation prior!")
            sys.exit(-1)

        # overwrite the external rotation prior (keep the external translation prior)
        R_i_c = q_i_c_Dv.toRotationMatrix().transpose()
        self.T_extrinsic = sm.Transformation( sm.rt2Transform( R_i_c, self.T_extrinsic.t() ) )

        # set the gyro bias prior (if we have more than 1 cameras use recursive average)
        b_gyro = bias.toEuclidean()
        imu.GyroBiasPriorCount += 1
        imu.GyroBiasPrior = (imu.GyroBiasPriorCount-1.0)/imu.GyroBiasPriorCount * imu.GyroBiasPrior + 1.0/imu.GyroBiasPriorCount*b_gyro

        # print result
        print "  Orientation prior camera-imu found as: (T_i_c)"
        print R_i_c
        print "  Gyro bias prior found as: (b_gyro)"
        print b_gyro