Example No. 1
 def fmin_calibration_func(self,displacements):
     
     # initial Mhat / camera center guess:
     Mhat, residuals = camera_math.getMhat(self.calibration_raw_6d)
     displaced_calibration = copy.copy(self.calibration_raw_6d)
     diff_Mhat = 100
     
     while diff_Mhat > 0.0005:
         camera_center = np.array(camera_math.center(Mhat).T[0])
         for i in range(displaced_calibration.shape[0]):
             # for distance correction term, the pan portion uses the planar radial distance, the tilt portion uses the full distance
             dist = np.linalg.norm(displaced_calibration[i,3:6] - camera_center)
             planar_dist = np.cos(self.calibration_raw_6d[i,1])*dist
             dist_arr = [planar_dist, dist]
             for j in range(2):
                 displaced_calibration[i,j] = displaced_calibration[i,j] + np.pi/2 - np.arccos(displacements[j]/dist_arr[j])
         new_Mhat, new_residuals = camera_math.getMhat(displaced_calibration)
         diff_Mhat = np.sum( np.abs(new_Mhat - Mhat) )
         Mhat = new_Mhat
         self.Mhat = Mhat
         self.camera_center = camera_center
         residuals = new_residuals
     print residuals, self.camera_center
     
     return residuals
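The correction term above, np.pi/2 - np.arccos(d/r), is equivalent to arcsin(d/r): the angle subtended at range r by a lateral offset d of the pan or tilt axis. A minimal stand-alone sketch of that identity (the numbers are hypothetical, not from a real calibration):

import numpy as np

d = 0.02   # hypothetical lateral offset of the pan/tilt axis (same units as r)
r = 1.5    # hypothetical distance from the camera center to the calibration point

correction = np.pi/2 - np.arccos(d/r)             # the term used in fmin_calibration_func
assert np.allclose(correction, np.arcsin(d/r))    # arccos(x) + arcsin(x) == pi/2
print('angular correction (rad): %f' % correction)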
Example No. 2
    def calibrate(self):

        K = cvNumpy.array_to_mat(np.eye(3))
        rvec_np = np.random.random(3)
        rvec = cvNumpy.array_to_mat(rvec_np)
        tvec_np = np.random.random(3)
        tvec = cvNumpy.array_to_mat(tvec_np)
        distCoeffs = cvNumpy.array_to_mat(np.zeros(4))

        # convert rotation vector to rotation matrix
        rmat = cvNumpy.array_to_mat(np.zeros([3,3]))
        cv.Rodrigues2( rvec, rmat )

        # generate P matrix
        self.R = cvNumpy.mat_to_array(rmat)
        self.t = cvNumpy.mat_to_array(tvec)
        self.K = cvNumpy.mat_to_array(K)
        Rt = np.hstack((self.R,self.t.T))
        self.P = np.dot( self.K, Rt )
        self.Mhat = self.P
        self.camera_center = camera_math.center(self.P)[:,0]

        # inverse Mhat:
        self.Mhatinv = np.linalg.pinv(self.Mhat)


        # generate fake 3D data:
        fake3d = np.random.random([10,3])
        fake2d = np.zeros([10,2])
        for i in range(np.shape(fake3d)[0]):
            p,t,f = self.to_motor_coords(fake3d[i,:])
            fake2d[i,:] = [p,t]


        ################# calibration #################
        
        K_new = cvNumpy.array_to_mat(np.eye(3))
        rvec_np_new = np.random.random(3)
        rvec_new = cvNumpy.array_to_mat(rvec_np_new)
        tvec_np_new = np.random.random(3)
        tvec_new = cvNumpy.array_to_mat(tvec_np_new)
        distCoeffs_new = cvNumpy.array_to_mat(np.zeros(4))

        points3D = cvNumpy.array_to_mat(np.asarray( fake3d ))
        points2D = cvNumpy.array_to_mat(np.asarray( np.tan( fake2d ) ))  # pan/tilt angles -> tangent-plane coordinates

        # recover the extrinsics from the synthetic correspondences; rvec_new/tvec_new are output buffers
        cv.FindExtrinsicCameraParams2(points3D, points2D, K_new, distCoeffs_new, rvec_new, tvec_new)

        tvec_np_new = cvNumpy.mat_to_array(tvec_new)
        rvec_np_new = cvNumpy.mat_to_array(rvec_new)
        
        print 'original tvec: ', tvec_np
        print 'new tvec: ', tvec_np_new
        print 
        print 'original rvec: ', rvec_np
        print 'new rvec: ', rvec_np_new
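For reference, the same synthetic round-trip check can be sketched against the modern cv2 API (cv2.Rodrigues and cv2.solvePnP standing in for the legacy cv/cvNumpy calls, class context omitted); this is an illustration under those assumptions, not the original implementation:

import numpy as np
import cv2

K = np.eye(3)
rvec_true = np.random.random(3)
tvec_true = np.random.random(3)
R_true, _ = cv2.Rodrigues(rvec_true.reshape(3, 1))           # rotation vector -> 3x3 matrix
P = np.dot(K, np.hstack((R_true, tvec_true.reshape(3, 1))))

# project random 3D points through the known P (pinhole model, no distortion)
pts3d = np.random.random((10, 3))
pts3d_h = np.hstack((pts3d, np.ones((10, 1))))
proj = np.dot(pts3d_h, P.T)
pts2d = proj[:, :2] / proj[:, 2:3]

# recover the extrinsics from the synthetic correspondences
ok, rvec_est, tvec_est = cv2.solvePnP(pts3d, pts2d, K, np.zeros(4))
print('original rvec: %s  recovered: %s' % (rvec_true, rvec_est.ravel()))
print('original tvec: %s  recovered: %s' % (tvec_true, tvec_est.ravel()))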
Example No. 3
 def calibrate(self):
     
     self.Mhat, residuals = camera_math.getMhat(self.data)
     self.camera_center = np.array(camera_math.center(self.Mhat).T[0])
     print '*'*80
     print 'DLT camera center: ', self.camera_center
     print 'Mhat: '
     print self.Mhat
     self.Mhatinv = np.linalg.pinv(self.Mhat)
     #self.Mhat3x3inv = np.linalg.inv(self.Mhat[:,0:3]) # this is the left 3x3 section of Mhat, inverted
     #self.Mhat3x1 = self.Mhat[:,3] # this is the rightmost vertical vector of Mhat
         
     self.focus.calibrate(data=self.data, camera_center=self.camera_center)
     
     return        
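camera_math.center is used as a black box here. For a finite camera the center can be recovered from Mhat = [M | m4] via C = -inv(M) * m4; the sketch below shows that relationship and is an assumption about what camera_math.center computes, not its actual source:

import numpy as np

def camera_center_from_P(P):
    # P = [M | m4]; the center C satisfies P.dot([C, 1]) == 0, so C = -inv(M).dot(m4)
    M = P[:, :3]
    m4 = P[:, 3]
    return -np.linalg.solve(M, m4)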
Example No. 4
 def calibrate_pt_dlt(self, data=None):
     if data is None:
         data = self.calibration_raw_6d
     data3d = data[:,3:6]
     data2d = data[:,0:2]
     Pmat, residuals = camera_math.DLT(data3d, data2d)
     self.Mhat = Pmat
     self.Mhatinv = np.linalg.pinv(self.Mhat)
     self.camera_center = camera_math.center(Pmat)
     
     print 'mhat: '
     print self.Mhat
     print 'camera center: '
     print self.camera_center
     print 'residuals: '
     print residuals
     print 'decomp: '
     camera_math.decomp(self.Mhat, SHOW=1)
     return
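camera_math.DLT is also used as a black box. A standard (unnormalized) direct linear transform estimate of the 3x4 projection matrix looks roughly like the sketch below; the project's own DLT may normalize points or weight residuals differently:

import numpy as np

def dlt(points3d, points2d):
    # each correspondence (X,Y,Z) <-> (u,v) contributes two rows to the system A p = 0
    A = []
    for (X, Y, Z), (u, v) in zip(points3d, points2d):
        A.append([X, Y, Z, 1, 0, 0, 0, 0, -u*X, -u*Y, -u*Z, -u])
        A.append([0, 0, 0, 0, X, Y, Z, 1, -v*X, -v*Y, -v*Z, -v])
    A = np.asarray(A, dtype=float)
    _, s, vt = np.linalg.svd(A)
    P = vt[-1].reshape(3, 4)     # right singular vector with the smallest singular value
    residual = s[-1]             # algebraic residual of the fit
    return P, residual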
Example No. 5
 def calibrate_pt(self, data=None, focal_length=1.):
     if data is None:
         data = self.calibration_raw_6d
     points3D = cvNumpy.array_to_mat(np.asarray( data[:,3:6] ))
     points2D = cvNumpy.array_to_mat(np.asarray( np.tan( data[:,0:2] ) ))  # pan/tilt angles -> tangent-plane (normalized image) coordinates
     K = cvNumpy.array_to_mat(np.eye(3))
     K[0,0] = focal_length
     K[1,1] = focal_length
     rvec = cvNumpy.array_to_mat(np.zeros(3))
     tvec = cvNumpy.array_to_mat(np.zeros(3))
     distCoeffs = cvNumpy.array_to_mat(np.zeros(4))
     cv.FindExtrinsicCameraParams2(points3D, points2D, K, distCoeffs, rvec, tvec)
     # "align_points" ? in flydra
     # convert rotation vector to rotation matrix
     rmat = cvNumpy.array_to_mat(np.zeros([3,3]))
     cv.Rodrigues2( rvec, rmat )
     
     # generate P matrix
     self.R = cvNumpy.mat_to_array(rmat)
     self.t = cvNumpy.mat_to_array(tvec)
     self.K = cvNumpy.mat_to_array(K)
     self.Rt = np.hstack((self.R,self.t.T))
     self.P = np.dot( self.K, self.Rt )
     self.Mhat = self.P
     self.camera_center = camera_math.center(self.P)[:,0]
     self.distCoeffs = cvNumpy.mat_to_array(distCoeffs)
     
     # get reprojection errors
     reprojected_points2D = cvNumpy.array_to_mat(np.asarray( data[:,0:2] ))  # output buffer; contents are overwritten by ProjectPoints2
     cv.ProjectPoints2(points3D, rvec, tvec, K, distCoeffs, reprojected_points2D)
     
     points2D_arr = cvNumpy.mat_to_array(points2D)
     reprojected_points2D_arr = cvNumpy.mat_to_array(reprojected_points2D)
     
     errors = [ np.linalg.norm(points2D_arr[i,:] - reprojected_points2D_arr[i,:]) for i in range(points2D_arr.shape[0]) ]
     
     # inverse Mhat:
     self.Mhatinv = np.linalg.pinv(self.Mhat)
     
     return np.mean(errors)
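As a quick sanity check of the recovered Mhat, a 3D point can be projected and mapped back to pan/tilt angles with arctan, the inverse of the tan() applied to the 2D data above. A hypothetical helper sketch under that assumption (project_to_pan_tilt is not part of the original code):

import numpy as np

def project_to_pan_tilt(Mhat, point3d):
    # homogeneous projection through the 3x4 matrix, then back from tangent-plane
    # coordinates to angles with arctan (inverse of the tan() mapping above)
    xyw = np.dot(Mhat, np.append(point3d, 1.0))
    u, v = xyw[0] / xyw[2], xyw[1] / xyw[2]
    return np.arctan(u), np.arctan(v)    # pan, tilt in radians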