def set_alpha(self, a):
    """
    Set the alpha value for the calibrated camera solution. The
    alpha value is a zoom, and ranges from 0 (zoomed in, all pixels
    in calibrated image are valid) to 1 (zoomed out, all pixels in
    original image are in calibrated image).
    """
    cv.StereoRectify(self.l.intrinsics, self.r.intrinsics,
                     self.l.distortion, self.r.distortion,
                     self.size,
                     self.R, self.T,
                     self.l.R, self.r.R, self.l.P, self.r.P,
                     alpha=a)
    cv.InitUndistortRectifyMap(self.l.intrinsics, self.l.distortion,
                               self.l.R, self.l.P,
                               self.l.mapx, self.l.mapy)
    cv.InitUndistortRectifyMap(self.r.intrinsics, self.r.distortion,
                               self.r.R, self.r.P,
                               self.r.mapx, self.r.mapy)
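A minimal usage sketch of the maps this method fills in. The names `model`, `raw_l`, and `raw_r` are hypothetical; the sketch assumes the left/right map buffers were allocated to match the image size, and simply shows how the rectification maps are normally consumed:

    model.set_alpha(0.0)         # keep only pixels valid in both rectified views
    rect_l = cv.CloneMat(raw_l)  # allocate outputs with the same size/type
    rect_r = cv.CloneMat(raw_r)
    cv.Remap(raw_l, rect_l, model.l.mapx, model.l.mapy)
    cv.Remap(raw_r, rect_r, model.r.mapx, model.r.mapy)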
def __init__(self, config):
    self.config = config.rig
    self.performance = config.performance
    # Load the stereo calibration produced by the MATLAB calibration
    # toolbox: intrinsics (KK_*), distortion (kc_*), and the
    # inter-camera extrinsics (R, T).
    cal_mat = loadmat(self.config.calib_file)
    self.size = (cal_mat.get('ny')[0, 0], cal_mat.get('nx')[0, 0])
    self.KK_right = cv.fromarray(cal_mat.get('KK_right').copy())
    self.KK_left = cv.fromarray(cal_mat.get('KK_left').copy())
    self.kc_right = cv.fromarray(cal_mat.get('kc_right').copy())
    self.kc_left = cv.fromarray(cal_mat.get('kc_left').copy())
    self.T = cv.fromarray(cal_mat.get('T').copy())
    self.R = cv.fromarray(cal_mat.get('R').copy())
    if self.performance.predownscale:
        # Halving the image size halves the focal lengths and the
        # principal point; the distortion coefficients are unchanged.
        self.size = (self.size[0] / 2, self.size[1] / 2)
        self.KK_left[0, 0] /= 2.
        self.KK_left[1, 1] /= 2.
        self.KK_left[0, 2] /= 2.
        self.KK_left[1, 2] /= 2.
        self.KK_right[0, 0] /= 2.
        self.KK_right[1, 1] /= 2.
        self.KK_right[0, 2] /= 2.
        self.KK_right[1, 2] /= 2.
    self.size_short = (self.size[0] / self.performance.short_factor,
                       self.size[1])
    self.size_small = (self.size[0] / self.performance.small_factor,
                       self.size[1] / self.performance.small_factor)
    # Outputs of cv.StereoRectify: rectifying rotations R1/R2, rectified
    # projection matrices P1/P2, and the disparity-to-depth matrix Q.
    self.R1 = cv.CreateMat(3, 3, cv.CV_64F)
    self.R2 = cv.CreateMat(3, 3, cv.CV_64F)
    self.P1 = cv.CreateMat(3, 4, cv.CV_64F)
    self.P2 = cv.CreateMat(3, 4, cv.CV_64F)
    self.Q = cv.CreateMat(4, 4, cv.CV_64F)
    self.rect_map1x = cv.CreateMat(self.size[0], self.size[1], cv.CV_32FC1)
    self.rect_map1y = cv.CreateMat(self.size[0], self.size[1], cv.CV_32FC1)
    self.rect_map2x = cv.CreateMat(self.size[0], self.size[1], cv.CV_32FC1)
    self.rect_map2y = cv.CreateMat(self.size[0], self.size[1], cv.CV_32FC1)
    cv.StereoRectify(self.KK_left, self.KK_right,
                     self.kc_left, self.kc_right,
                     self.size, self.R, self.T,
                     self.R1, self.R2, self.P1, self.P2,
                     self.Q)  # , alpha=0
    cv.InitUndistortRectifyMap(self.KK_left, self.kc_left, self.R1, self.P1,
                               self.rect_map1x, self.rect_map1y)
    cv.InitUndistortRectifyMap(self.KK_right, self.kc_right, self.R2, self.P2,
                               self.rect_map2x, self.rect_map2y)
    self.rotscale_map = cv.CreateMat(2, 3, cv.CV_32FC1)
    self.alt = np.float32(np.zeros(self.size_short[0]))
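Once the constructor has run, the precomputed maps and Q can be used directly. A hedged sketch, assuming `rig` is an instance of this class and `left_raw`/`right_raw` are CvMats matching `rig.size`:

    left_rect = cv.CloneMat(left_raw)
    right_rect = cv.CloneMat(right_raw)
    cv.Remap(left_raw, left_rect, rig.rect_map1x, rig.rect_map1y)
    cv.Remap(right_raw, right_rect, rig.rect_map2x, rig.rect_map2y)
    # rig.Q maps (x, y, disparity, 1) to homogeneous 3D points, e.g. via
    # cv.ReprojectImageTo3D(disparity, depth_image, rig.Q)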
def rectifyImage(self, raw, rectified):
    """
    :param raw: input image
    :type raw: :class:`CvMat` or :class:`IplImage`
    :param rectified: rectified output image
    :type rectified: :class:`CvMat` or :class:`IplImage`

    Applies the rectification specified by camera parameters
    :math:`K` and :math:`D` to image `raw` and writes the resulting
    image `rectified`.
    """
    self.mapx = cv.CreateImage((self.width, self.height), cv.IPL_DEPTH_32F, 1)
    self.mapy = cv.CreateImage((self.width, self.height), cv.IPL_DEPTH_32F, 1)
    cv.InitUndistortRectifyMap(self.K, self.D, self.R, self.P,
                               self.mapx, self.mapy)
    cv.Remap(raw, rectified, self.mapx, self.mapy)
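A hedged usage sketch; `cam` stands for a camera model instance whose `width`, `height`, `K`, `D`, `R`, and `P` have already been set from a calibration, and the filenames are hypothetical:

    raw = cv.LoadImageM('frame.png')
    rectified = cv.CloneMat(raw)
    cam.rectifyImage(raw, rectified)
    cv.SaveImage('frame_rect.png', rectified)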
def set_alpha(self, a):
    """
    Set the alpha value for the calibrated camera solution. The
    alpha value is a zoom, and ranges from 0 (zoomed in, all pixels
    in calibrated image are valid) to 1 (zoomed out, all pixels in
    original image are in calibrated image).
    """
    # NOTE: Prior to Electric, this code was broken such that we never
    # actually saved the new camera matrix. In effect, this enforced
    # P = [K|0] for monocular cameras.
    # TODO: Verify that OpenCV #1199 gets applied (improved
    # GetOptimalNewCameraMatrix)
    ncm = cv.GetSubRect(self.P, (0, 0, 3, 3))
    cv.GetOptimalNewCameraMatrix(self.intrinsics, self.distortion,
                                 self.size, a, ncm)
    cv.InitUndistortRectifyMap(self.intrinsics, self.distortion,
                               self.R, ncm, self.mapx, self.mapy)
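The effect of alpha on a monocular model can be sketched as below. The names `cam`, `raw`, `rect_cropped`, and `rect_full` are hypothetical (the output mats are assumed pre-allocated); the two calls trade cropping against coverage exactly as the docstring describes:

    cam.set_alpha(0.0)   # tight crop: every output pixel is valid
    cam.rectifyImage(raw, rect_cropped)
    cam.set_alpha(1.0)   # full frame: all source pixels kept, black borders appear
    cam.rectifyImage(raw, rect_full)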
def calibrateFromObservations(self, observations, calibextrinsic=True,
                              pruneoutliers=True, full_output=True,
                              fixprincipalpoint=False, Tcameraestimate=None):
    while True:
        if len(observations) < 3:
            raise self.CalibrationError('too few observations: %d!' % len(observations), action=1)
        object_points = self.getObjectPoints()
        all_object_points = []
        all_corners = []
        imagesize = None
        for o in observations:
            all_object_points.append(object_points)
            all_corners.append(o['corners_raw'])
            if imagesize:
                assert imagesize[0] == o['image_raw'].shape[1] and imagesize[1] == o['image_raw'].shape[0]
            else:
                imagesize = (o['image_raw'].shape[1], o['image_raw'].shape[0])
        intrinsiccondition = self.validateCalibrationData(all_object_points, all_corners)
        if intrinsiccondition is None:
            raise self.CalibrationError('bad condition number, %d observations' % (len(observations)), action=1)
        KKorig, kcorig, Traws, error_intrinsic, error_grad = self.calibrateIntrinsicCamera(
            all_object_points, all_corners, imagesize,
            fixprincipalpoint=fixprincipalpoint, computegradients=True)
        cvKKorig = cv.fromarray(KKorig)
        cvkcorig = cv.fromarray(kcorig)
        thresh = median(error_intrinsic) + 0.5
        if any(error_intrinsic > thresh):
            # compute again using a pruned set of observations
            print 'pruning observations (thresh=%f) because intrinsic error is: ' % thresh, error_intrinsic
            observations = [o for i, o in enumerate(observations)
                            if error_intrinsic[i] <= thresh]
        else:
            if mean(error_intrinsic) > 0.6:
                raise self.CalibrationError('intrinsic error is huge (%s)! giving up, KK=(%s)' % (str(error_intrinsic), str(KKorig)))
            break
    if not calibextrinsic:
        return KKorig, kcorig, None, {'error_intrinsic': error_intrinsic,
                                      'intrinsiccondition': intrinsiccondition,
                                      'error_grad': error_grad}
    if Tcameraestimate is None:
        raise TypeError('Tcameraestimate needs to be initialized to a 4x4 matrix')
    # unwarp all images and re-detect the checkerboard patterns
    cvKK = cv.CreateMat(3, 3, cv.CV_64F)
    cv.GetOptimalNewCameraMatrix(cvKKorig, cvkcorig, imagesize, 0, cvKK)
    cvUndistortionMapX = cv.CreateMat(imagesize[1], imagesize[0], cv.CV_32F)
    cvUndistortionMapY = cv.CreateMat(imagesize[1], imagesize[0], cv.CV_32F)
    cv.InitUndistortRectifyMap(cvKKorig, cvkcorig, cv.fromarray(eye(3)), cvKK,
                               cvUndistortionMapX, cvUndistortionMapY)
    KK = array(cvKK)
    KKinv = linalg.inv(KK)
    if full_output:
        print 'KKorig: ', KKorig, ' KK:', KK
    cvimage = None
    cvkczero = cv.fromarray(zeros(kcorig.shape))
    cvrvec = cv.CreateMat(1, 3, cv.CV_64F)
    cvtvec = cv.CreateMat(1, 3, cv.CV_64F)
    all_optimization_data = []
    Tworldpatternestimates = []
    for i, o in enumerate(observations):
        cvimage_dist = cv.fromarray(o['image_raw'])
        if cvimage is None:
            cvimage = cv.CloneMat(cvimage_dist)
        cv.Remap(cvimage_dist, cvimage, cvUndistortionMapX, cvUndistortionMapY,
                 cv.CV_INTER_LINEAR + cv.CV_WARP_FILL_OUTLIERS)
        corners = self.detect(cvimage)
        if corners is not None:
            cv.FindExtrinsicCameraParams2(cv.fromarray(object_points),
                                          cv.fromarray(corners),
                                          cvKK, cvkczero, cvrvec, cvtvec)
            T = matrixFromAxisAngle(array(cvrvec)[0])
            T[0:3, 3] = array(cvtvec)[0]
            Tworldpatternestimates.append(dot(dot(o['Tlink'], Tcameraestimate), T))
            all_optimization_data.append((transformPoints(KKinv, corners),
                                          linalg.inv(o['Tlink'])))
        else:
            print 'could not detect original pattern ', i
    # We have the following equation:
    #   Tlink * Tcamera * Tdetectedpattern * corners3d = Tworldpattern * corners3d
    # and need to solve for Tcamera and Tworldpattern. Instead of using
    # Tdetectedpattern directly, use the projected difference:
    #   corners - proj( inv(Tcamera) * inv(Tlink) * Tworldpattern * corners3d )
    corners3d = self.getObjectPoints()
    quatWorldPatternEstimates = array([quatFromRotationMatrix(T[0:3, 0:3])
                                       for T in Tworldpatternestimates])
    quatWorldPatternInitial, success = leastsq(
        lambda q: quatArrayTDist(q / sqrt(sum(q**2)), quatWorldPatternEstimates),
        quatWorldPatternEstimates[0], maxfev=100000, epsfcn=1e-6)
    rWorldPatternInitial = zeros(6, float64)
    rWorldPatternInitial[0:3] = axisAngleFromQuat(quatWorldPatternInitial)
    rWorldPatternInitial[3:6] = mean(array([T[0:3, 3] for T in Tworldpatternestimates]), 0)
    Tcameraestimateinv = linalg.inv(Tcameraestimate)
    rCameraInitial = zeros(6, float64)
    rCameraInitial[0:3] = axisAngleFromRotationMatrix(Tcameraestimateinv[0:3, 0:3])
    rCameraInitial[3:6] = Tcameraestimateinv[0:3, 3]

    def errorfn(x, optimization_data):
        Tworldpattern = matrixFromAxisAngle(x[0:3])
        Tworldpattern[0:3, 3] = x[3:6]
        Tcamerainv = matrixFromAxisAngle(x[6:9])
        Tcamerainv[0:3, 3] = x[9:12]
        err = zeros(len(optimization_data) * len(corners3d) * 2)
        off = 0
        for measuredcameracorners, Tlinkinv in optimization_data:
            cameracorners3d = transformPoints(dot(dot(Tcamerainv, Tlinkinv), Tworldpattern), corners3d)
            iz = 1.0 / cameracorners3d[:, 2]
            err[off:(off + len(corners3d))] = measuredcameracorners[:, 0] - cameracorners3d[:, 0] * iz
            off += len(corners3d)
            err[off:(off + len(corners3d))] = measuredcameracorners[:, 1] - cameracorners3d[:, 1] * iz
            off += len(corners3d)
        if full_output:
            print 'rms: ', sqrt(sum(err**2))
        return err

    optimization_data = all_optimization_data
    xorig, cov_x, infodict, mesg, iter = leastsq(
        lambda x: errorfn(x, all_optimization_data),
        r_[rWorldPatternInitial, rCameraInitial],
        maxfev=100000, epsfcn=1e-6, full_output=1)
    if pruneoutliers:
        # prune the images with the most error
        errors = reshape(errorfn(xorig, all_optimization_data)**2,
                         (len(all_optimization_data), len(corners3d) * 2))
        errorreprojection = mean(sqrt(KK[0, 0]**2 * errors[:, 0:len(corners3d)] +
                                      KK[1, 1]**2 * errors[:, len(corners3d):]), 1)
        # thresh = mean(errorreprojection) + std(errorreprojection)
        thresh = median(errorreprojection) + 20  # 0.5*std(errorreprojection)
        print 'thresh:', thresh, 'errors:', errorreprojection
        optimization_data = [all_optimization_data[i]
                             for i in flatnonzero(errorreprojection <= thresh)]
        x, cov_x, infodict, mesg, iter = leastsq(
            lambda x: errorfn(x, optimization_data), xorig,
            maxfev=100000, epsfcn=1e-6, full_output=1)
    else:
        x = xorig
    Tcamerainv = matrixFromAxisAngle(x[6:9])
    Tcamerainv[0:3, 3] = x[9:12]
    Tcamera = linalg.inv(Tcamerainv)
    Tworldpattern = matrixFromAxisAngle(x[0:3])
    Tworldpattern[0:3, 3] = x[3:6]
    points3d = self.Compute3DObservationPoints(Tcamerainv, optimization_data)
    if full_output:
        errors = reshape(errorfn(x, optimization_data)**2,
                         (len(optimization_data), len(corners3d) * 2))
        error_reprojection = sqrt(KK[0, 0]**2 * errors[:, 0:len(corners3d)] +
                                  KK[1, 1]**2 * errors[:, len(corners3d):]).flatten()
        print 'final reprojection error (pixels): ', mean(error_reprojection), std(error_reprojection)
        error_3d = sqrt(sum((transformPoints(Tworldpattern, corners3d) - points3d)**2, 1))
        print '3d error: ', mean(error_3d), std(error_3d)
        return KKorig, kcorig, Tcamera, {'error_reprojection': error_reprojection,
                                         'error_3d': error_3d,
                                         'error_intrinsic': error_intrinsic,
                                         'intrinsiccondition': intrinsiccondition,
                                         'error_grad': error_grad,
                                         'KK': array(cvKK)}
    return KKorig, kcorig, Tcamera, None
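A hedged call sketch. The observation dictionaries use the keys this method actually reads (`image_raw`, `corners_raw`, `Tlink`); `detector`, `captures`, and the image/corner/pose data are hypothetical and assumed to come from elsewhere, and `eye` is the numpy identity already star-imported by the surrounding module:

    observations = [{'image_raw': img,        # HxW (or HxWxC) numpy array
                     'corners_raw': corners,  # Nx2 detected checkerboard corners
                     'Tlink': Tlink}          # 4x4 robot link pose for this view
                    for img, corners, Tlink in captures]
    KK, kc, Tcamera, info = detector.calibrateFromObservations(
        observations, calibextrinsic=True, Tcameraestimate=eye(4))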