def do_calibration(self):
    """Run monocular calibration over every sample accumulated in self.db.

    Fills self.intrinsics, self.distortion, self.R, self.P, sets
    self.calibrated, and returns (distortion, intrinsics, R, P).
    Returns None (after printing a message) if not enough samples yet.
    """
    if not self.goodenough:
        print "Can not calibrate yet!"
        return
    #append all things in db
    # Each db entry is (params, corners, object_points).
    good_corners = [ corners for (params, corners, object_points) in self.db ]
    good_points = [ object_points for (params, corners, object_points) in self.db ]
    intrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
    # NOTE(review): flag constant comes from cv2 while the matrices use the
    # legacy cv API — works only if both modules are imported; confirm.
    if self.calib_flags & cv2.CALIB_RATIONAL_MODEL:
        distortion = cv.CreateMat(8, 1, cv.CV_64FC1) # rational polynomial
    else:
        distortion = cv.CreateMat(5, 1, cv.CV_64FC1) # plumb bob
    cv.SetZero(intrinsics)
    cv.SetZero(distortion)
    # If FIX_ASPECT_RATIO flag set, enforce focal lengths have 1/1 ratio
    intrinsics[0, 0] = 1.0
    intrinsics[1, 1] = 1.0
    opts = self.mk_object_points(good_points)
    ipts = self.mk_image_points(good_corners)
    npts = self.mk_point_counts(good_points)
    # The two throwaway mats receive per-view rvecs/tvecs, which are not kept.
    cv.CalibrateCamera2(opts, ipts, npts, self.size, intrinsics, distortion, cv.CreateMat(len(good_corners), 3, cv.CV_32FC1), cv.CreateMat(len(good_corners), 3, cv.CV_32FC1), flags=self.calib_flags)
    self.intrinsics = intrinsics
    self.distortion = distortion
    # R is identity matrix for monocular calibration
    self.R = cv.CreateMat(3, 3, cv.CV_64FC1)
    cv.SetIdentity(self.R)
    self.P = cv.CreateMat(3, 4, cv.CV_64FC1)
    cv.SetZero(self.P)
    # Write the alpha-scaled camera matrix into the left 3x3 of P in place.
    ncm = cv.GetSubRect(self.P, (0, 0, 3, 3))
    cv.GetOptimalNewCameraMatrix(self.intrinsics, self.distortion, self.size, self.alpha, ncm)
    # NOTE(review): stray debug print — presumably leftover; consider removing.
    print self.size
    self.calibrated = True
    return (self.distortion, self.intrinsics, self.R, self.P)
def calibrate(self):
    """Calibrate the projector from the collected image/object point sets.

    Side effects: prints all correspondences, replaces self.projector_info
    with a calibration message, and logs it via rospy.
    """
    image_points_mat = concatenate_mats(self.image_points)
    object_points_mat = concatenate_mats(self.object_points)
    # Dump every correspondence for debugging.
    print "Image Points:"
    for row in range(image_points_mat.height):
        for col in range(image_points_mat.width):
            print image_points_mat[row, col]
    print
    print "Object Points:"
    for row in range(object_points_mat.height):
        for col in range(object_points_mat.width):
            print object_points_mat[row, col]
    print
    # One entry per scene: how many points that scene contributed.
    point_counts_mat = cv.CreateMat(1, self.number_of_scenes, cv.CV_32SC1)
    for i in range(self.number_of_scenes):
        point_counts_mat[0, i] = self.image_points[i].width
    intrinsics = cv.CreateMat(3, 3, cv.CV_32FC1)
    distortion = cv.CreateMat(4, 1, cv.CV_32FC1)
    cv.SetZero(intrinsics)
    cv.SetZero(distortion)
    # Calibration is done against the projector's pixel raster.
    size = (self.projector_info.width, self.projector_info.height)
    tvecs = cv.CreateMat(self.number_of_scenes, 3, cv.CV_32FC1)
    rvecs = cv.CreateMat(self.number_of_scenes, 3, cv.CV_32FC1)
    cv.CalibrateCamera2(object_points_mat, image_points_mat, point_counts_mat, size, intrinsics, distortion, rvecs, tvecs, flags=0)
    # Monocular case: R is identity, P is [K | 0].
    R = cv.CreateMat(3, 3, cv.CV_32FC1)
    P = cv.CreateMat(3, 4, cv.CV_32FC1)
    cv.SetIdentity(R)
    cv.SetZero(P)
    cv.Copy(intrinsics, cv.GetSubRect(P, (0, 0, 3, 3)))
    self.projector_info = create_msg(size, distortion, intrinsics, R, P)
    rospy.loginfo(self.projector_info)
    # Print translation vectors, one component (x/y/z) per output line.
    for col in range(3):
        for row in range(self.number_of_scenes):
            print tvecs[row, col],
        print
def cal_fromcorners(self, good):
    """
    Calibrate the monocular camera from detected chessboard corners.

    Fills self.intrinsics, self.distortion, self.R, self.P and the
    undistortion maps, then applies alpha=0 via self.set_alpha.

    :param good: Good corner positions and boards
    :type good: [(corners, ChessboardInfo)]
    """
    boards = [b for (_, b) in good]
    ipts = self.mk_image_points(good)
    opts = self.mk_object_points(boards)
    npts = self.mk_point_counts(boards)
    intrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
    # NOTE(review): flag constant comes from cv2 while matrices use the
    # legacy cv API — requires both modules imported; confirm.
    if self.calib_flags & cv2.CALIB_RATIONAL_MODEL:
        distortion = cv.CreateMat(8, 1, cv.CV_64FC1) # rational polynomial
    else:
        distortion = cv.CreateMat(5, 1, cv.CV_64FC1) # plumb bob
    cv.SetZero(intrinsics)
    cv.SetZero(distortion)
    # If FIX_ASPECT_RATIO flag set, enforce focal lengths have 1/1 ratio
    intrinsics[0, 0] = 1.0
    intrinsics[1, 1] = 1.0
    # Per-view rvecs/tvecs go into throwaway mats; only K and D are kept.
    cv.CalibrateCamera2(opts, ipts, npts, self.size, intrinsics, distortion, cv.CreateMat(len(good), 3, cv.CV_32FC1), cv.CreateMat(len(good), 3, cv.CV_32FC1), flags=self.calib_flags)
    self.intrinsics = intrinsics
    self.distortion = distortion
    # R is identity matrix for monocular calibration
    self.R = cv.CreateMat(3, 3, cv.CV_64FC1)
    cv.SetIdentity(self.R)
    self.P = cv.CreateMat(3, 4, cv.CV_64FC1)
    cv.SetZero(self.P)
    # Undistortion maps are (re)built by set_alpha below.
    self.mapx = cv.CreateImage(self.size, cv.IPL_DEPTH_32F, 1)
    self.mapy = cv.CreateImage(self.size, cv.IPL_DEPTH_32F, 1)
    self.set_alpha(0.0)
def on_key_s(frame): global store_corners, rowcols, intrinsics, distortion if len(store_corners) < 1: print "No calibration yet. hold a chessboard in front of the cam and pres <space>" return ipts = mk_image_points(store_corners, rowcols) opts = mk_object_points(len(store_corners), rowcols, 1) npts = mk_point_counts(len(store_corners), rowcols) intrinsics = cv.CreateMat(3, 3, cv.CV_64FC1) distortion = cv.CreateMat(4, 1, cv.CV_64FC1) cv.SetZero(intrinsics) cv.SetZero(distortion) # focal lengths have 1/1 ratio intrinsics[0, 0] = 1.0 intrinsics[1, 1] = 1.0 cv.CalibrateCamera2(opts, ipts, npts, cv.GetSize(frame), intrinsics, distortion, cv.CreateMat(len(store_corners), 3, cv.CV_32FC1), cv.CreateMat(len(store_corners), 3, cv.CV_32FC1), flags=0) # cv.CV_CALIB_ZERO_TANGENT_DIST) print[[distortion[i, j] for j in range(0, distortion.cols)] for i in range(0, distortion.rows)] print[[intrinsics[i, j] for j in range(0, intrinsics.cols)] for i in range(0, intrinsics.rows)] cv.Save('intrinsics.xml', intrinsics) cv.Save('distortion.xml', distortion) intrinsics = cv.Load('intrinsics.xml') distortion = cv.Load('distortion.xml') store_corners = []
def calcIntrasec(self, gui):
    """Estimate the camera matrix and distortion coefficients from the
    chessboard detections accumulated in self.points.

    Returns a (camera_matrix, dist_coeffs) pair of cvMats.
    """
    n_points = len(self.points)
    board_cols = self.chessSize[0]
    board_rows = self.chessSize[1]
    per_image = board_cols * board_rows
    # Outputs filled in-place by cv.CalibrateCamera2.
    camera_matrix = cv.CreateMat(3, 3, cv.CV_32FC1)
    dist_coeffs = cv.CreateMat(5, 1, cv.CV_32FC1)
    # Per-frame extrinsics (one row per frame).
    rvecs = cv.CreateMat(self.nframe, 3, cv.CV_32FC1)
    tvecs = cv.CreateMat(self.nframe, 3, cv.CV_32FC1)
    # Every frame contributes the same number of board corners.
    point_counts = cv.CreateMat(self.nframe, 1, cv.CV_32SC1)
    cv.Set(point_counts, per_image)
    # 2D detections and their 3D board coordinates; Z stays 0 everywhere.
    image_pts = cv.CreateMat(n_points, 2, cv.CV_32FC1)
    model_pts = cv.CreateMat(n_points, 3, cv.CV_32FC1)
    cv.Set(model_pts, 0.0)
    for idx, p in enumerate(self.points):
        image_pts[idx, 0] = p[0]
        image_pts[idx, 1] = p[1]
        # Board position cycles column-fastest, wrapping per frame.
        model_pts[idx, 0] = idx % board_cols
        model_pts[idx, 1] = (idx // board_cols) % board_rows
    cv.CalibrateCamera2(model_pts, image_pts, point_counts, self.framesize, camera_matrix, dist_coeffs, rvecs, tvecs, flags=0)
    gui.setMessage("Intrasinc camera parameters checked")
    return (camera_matrix, dist_coeffs)
def get_image(): im = cv.QueryFrame(camera) # cv.Flip(im, flipMode=1) # mirror effect success, corners = cv.FindChessboardCorners(im, PAT_SIZE, findFlags) if success: cv.DrawChessboardCorners(im, PAT_SIZE, corners, success) cv.PutText(im, "Found: (%.1f, %.1f)" % corners[0], infoOrigin, infoFont, (0, 255, 0)) cam_matrix = cv.CreateMat(3, 3, cv.CV_32F) dist_coeff = cv.CreateMat(1, 4, cv.CV_32F) rvecs = cv.CreateMat(1, 9, cv.CV_32F) tvecs = cv.CreateMat(1, 3, cv.CV_32F) pointArr = numpy.array([(x, y, 0) for y in xrange(PAT_SIZE[1]) for x in xrange(PAT_SIZE[0])], numpy.float32) objectPoints = cv.fromarray(pointArr) imgPointArr = numpy.array(corners, numpy.float32) imagePoints = cv.fromarray(imgPointArr) pointCounts = cv.CreateMat(1, 1, cv.CV_32S) pointCounts[0, 0] = PAT_SIZE[0] * PAT_SIZE[1] # Rodrigues version: rvecs3 = cv.CreateMat(1, 3, cv.CV_32F) cv.CalibrateCamera2(objectPoints, imagePoints, pointCounts, IMG_SIZE, cam_matrix, dist_coeff, rvecs3, tvecs) rmat3 = cv.CreateMat(3, 3, cv.CV_32F) cv.Rodrigues2(rvecs3, rmat3) # end Rodrigues version #cv.CalibrateCamera2(objectPoints, imagePoints, pointCounts, IMG_SIZE, cam_matrix, dist_coeff, rvecs, tvecs) #rmat = numpy.asarray(rvecs).reshape((3, 3), order='C') #print "RVecs:" #print rmat print "TVecs:", numpy.asarray(tvecs) # 3. column of R == rotated z versor, angle toward x; z=(2, 2), x=(0, 2) yaw = math.atan2(rmat3[0, 2], rmat3[2, 2]) * 180 / math.pi # rotated z versor, height y=(1, 2) to length==1 pitch = math.asin(rmat3[1, 2]) * 180 / math.pi # 1. column of R = rotated x versor, height y = (1, 0) to length==1 roll = math.asin(rmat3[1, 0]) * 180 / math.pi print "Yaw %5.2f, Pitch %5.2f, Roll %5.2f" % (yaw, pitch, roll) #import pdb; pdb.set_trace() print '-'*40 else: cv.PutText(im, "Not found.", infoOrigin, infoFont, (0, 255, 0)) return cv2pygame(im)
cv.Set2D(object_points2, i, 1, cv.Get2D(object_points, i, 1)) cv.Set2D(object_points2, i, 2, cv.Get2D(object_points, i, 2)) for i in range(successes): cv.Set2D(point_counts2, i, 0, cv.Get2D(point_counts, i, 0)) cv.Set2D(intrinsic, 0, 0, 1.0) cv.Set2D(intrinsic, 1, 1, 1.0) rotation_vectors = cv.CreateMat(successes, 3, cv.CV_32FC1) cv.SetZero(rotation_vectors) translation_vectors = cv.CreateMat(successes, 3, cv.CV_32FC1) cv.SetZero(translation_vectors) cv.CalibrateCamera2(object_points2, image_points2, point_counts2, cv.GetSize(image), intrinsic, distortion, rotation_vectors, translation_vectors, 0) cv.Save("Intrinsics.xml", intrinsic) cv.Save("Distortion.xml", distortion) intrinsic = cv.Load("Intrinsics.xml") distortion = cv.Load("Distortion.xml") mapx = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_32F, 1) mapy = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_32F, 1) cv.InitUndistortMap(intrinsic, distortion, mapx, mapy) cv.NamedWindow("Undistort") while (image):
intrinsics = cv.CreateMat(3, 3, cv.CV_64FC1) distortion = cv.CreateMat(4, 1, cv.CV_64FC1) cv.SetZero(intrinsics) cv.SetZero(distortion) exR = cv.CreateMat(len(goodcorners), 3, cv.CV_32FC1) exT = cv.CreateMat(len(goodcorners), 3, cv.CV_32FC1) # focal lengths have 1/1 ratio intrinsics[0, 0] = 1.0 intrinsics[1, 1] = 1.0 cv.CalibrateCamera2( opts, ipts, npts, cv.GetSize(images[0]), intrinsics, distortion, exR, exT, flags=cv.CV_CALIB_ZERO_TANGENT_DIST) #cv.CV_CALIB_ZERO_TANGENT_DIST) # 0) print "D =", list(cvmat_iterator(distortion)) print "K =", list(cvmat_iterator(intrinsics)) print "R =", list(cvmat_iterator(exR)) print "T =", list(cvmat_iterator(exT)) mapx = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1) mapy = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1) cv.InitUndistortMap(intrinsics, distortion, mapx, mapy) for img in images:
# Pair detected corners with their reference ("golden") counterparts from
# the four views, then calibrate from the pooled correspondences.
datacorners = zip(goodcorners[0], goldencorners[0]) + zip(goodcorners[1], goldencorners[1]) + zip(goodcorners[2], goldencorners[2]) + zip(goodcorners[3], goldencorners[3])
print datacorners
objectPoints = cv.CreateMat(len(datacorners), 3, cv.CV_32FC1)
imPoints = cv.CreateMat(len(datacorners), 2, cv.CV_32FC1)
for i in range(len(datacorners)):
    # Each pair is (image point, 3D point) — presumably golden corners carry
    # the 3D coordinates; verify against how goldencorners is built.
    p2d, p3d = datacorners[i]
    objectPoints[i, 0] = p3d[0]
    objectPoints[i, 1] = p3d[1]
    objectPoints[i, 2] = p3d[2]
    imPoints[i, 0] = p2d[0]
    imPoints[i, 1] = p2d[1]
# All correspondences are treated as one single view.
pointCounts = cv.CreateMat(1, 1, cv.CV_32SC1)
pointCounts[0, 0] = len(datacorners)
cameraMatrix = cv.CreateMat(3, 3, cv.CV_32FC1)
distCoefs = cv.CreateMat(4, 1, cv.CV_32FC1)
# One 3-channel element each: rotation and translation for the single view.
rvecs = cv.CreateMat(1, 1, cv.CV_32FC3)
tvecs = cv.CreateMat(1, 1, cv.CV_32FC3)
cv.CalibrateCamera2(objectPoints, imPoints, pointCounts, (img.width, img.height), cameraMatrix, distCoefs, rvecs, tvecs)
print cameraMatrix
print distCoefs
print rvecs
print tvecs
# Signature reference for the legacy OpenCV calibration calls used below.
'''
CalibrateCamera2(objectPoints, imagePoints, pointCounts, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs, flags=0)
StereoCalibrate(objectPoints, imagePoints1, imagePoints2, pointCounts, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T, E=NULL, F=NULL, term_crit=(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 30, 1e-6), flags=CV_CALIB_FIX_INTRINSIC)
'''
# Calibrate each camera of the stereo pair independently first; the
# per-camera intrinsics feed the subsequent StereoCalibrate call.
cv.CalibrateCamera2(opts, ipts1, npts, cv.GetSize(images1[0]), intrinsics1, distortion1, exR1, exT1, flags = (cv.CV_CALIB_ZERO_TANGENT_DIST)) #cv.CV_CALIB_ZERO_TANGENT_DIST) # 0)
cv.CalibrateCamera2(opts, ipts2, npts, cv.GetSize(images2[0]), intrinsics2, distortion2, exR2, exT2, flags = cv.CV_CALIB_ZERO_TANGENT_DIST) #cv.CV_CALIB_ZERO_TANGENT_DIST) # 0)
# Termination criterion for the stereo optimisation: 50 iters or 1e-6 eps.
#term_crit =(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 50, 0.00000001)
term_crit = (cv.CV_TERMCRIT_ITER+cv.CV_TERMCRIT_EPS, 50, 1e-6)
def calibrate(imagedir): nimages = 0 datapoints = [] im_dims = (0,0) for f in os.listdir(imagedir): if (f.find('pgm')<0): continue image = imagedir+'/'+f grey = cv.LoadImage(image,cv.CV_LOAD_IMAGE_GRAYSCALE) found,points=cv.FindChessboardCorners(grey,dims,cv.CV_CALIB_CB_ADAPTIVE_THRESH) points=cv.FindCornerSubPix(grey,points,(11,11),(-1,-1),(cv.CV_TERMCRIT_EPS+cv.CV_TERMCRIT_ITER,30,0.1)) if (found): print 'using ', image nimages += 1 datapoints.append(points) im_dims = (grey.width, grey.height) #Number of points in chessboard num_pts = dims[0] * dims[1] #image points ipts = cv.CreateMat(nimages * num_pts, 2, cv.CV_32FC1) #object points opts = cv.CreateMat(nimages * num_pts, 3, cv.CV_32FC1) npts = cv.CreateMat(nimages, 1, cv.CV_32SC1) for i in range(0,nimages): k=i*num_pts squareSize = 1.0 # squareSize is 1.0 (i.e. units of checkerboard) for j in range(num_pts): cv.Set2D(ipts,k,0,datapoints[i][j][0]) cv.Set2D(ipts,k,1,datapoints[i][j][1]) cv.Set2D(opts,k,0,float(j%dims[0])*squareSize) cv.Set2D(opts,k,1,float(j/dims[0])*squareSize) cv.Set2D(opts,k,2,0.0) k=k+1 cv.Set2D(npts,i,0,num_pts) K = cv.CreateMat(3, 3, cv.CV_64FC1) D = cv.CreateMat(5, 1, cv.CV_64FC1) cv.SetZero(K) cv.SetZero(D) # focal lengths have 1/1 ratio K[0,0] = im_dims[0] K[1,1] = im_dims[0] K[0,2] = im_dims[0]/2 K[1,2] = im_dims[1]/2 K[2,2] = 1.0 rcv = cv.CreateMat(nimages, 3, cv.CV_64FC1) tcv = cv.CreateMat(nimages, 3, cv.CV_64FC1) #print 'object' #print array(opts) #print 'image' #print array(ipts) #print 'npts' #print array(npts) size=cv.GetSize(grey) flags = 0 #flags |= cv.CV_CALIB_FIX_ASPECT_RATIO #flags |= cv.CV_CALIB_USE_INTRINSIC_GUESS #flags |= cv.CV_CALIB_ZERO_TANGENT_DIST #flags |= cv.CV_CALIB_FIX_PRINCIPAL_POINT cv.CalibrateCamera2(opts, ipts, npts, size, K, D, rcv, tcv, flags) # storing results using CameraParams C = CameraParams(xresolution=im_dims[0], yresolution=im_dims[1]) print array(K) print array(D) C.setParams(K, D) C.save(imagedir+"/params.json")
cv.Set2D(object_points2, i, 0, cv.Get2D(object_points, i, 0)) cv.Set2D(object_points2, i, 1, cv.Get2D(object_points, i, 1)) cv.Set2D(object_points2, i, 2, cv.Get2D(object_points, i, 2)) for i in range(successes): cv.Set2D(point_counts2, i, 0, cv.Get2D(point_counts, i, 0)) cv.Set2D(intrinsic_matrix, 0, 0, 1.0) cv.Set2D(intrinsic_matrix, 1, 1, 1.0) rcv = cv.CreateMat(n_boards, 3, cv.CV_64FC1) tcv = cv.CreateMat(n_boards, 3, cv.CV_64FC1) print "checking camera calibration............." # camera calibration cv.CalibrateCamera2(object_points2, image_points2, point_counts2, cv.GetSize(image), intrinsic_matrix, distortion_coefficient, rcv, tcv, 0) print " checking camera calibration.........................OK " # storing results in xml files cv.Save("Intrinsics.xml", intrinsic_matrix) cv.Save("Distortion.xml", distortion_coefficient) # Loading from xml files intrinsic = cv.Load("Intrinsics.xml") distortion = cv.Load("Distortion.xml") print " loaded all distortion parameters" mapx = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_32F, 1) mapy = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_32F, 1) cv.InitUndistortMap(intrinsic, distortion, mapx, mapy) cv.NamedWindow("Undistort")
# Build calibration inputs from the collected chessboard detections.
ipts = mk_image_points(goodcorners)
# 0.1 is the board square size — units unknown from here; check helper.
opts = mk_object_points(len(goodcorners), .1)
npts = mk_point_counts(len(goodcorners))
intrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
distortion = cv.CreateMat(4, 1, cv.CV_64FC1)
cv.SetZero(intrinsics)
cv.SetZero(distortion)
# focal lengths have 1/1 ratio
intrinsics[0, 0] = 1.0
intrinsics[1, 1] = 1.0
# Per-view rvecs/tvecs are written to throwaway mats and discarded.
cv.CalibrateCamera2(opts, ipts, npts, cv.GetSize(images[0]), intrinsics, distortion, cv.CreateMat(len(goodcorners), 3, cv.CV_32FC1), cv.CreateMat(len(goodcorners), 3, cv.CV_32FC1), flags=0) # cv.CV_CALIB_ZERO_TANGENT_DIST)
print "D =", list(cvmat_iterator(distortion))
print "K =", list(cvmat_iterator(intrinsics))
# NOTE(review): map size is hard-coded 640x480 rather than the image size.
mapx = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
mapy = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
cv.InitUndistortMap(intrinsics, distortion, mapx, mapy)
# Show each captured image undistorted; advance on any key press.
for img in images:
    r = cv.CloneMat(img)
    cv.Remap(img, r, mapx, mapy)
    cv.ShowImage("snap", r)
    cv.WaitKey()
def calibrate(gridFiles, gridSize, gridBlockSize):
    """Calibrate a camera from chessboard images listed in *gridFiles*.

    gridSize is (cols, rows) of inner corners; gridBlockSize is the physical
    square size. Saves camMatrix.xml and distCoeffs.xml as a side effect.
    Raises ValueError if fewer than 5 grids are detected.
    """
    cpts = []
    imageSize = None
    for gf in gridFiles:
        image = cv.LoadImage(gf, False)
        success, corners = cv.FindChessboardCorners(image, gridSize)
        corners = cv.FindCornerSubPix(image, corners, (5, 5), (-1, -1), (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1))
        gridN = gridSize[0] * gridSize[1]
        if len(corners) != gridN:
            logging.debug("File failed: %s" % gf)
            continue
        # fix corners so that the first point is top left
        # compare corner[0] to corner[-1]
        # NOTE(review): the code actually compares corners[0] to corners[1]
        # (its horizontal neighbour), not corner[-1] as the comment says;
        # confirm which was intended.
        if corners[0][0] > corners[1][0]:
            # flip left/right
            logging.debug("Grid is horizontally flipped")
            flipped = []
            # Re-read the row-major corner list with mirrored column index.
            for x in xrange(gridSize[0]):
                for y in xrange(gridSize[1]):
                    cx = gridSize[0] - x - 1
                    cy = y
                    flipped.append(corners[cx + cy * gridSize[0]])
            corners = flipped
        if corners[0][1] > corners[-1][1]:
            # flip top/bottom
            logging.debug("Grid is vertically flipped")
            flipped = []
            # Re-read with mirrored row index.
            for x in xrange(gridSize[0]):
                for y in xrange(gridSize[1]):
                    cx = x
                    cy = gridSize[1] - y - 1
                    flipped.append(corners[cx + cy * gridSize[0]])
            corners = flipped
        cpts.append(corners)
        imageSize = cv.GetSize(image)
    nGrids = len(cpts)
    logging.debug("Found %i grids" % nGrids)
    if nGrids < 7:
        logging.warning("Few grids found: %i" % nGrids)
        if nGrids < 5:
            raise ValueError("Too few grids: %i" % nGrids)
    camMatrix = cv.CreateMat(3, 3, cv.CV_64FC1)
    cv.SetZero(camMatrix)
    # Seed focal lengths with a 1/1 aspect ratio.
    camMatrix[0, 0] = 1.
    camMatrix[1, 1] = 1.
    distCoeffs = cv.CreateMat(5, 1, cv.CV_64FC1)
    cv.SetZero(distCoeffs)
    gridN = gridSize[0] * gridSize[1]
    imPts = cv.CreateMat(nGrids * gridN, 2, cv.CV_64FC1)
    objPts = cv.CreateMat(nGrids * gridN, 3, cv.CV_64FC1)
    ptCounts = cv.CreateMat(nGrids, 1, cv.CV_32SC1)
    # organize self.calibrationImgPts (to imPts) and construct objPts and ptCounts
    for (i, c) in enumerate(cpts):
        for j in xrange(gridN):
            imPts[j + i * gridN, 0] = c[j][0]
            imPts[j + i * gridN, 1] = c[j][1]
            # TODO should thes be actual points? how do I know what they are?
            # Board model: column-fastest grid scaled by the square size, Z=0.
            objPts[j + i * gridN, 0] = j % gridSize[0] * gridBlockSize
            objPts[j + i * gridN, 1] = j / gridSize[0] * gridBlockSize
            objPts[j + i * gridN, 2] = 0.
        ptCounts[i, 0] = len(c)
    # Per-view extrinsics go to throwaway mats; only K and D are kept.
    cv.CalibrateCamera2(objPts, imPts, ptCounts, imageSize, camMatrix, distCoeffs, cv.CreateMat(nGrids, 3, cv.CV_64FC1), cv.CreateMat(nGrids, 3, cv.CV_64FC1), 0)
    cv.Save("camMatrix.xml", camMatrix)
    cv.Save("distCoeffs.xml", distCoeffs)
def calibrateIntrinsicCamera(self, all_object_points, all_corners, imagesize, usenonlinoptim=True, fixprincipalpoint=False, computegradients=False):
    """Calibrate camera intrinsics from multiple chessboard views.

    Runs OpenCV's CalibrateCamera2 for an initial estimate, then optionally
    refines the intrinsics/distortion/extrinsics with scipy's leastsq.

    :param all_object_points: list of per-view Nx3 3D point arrays
    :param all_corners: list of per-view Nx2 detected image points
    :param imagesize: image size passed to OpenCV (expected (width, height))
    :param usenonlinoptim: refine with leastsq after the OpenCV solve
    :param fixprincipalpoint: keep the principal point fixed
    :param computegradients: also return per-parameter curvature estimates
    :return: (KK, kc, Ts, error[, grad]) — intrinsics, distortion, per-view
             4x4 transforms, mean per-view reprojection error (None if no
             refinement).
    :raises CalibrationError: if the nonlinear refinement fails to converge.
    """
    pointCounts = vstack([len(object_points) for object_points in all_object_points])
    cvKK = cv.CreateMat(3, 3, cv.CV_64F)
    cvkc = cv.CreateMat(5, 1, cv.CV_64F)
    cvrvecs = cv.CreateMat(len(pointCounts), 3, cv.CV_64F)
    cvtvecs = cv.CreateMat(len(pointCounts), 3, cv.CV_64F)
    flags = cv.CV_CALIB_FIX_PRINCIPAL_POINT if fixprincipalpoint else 0
    # FIX: pass the caller-supplied image size instead of the previously
    # hard-coded (1024, 768), which silently ignored the `imagesize` param.
    cv.CalibrateCamera2(cv.fromarray(vstack(all_object_points)), cv.fromarray(vstack(all_corners)), cv.fromarray(pointCounts), imagesize, cvKK, cvkc, cvrvecs, cvtvecs, flags)
    rvecs = array(cvrvecs)
    tvecs = array(cvtvecs)
    KK = array(cvKK)
    kc = array(cvkc)
    # Build 4x4 world-to-camera transforms from the OpenCV extrinsics.
    # NOTE(review): these are NOT updated after the nonlinear refinement.
    Ts = []
    for i in range(len(pointCounts)):
        T = matrixFromAxisAngle(rvecs[i])
        T[0:3, 3] = tvecs[i]
        Ts.append(T)
    error = None
    if usenonlinoptim:
        # for some reason, the opencv solver is not doing as good of a job as it can... (it also uses levmarq)
        # Parameter vector layout: [fx, fy, (cx, cy,) kc0..kc4, (rvec,tvec)*views]
        x0 = r_[KK[0, 0], KK[1, 1]]
        if not fixprincipalpoint:
            x0 = r_[x0, KK[0, 2], KK[1, 2]]
        x0 = r_[x0, kc[:, 0]]
        for i in range(len(pointCounts)):
            x0 = r_[x0, rvecs[i], tvecs[i]]
        # Assumes every view has the same number of corners as the first.
        N = pointCounts[0]
        cv_image_points = cv.CreateMat(N, 2, cv.CV_64F)
        cv_object_points = [cv.fromarray(x) for x in all_object_points]

        def errorfn(x):
            # Unpack x into the cv matrices and return the stacked
            # reprojection residuals over all views.
            xoff = 2
            cvKK[0, 0] = x[0]
            cvKK[1, 1] = x[1]
            if not fixprincipalpoint:
                cvKK[0, 2] = x[2]
                cvKK[1, 2] = x[3]
                xoff += 2
            for i in range(5):
                cvkc[i, 0] = x[xoff + i]
            xoff += 5
            e = zeros(len(all_object_points) * N * 2)
            off = 0
            for i in range(len(all_object_points)):
                for j in range(3):
                    cvrvecs[0, j] = x[xoff + 6 * i + j]
                    cvtvecs[0, j] = x[xoff + 6 * i + 3 + j]
                cv.ProjectPoints2(cv_object_points[i], cvrvecs[0], cvtvecs[0], cvKK, cvkc, cv_image_points)
                image_points = array(cv_image_points)
                e[off:(off + len(image_points))] = all_corners[i][:, 0] - image_points[:, 0]
                off += len(image_points)
                e[off:(off + len(image_points))] = all_corners[i][:, 1] - image_points[:, 1]
                off += len(image_points)
            #print 'rms: ',sqrt(sum(e**2))
            return e

        x, success = leastsq(errorfn, x0, maxfev=100000, epsfcn=1e-7)
        # FIX: leastsq returns an integer flag where 1-4 mean success; the
        # old `if not success` only triggered on 0 and never caught failure.
        if success not in (1, 2, 3, 4):
            raise CalibrationError('failed to converge to answer')
        e = errorfn(x)
        abse = sqrt(sum(e**2))
        # Mean per-view reprojection distance.
        e2 = reshape(e, [len(all_object_points), 2 * N])**2
        error = mean(sqrt(e2[:, 0:N] + e2[:, N:]), 1)
        # Copy the refined intrinsics/distortion back into the outputs.
        KK[0, 0] = x[0]
        KK[1, 1] = x[1]
        xoff = 2
        if not fixprincipalpoint:
            KK[0, 2] = x[2]
            KK[1, 2] = x[3]
            xoff += 2
        for i in range(5):
            kc[i, 0] = x[xoff + i]
        if computegradients:
            # Second-difference curvature of the residual norm w.r.t. each
            # intrinsic parameter (finite-difference step per parameter type).
            deltas = r_[0.01 * ones(2 if fixprincipalpoint else 4), 0.0001 * ones(5)]
            grad = []
            normalization = 1.0 / (len(all_object_points) * N * 2)
            for i, delta in enumerate(deltas):
                x[i] += delta
                e_p = errorfn(x)
                abse_p = sqrt(sum(e_p**2))
                x[i] -= 2 * delta
                e_n = errorfn(x)
                abse_n = sqrt(sum(e_n**2))
                x[i] += delta
                grad.append(normalization * (abse_p + abse_n - 2 * abse) / (delta**2)) #sum((e_p-e)**2) + sum((e-e_n)**2))/(2.0*delta))
            return KK, kc, Ts, error, array(grad)
    return KK, kc, Ts, error
def calibrate(image_corners, chessboard_points, image_size):
    """Calibrate a camera.

    This function determines the intrinsic matrix and the extrinsic
    matrices of a camera.

    Parameters
    ----------
    image_corners : list
        List of the M outputs of cv.FindChessboardCorners, where M is the
        number of images.
    chessboard_points : ndarray
        Nx3 matrix with the (X,Y,Z) world coordinates of the N corners of
        the calibration chessboard pattern.
    image_size : tuple
        Size (height,width) of the images captured by the camera.

    Output
    ------
    intrinsic : ndarray
        3x3 intrinsic matrix
    extrinsic : list of ndarray
        List of M 4x4 transformation matrices or None values. For the
        images which had good detections given by cv.FindChessboardCorners,
        the corresponding cells have the extrinsic matrices. For the images
        with bad detections, the corresponding cells are None.
    """
    # Keep only detections whose success flag (element 0) is truthy.
    valid_corners = filter(itemgetter(0), image_corners)
    num_images = len(image_corners)  # NOTE(review): unused
    num_valid_images = len(valid_corners)
    num_corners = len(valid_corners[0][1])
    # Input data: replicate the board model once per valid view.
    object_points = np.vstack([chessboard_points] * num_valid_images)
    image_points = np.vstack(map(itemgetter(1), valid_corners))
    point_counts = np.array([[num_corners] * num_valid_images])
    # Output matrices (filled in place through cv.fromarray views).
    intrinsic = np.zeros((3, 3))
    dist_coeffs = np.zeros((1, 4))
    # 9 columns: rotations come back as full 3x3 matrices per view.
    rvecs = np.zeros((len(valid_corners), 9))
    tvecs = np.zeros((len(valid_corners), 3))
    # Calibrate.
    cv.CalibrateCamera2(cv.fromarray(np.array(object_points, dtype=np.float_)), cv.fromarray(np.array(image_points, dtype=np.float_)), cv.fromarray(np.int32(point_counts)), image_size, cv.fromarray(intrinsic), cv.fromarray(dist_coeffs), cv.fromarray(rvecs), cv.fromarray(tvecs))
    # Build the transformation matrices.
    # Iterators are consumed only for valid detections, keeping the
    # extrinsics aligned with their original image indices.
    rvecs = iter(rvecs)
    tvecs = iter(tvecs)
    def vecs2matrices(c):
        # c is a FindChessboardCorners result; c[0] == 0 means detection failed.
        if c[0] == 0:
            return None
        R = np.reshape(rvecs.next(), (3, 3))
        t = np.reshape(tvecs.next(), (3, 1))
        return np.vstack([np.hstack([R, t]), [0, 0, 0, 1]])
    extrinsic = map(vecs2matrices, image_corners)
    return intrinsic, extrinsic