def rectifyImage(dataset, imageSource, mode=cs.path_mode):
    """
    Returns the rectified camera image as an ndarray.

    dataset: Tuple of (cameraMatrix, distortion_coefficients)
    imageSource: Either a path or an ndarray depending on 'mode' (see below)
    mode: Can be 'path_mode' or 'stream_mode'
        Specifies whether 'imageSource' is a path or an ndarray
    """
    if mode == cs.path_mode:
        img = cv2.imread(imageSource, 0)
    elif mode == cs.stream_mode:
        img = imageSource
    else:
        print(cs.getMessage(cs.invalid_mode))
        raise Exception()

    mtx, dist = dataset
    h, w = img.shape[:2]

    # Refine the camera matrix and build the undistortion maps
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
    mapx, mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx,
                                             (w, h), 5)  # 5 == CV_32FC1 map type
    dst = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)

    # Crop the image to the valid region of interest
    x, y, w, h = roi
    dst = dst[y:y+h, x:x+w]
    return dst
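# Usage sketch (illustration only, not part of the original module): the .npz
# file and its keys below are hypothetical; the project itself loads its
# calibration data through cs.getFileName()/getFileData().
def _exampleRectifyImage():
    calib = np.load("camera_L_calib.npz")            # hypothetical file
    dataset = (calib["cameraMatrix"], calib["distCoeffs"])
    rectified = rectifyImage(dataset, "left_01.png", mode=cs.path_mode)
    cv2.imwrite("left_01_rectified.png", rectified)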
def stereoRectify(dataset, imageSource, mode=cs.path_mode, retQ=False):
    """
    Returns the rectified images as a tuple (image_1, image_2 [, Q]) where
    each image_# is an ndarray. Q is returned only when retQ is True.

    dataset: Tuple of (cameraMatrix_1, distortion_coefficients_1,
                       cameraMatrix_2, distortion_coefficients_2,
                       rotation, translation)
        Refer to 'constants.py' for the camera mapping (1, 2) --> (L, R)
    imageSource: Tuple of (image_1, image_2)
        These can either be paths or ndarrays depending on 'mode' (see below)
    mode: Can be 'path_mode' or 'stream_mode'
        Specifies whether 'imageSource' holds paths or ndarrays
    retQ: Can be True or False
        Specifies whether to return the perspective transformation matrix Q
        True  - return value is a 3-element tuple
        False - return value is a 2-element tuple
    """
    if mode == cs.path_mode:
        img1 = cv2.imread(imageSource[0], 0)
        img2 = cv2.imread(imageSource[1], 0)
    elif mode == cs.stream_mode:
        img1 = imageSource[0]
        img2 = imageSource[1]
    else:
        print(cs.getMessage(cs.invalid_mode))
        raise Exception()

    camMtx1, distCoeffs1, camMtx2, distCoeffs2, rotation, translation = dataset
    h, w = img1.shape[:2]
    imgSize = (w, h)

    # Calculating rectification parameters
    ## alpha=0 removes the useless areas (black pixels)
    ## alpha=1 keeps the useless areas (black pixels)
    data = cv2.stereoRectify(cameraMatrix1=camMtx1, distCoeffs1=distCoeffs1,
                             cameraMatrix2=camMtx2, distCoeffs2=distCoeffs2,
                             imageSize=imgSize, R=rotation, T=translation,
                             alpha=0, newImageSize=(0, 0))
    R1, R2, P1, P2, Q, validROI1, validROI2 = data

    # Performing rectification
    data = (camMtx1, distCoeffs1, R1, P1, imgSize)
    dstImg1 = rectifyImage(img1, data)

    data = (camMtx2, distCoeffs2, R2, P2, imgSize)
    dstImg2 = rectifyImage(img2, data)

    if retQ:
        res = (dstImg1, dstImg2, Q)
    else:
        res = (dstImg1, dstImg2)
    return res
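# Usage sketch (illustration only): the six-element dataset is assumed to come
# from a prior stereo calibration run; all file names and keys are hypothetical.
def _exampleStereoRectify():
    stereoCalib = np.load("stereo_calib.npz")        # hypothetical file
    dataset = (stereoCalib["camMtx1"], stereoCalib["distCoeffs1"],
               stereoCalib["camMtx2"], stereoCalib["distCoeffs2"],
               stereoCalib["R"], stereoCalib["T"])
    left, right, Q = stereoRectify(dataset, ("left_01.png", "right_01.png"),
                                   mode=cs.path_mode, retQ=True)
    cv2.imwrite("left_01_rect.png", left)
    cv2.imwrite("right_01_rect.png", right)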
def generateDisparityMap(imageSource, mode=cs.path_mode, show=False):
    """
    Returns the disparity map as an ndarray.

    imageSource: Tuple of (image_1, image_2)
        These can either be paths or ndarrays depending on 'mode' (see below)
    mode: Can be 'path_mode' or 'stream_mode'
        Specifies whether 'imageSource' holds paths or ndarrays
    show: Specifies whether to display the generated disparity map as an image
    """
    if mode == cs.path_mode:
        img1 = cv2.imread(imageSource[0], 0)
        img2 = cv2.imread(imageSource[1], 0)
    elif mode == cs.stream_mode:
        img1 = imageSource[0]
        img2 = imageSource[1]
    else:
        print(cs.getMessage(cs.invalid_mode))
        raise Exception()

    dispValues = cs.getDisparityValue()
    numDisp = dispValues[1] - dispValues[0]
    if numDisp % 16 != 0:
        print("Invalid Input: 'numDisparities' should be divisible by 16.")
        raise Exception()

    # SGBM parameters; P1 and P2 are the smoothness penalties for small and
    # large disparity changes between neighbouring pixels
    block = 7
    p1 = 8 * 4**4
    p2 = 4 * p1
    stereo = cv2.StereoSGBM_create(minDisparity=dispValues[0],
                                   numDisparities=numDisp,
                                   blockSize=block,
                                   P1=p1, P2=p2,
                                   disp12MaxDiff=1,
                                   uniquenessRatio=10,
                                   speckleWindowSize=100,
                                   speckleRange=2,
                                   mode=cv2.STEREO_SGBM_MODE_HH)

    # StereoSGBM returns fixed-point disparities scaled by 16
    disp = stereo.compute(img1, img2).astype(np.float32) / 16

    if show:
        plt.imshow((disp - dispValues[0]) / numDisp, "gray")
        plt.show()
    return disp
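# Usage sketch (illustration only): assumes an already rectified image pair on
# disk; the paths are hypothetical. Pass show=True to display the normalised
# map through matplotlib.
def _exampleGenerateDisparityMap():
    disp = generateDisparityMap(("left_01_rect.png", "right_01_rect.png"),
                                mode=cs.path_mode, show=False)
    print("Disparity range:", disp.min(), "to", disp.max())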
def run(self):
    if socket.gethostname() == cs.getHostName(cs.master_entity):
        # Starting the main process
        print("Starting main structure...")
        self.sense = SenseHat()
        self.sense.set_pixels(self.mainPixelMatrix)

        try:
            # Step 1: Starting application on the Master Pi
            currFrame = 1
            self.setPixelFrame(currFrame, self.go_green)

            # Step 2: Checking application status of the Slave Pi
            currFrame = 2
            clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            clientSocket.settimeout(cs.connTimeout)
            clientSocket.connect(
                (cs.getIP(cs.slave_entity), cs.getPort(cs.slave_entity)))
            clientSocket.close()
            self.setPixelFrame(currFrame, self.go_green)

            # Steps 3, 4 and 5: Checking for calibration data
            currFrame = 3
            filePath = cs.getCalibDataDir(cs.root) + cs.getFileName(
                cs.camera, prefix=cs.getCamera(1))
            camCalib1 = self.getFileData(filePath, currFrame)
            camMtx1, distCoeffs1 = camCalib1[0], camCalib1[1]

            currFrame = 4
            filePath = cs.getCalibDataDir(cs.root) + cs.getFileName(
                cs.camera, prefix=cs.getCamera(2))
            camCalib2 = self.getFileData(filePath, currFrame)
            camMtx2, distCoeffs2 = camCalib2[0], camCalib2[1]

            currFrame = 5
            filePath = cs.getCalibDataDir(cs.root) + cs.getFileName(cs.stereo)
            rotate, translate, essential, fundamental = self.getFileData(
                filePath, currFrame)

            # TODO: Run this on parallel threads
            # Step 6: Starting system process
            currFrame = 6
            self.setPixelFrame(currFrame, self.go_green)

            # TODO: Multi-process these steps
            q = True
            while q:
                img1 = ct.takePic()
                img2 = ct.takeRemotePic()

                img1 = cr.rectifyImage((camMtx1, distCoeffs1), img1, cs.stream_mode)
                img2 = cr.rectifyImage((camMtx2, distCoeffs2), img2, cs.stream_mode)

                dataset = (camMtx1, distCoeffs1, camMtx2, distCoeffs2,
                           rotate, translate)
                data = sr.stereoRectify(dataset, (img1, img2), cs.stream_mode, True)

                imgs = (data[0], data[1])
                disp = dm.generateDisparityMap(imgs, cs.stream_mode, True)
                pcg.generatePointCloud(disp, imgs, data[2])

                q = input("Try one more time (y/n): ")
                if q.lower() == "y":
                    q = True
                elif q.lower() == "n":
                    q = False
                else:
                    print(cs.getMessage(cs.invalid_binary, "YN"))

            # Multi-process this step
            ## TODO: Add code to send disparity to slave pi for point cloud generation
            ## TODO: Add code for potential region selection
            ## TODO: Add code to call the required control planning system
        except:
            if currFrame == 2:
                clientSocket.close()
            self.setPixelFrame(currFrame, self.err)
        finally:
            time.sleep(10)
            self.sense.clear()
    elif socket.gethostname() == cs.getHostName(cs.slave_entity):
        # Starting the server process
        print("Starting server...")
        host = ""
        port = cs.getPort(cs.slave_entity)
        Server(host, port).startServer()
    else:
        print("Invalid system being used! The host name isn't registered.")
def verifyEpipolarLines(imageSource, mode=cs.path_mode):
    """
    Verifies that the epipolar lines are parallel. It can be used to verify
    the stereo calibration data.

    imageSource: Tuple of (image_1, image_2)
        These can either be paths or ndarrays depending on 'mode' (see below)
    mode: Can be 'path_mode' or 'stream_mode'
        Specifies whether 'imageSource' holds paths or ndarrays
    """
    if mode == cs.path_mode:
        img1 = cv2.imread(imageSource[0], 0)
        img2 = cv2.imread(imageSource[1], 0)
    elif mode == cs.stream_mode:
        img1 = imageSource[0]
        img2 = imageSource[1]
    else:
        print(cs.getMessage(cs.invalid_mode))
        raise Exception()

    sift = cv2.SIFT_create()

    # Find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    # FLANN parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    good = []
    pts1 = []
    pts2 = []

    # Ratio test as per Lowe's paper
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.8 * n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)

    pts1 = np.int32(pts1)
    pts2 = np.int32(pts2)
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)

    # Select only the inlier points
    pts1 = pts1[mask.ravel() == 1]
    pts2 = pts2[mask.ravel() == 1]

    # Find epilines corresponding to points in the right image (second image)
    # and draw them on the left image
    lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)
    lines1 = lines1.reshape(-1, 3)
    img5 = drawlines(img1, img2, lines1, pts1, pts2)[0]

    # Find epilines corresponding to points in the left image (first image)
    # and draw them on the right image
    lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    img3 = drawlines(img2, img1, lines2, pts2, pts1)[0]

    plt.subplot(121)
    plt.imshow(img5)
    plt.subplot(122)
    plt.imshow(img3)
    plt.show()
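# Usage sketch (illustration only): run the check on an already rectified pair,
# in which case the drawn epipolar lines should come out horizontal and
# parallel. The paths below are hypothetical.
def _exampleVerifyEpipolarLines():
    verifyEpipolarLines(("left_01_rect.png", "right_01_rect.png"),
                        mode=cs.path_mode)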
if not os.path.exists(calibDir):
    print("Directory doesn't exist. Creating directory...")
    os.makedirs(calibDir)

print("Starting Camera Calibration...")
print(str(TOTAL_PICS) + " pictures are needed to configure the camera.\n")

while True:
    camType = input("Enter the camera that you want to calibrate (1/2): ")
    if camType == "1" or camType == "2":
        camType = cs.getCamera(camType)
        break
    else:
        print(cs.getMessage(cs.invalid_binary, AB="12"))

checkerBoard = (9, 6)
squareSize = None  # square edge length in cm

# Termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# Prepare object points, like (0,0,0), (1,0,0), (2,0,0) ...., (8,5,0)
objp = np.zeros((np.prod(checkerBoard), 3), np.float32)
objp[:, :2] = np.indices(checkerBoard).T.reshape(-1, 2)
# objp *= squareSize

# Arrays to store object points and image points from all the images.
objpoints = []  # 3d points in real world space
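# Continuation sketch (illustration only, not the project's actual loop): the
# usual OpenCV flow that fills objpoints/imgpoints from a set of checkerboard
# pictures and then computes the camera matrix. The glob pattern is a
# hypothetical placeholder for wherever the captured pictures are stored.
#
#     import glob
#     imgpoints = []  # 2d points in image plane
#     for fname in glob.glob(calibDir + "/*.jpg"):
#         gray = cv2.imread(fname, 0)
#         found, corners = cv2.findChessboardCorners(gray, checkerBoard, None)
#         if found:
#             corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
#             objpoints.append(objp)
#             imgpoints.append(corners)
#     ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
#         objpoints, imgpoints, gray.shape[::-1], None, None)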