def estimate_charuco_board_pose(image, CHARUCO_BOARD_SETTINGS_FILE, camera_matrix, dist_coefficients):
    """Estimate the pose of a ChArUco board visible in *image*.

    :param image: input image containing the board
    :param CHARUCO_BOARD_SETTINGS_FILE: path to the board settings file
    :param camera_matrix: camera intrinsic matrix
    :param dist_coefficients: lens distortion coefficients
    :return: (retval, rvec, tvec) on success, otherwise (False, None, None)
    """
    # Build the board object described by the settings file.
    charuco_board = load_charuco_board(CHARUCO_BOARD_SETTINGS_FILE)

    # Locate the board's chessboard corners in the image.
    found, board_corners, board_ids = detect_charuco_board_corners(
        image, CHARUCO_BOARD_SETTINGS_FILE)

    if found:
        # Solve for the rotation and translation of the board.
        ok, rotation_vec, translation_vec = aruco.estimatePoseCharucoBoard(
            board_corners, board_ids, charuco_board, camera_matrix,
            dist_coefficients)
        if ok:
            return ok, rotation_vec, translation_vec

    logger.info("No corners Detected")
    return False, None, None
def calibrateWithBoard(self, imgs, sensor, draw=False):
    """Estimate the extrinsic pose of camera *sensor* from a ChArUco board.

    Detects and refines ArUco markers in ``imgs[sensor]``, interpolates
    ChArUco corners and stores the board pose in ``self.extrinsic[sensor]``:
    'rvec' (converted to a 3x3 rotation matrix), 'tvec', and the 3x4
    'projMatrix' (K [R | t]).

    :param imgs: images indexed by sensor (modified in place with overlays)
    :param sensor: key selecting the image / intrinsics to use
    :param draw: when True, draw the pose axes and save a debug image
    :raises AssertionError: when the board is not sufficiently visible
    """
    # Constant parameters used in Aruco methods
    ARUCO_PARAMETERS = aruco.DetectorParameters_create()
    ARUCO_DICT = aruco.Dictionary_get(aruco.DICT_4X4_50)

    # Create grid board object we're using in our stream
    CHARUCO_BOARD = aruco.CharucoBoard_create(
        squaresX=10,
        squaresY=6,
        squareLength=0.04,  # in meters
        markerLength=0.03,  # in meters
        dictionary=ARUCO_DICT)

    # grayscale image
    gray = cv2.cvtColor(imgs[sensor], cv2.COLOR_BGR2GRAY)

    # Detect Aruco markers
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        gray, ARUCO_DICT, parameters=ARUCO_PARAMETERS)

    # Refine detected markers: eliminates markers not part of our board,
    # adds missing markers to the board.
    corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
        image=gray,
        board=CHARUCO_BOARD,
        detectedCorners=corners,
        detectedIds=ids,
        rejectedCorners=rejectedImgPoints,
        cameraMatrix=self.intrinsic[sensor],
        distCoeffs=self.distCoeffs)

    imgs[sensor] = aruco.drawDetectedMarkers(imgs[sensor], corners, ids=ids,
                                             borderColor=(0, 0, 255))

    pose_found = False
    # Only try to find the CharucoBoard if we found enough markers.
    if ids is not None and len(ids) > 10:
        # Get charuco corners and ids from detected aruco markers.
        response, charuco_corners, charuco_ids = aruco.interpolateCornersCharuco(
            markerCorners=corners,
            markerIds=ids,
            image=gray,
            board=CHARUCO_BOARD)
        # Require more than 20 squares for a reliable pose.
        if response is not None and response > 20:
            # Estimate the posture of the charuco board.
            pose, self.extrinsic[sensor]['rvec'], self.extrinsic[sensor]['tvec'] = \
                aruco.estimatePoseCharucoBoard(
                    charucoCorners=charuco_corners,
                    charucoIds=charuco_ids,
                    board=CHARUCO_BOARD,
                    cameraMatrix=self.intrinsic[sensor],
                    distCoeffs=self.distCoeffs)
            pose_found = True
            if draw:
                imgs[sensor] = aruco.drawAxis(
                    imgs[sensor], self.intrinsic[sensor], self.distCoeffs,
                    self.extrinsic[sensor]['rvec'],
                    self.extrinsic[sensor]['tvec'], 2)
                cv2.imwrite(
                    rospack.get_path('corsmal_benchmark_s2') +
                    '/data/out/calib_C{}_{}.png'.format(self.camId, sensor),
                    imgs[sensor])

    if not pose_found:
        print('Calibration board is not fully visible for C{} sensor: {}'.format(self.camId, sensor))
        # BUGFIX: was `assert 1==0`. A bare assert is stripped under
        # `python -O`, which would let execution fall through to the
        # Rodrigues/projMatrix step below with an undefined extrinsic.
        # Raise AssertionError explicitly so callers see the same
        # exception type in all interpreter modes.
        raise AssertionError(
            'Calibration board is not fully visible for C{} sensor: {}'.format(self.camId, sensor))

    # Convert the rotation vector into a 3x3 matrix and build the
    # projection matrix P = K [R | t].
    self.extrinsic[sensor]['rvec'] = cv2.Rodrigues(self.extrinsic[sensor]['rvec'])[0]
    self.extrinsic[sensor]['projMatrix'] = np.matmul(
        self.intrinsic[sensor],
        np.concatenate((self.extrinsic[sensor]['rvec'],
                        self.extrinsic[sensor]['tvec']), axis=1))
def estimatePoseToBoard(_img, cameraMatrix, distCoeffs):
    """Estimate the ChArUco board pose in *_img*.

    Markers are detected and refined against the module-level
    ``CHARUCO_BOARD``; marker id 49 (the robot marker) is excluded.
    An annotated debug image is written to ``calib_board.png``.

    :param _img: input BGR image (not modified; a copy is annotated)
    :param cameraMatrix: camera intrinsic matrix
    :param distCoeffs: lens distortion coefficients
    :return: (3x3 rotation matrix, tvec) of the board in camera frame
    :raises AssertionError: when the board is not sufficiently visible
    """
    img = _img.copy()
    # grayscale image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Detect Aruco markers
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        gray, ARUCO_DICT, parameters=ARUCO_PARAMETERS)

    # Refine detected markers: eliminates markers not part of our board,
    # adds missing markers to the board.
    corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
        image=gray,
        board=CHARUCO_BOARD,
        detectedCorners=corners,
        detectedIds=ids,
        rejectedCorners=rejectedImgPoints,
        cameraMatrix=cameraMatrix,
        distCoeffs=distCoeffs)

    ## REMOVE ID 49 (the robot marker)
    corners, ids = removeMarkerById(corners, ids, 49)

    img = aruco.drawDetectedMarkers(img, corners, ids=ids,
                                    borderColor=(0, 0, 255))

    rvec, tvec = None, None
    # Only try to find CharucoBoard if we found markers
    if ids is not None and len(ids) > 10:
        # Get charuco corners and ids from detected aruco markers
        response, charuco_corners, charuco_ids = aruco.interpolateCornersCharuco(
            markerCorners=corners, markerIds=ids, image=gray,
            board=CHARUCO_BOARD)
        # Require more than 20 squares
        if response is not None and response > 20:
            # Estimate the posture of the charuco board
            pose, rvec, tvec = aruco.estimatePoseCharucoBoard(
                charucoCorners=charuco_corners,
                charucoIds=charuco_ids,
                board=CHARUCO_BOARD,
                cameraMatrix=cameraMatrix,
                distCoeffs=distCoeffs)
            img = aruco.drawAxis(img, cameraMatrix, distCoeffs, rvec, tvec, 2)
            cv2.imwrite('calib_board.png', img)

    if rvec is None or tvec is None:
        print('Calibration board is not fully visible')
        # BUGFIX: was `assert 1 == 0`, which is stripped under `python -O`
        # and would let execution fall through to cv2.Rodrigues(None).
        # This guard also covers the previously unhandled case of enough
        # markers but too few interpolated squares.
        raise AssertionError('Calibration board is not fully visible')

    return cv2.Rodrigues(rvec)[0], tvec
def callback(self, data):
    """ROS image callback: detect ChArUco corners, optionally draw the board
    pose, and publish both the annotated image and a MarkerArray of the raw
    marker detections.

    :param data: sensor_msgs/Image message, converted as "bgr8".
    """
    # Convert the incoming ROS image to an OpenCV BGR image.
    try:
        cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)
    (rows, cols, channels) = cv_image.shape
    # Skip frames too small to contain a usable board view.
    if cols > 60 and rows > 60:
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            cv_image, self.aruco_dict, parameters=self.parameters)
        #print("aruco")
        #print(corners)
        if len(corners) > 0:
            # Interpolate chessboard-corner positions from the detected markers.
            retval, charucoCorners, charucoIds = aruco.interpolateCornersCharuco(
                corners, ids, cv_image, self.board)
            #print(diamondCorners)
            img = aruco.drawDetectedCornersCharuco(cv_image, charucoCorners)
            # Pose estimation requires calibrated intrinsics and corners.
            if self.camera_info_read and (not charucoCorners is None):
                retval, rvec, tvec = aruco.estimatePoseCharucoBoard(
                    charucoCorners, charucoIds, self.board,
                    self.camera_matrix, self.dist_coeffs)
                #print(retval)
                if not rvec is None:
                    # Overlay the board axes (0.1 m long).
                    img = aruco.drawAxis(img, self.camera_matrix,
                                         self.dist_coeffs, rvec, tvec, 0.1)
        # Build the MarkerArray message mirroring the raw marker detections.
        marker_msg = MarkerArray()
        marker_msg.header = data.header
        marker_msg.markers = []
        #print(corners)
        if not ids is None:
            if len(ids) != 0:
                for i in range(len(ids)):
                    marker_msg.markers.append(Marker())
                    marker_msg.markers[i].id = int(ids[i])
                    marker_msg.markers[i].corners = []
                    # corners[i] has shape (1, N, 2): N (x, y) image points.
                    for j in range((corners[i]).shape[1]):
                        marker_msg.markers[i].corners.append(Point2D())
                        marker_msg.markers[i].corners[j].x = corners[i][0, j, 0]
                        marker_msg.markers[i].corners[j].y = corners[i][0, j, 1]
        #cv2.imshow("Image window", cv_image)
        #cv2.waitKey(3)
        # Publish the (possibly annotated) image and the marker list.
        try:
            self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
            self.marker_pub.publish(marker_msg)
        except CvBridgeError as e:
            print(e)
def detect_board():
    """Track a ChArUco board on the Pi camera stream for 5000 frames.

    For each frame: detect ArUco markers, interpolate ChArUco corners,
    estimate the board pose, overlay the camera position (expressed in
    board coordinates) as text, and write the frame to the module-level
    video writer ``out``.

    Relies on module-level globals: ``res``, ``board``, ``camera_matrix``,
    ``dist_coeff`` and ``out``.
    """
    vs = VideoStream(usePiCamera=True, resolution=res).start()
    fps = FPS().start()
    time.sleep(1)  # give the camera time to warm up

    # PERF: detector configuration is loop-invariant — hoisted out of the
    # frame loop (originally recreated on every one of the 5000 iterations).
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters = aruco.DetectorParameters_create()

    freq = 0
    i = 0
    target_x = []
    target_y = []
    while i < 5000:
        tf = time.time()
        # Capture frame-by-frame
        frame = vs.read()
        # Our operations on the frame come here
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        status = "No Marker Detected"
        position_dict = {}
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)
        if ids is not None:
            ret, ch_corners, ch_ids = aruco.interpolateCornersCharuco(
                corners, ids, gray, board)
            # if there are enough corners to get a reasonable result
            if ret > 5:
                aruco.drawDetectedCornersCharuco(frame, ch_corners, ch_ids,
                                                 (0, 0, 255))
                retval, rvec, tvec = aruco.estimatePoseCharucoBoard(
                    ch_corners, ch_ids, board, camera_matrix, dist_coeff)
                # if a pose could be estimated
                if retval:
                    frame = aruco.drawAxis(frame, camera_matrix, dist_coeff,
                                           rvec, tvec, 0.1)
                    # Camera position in board coordinates: -R^T * t.
                    rmat = cv2.Rodrigues(rvec)[0]
                    position_camera = -np.matrix(rmat).T * np.matrix(tvec)
                    position_dict[ids[0][0]] = position_camera
                    status = ("x: %.2f, y: %.2f, z: %.2f") % (
                        position_camera[0], position_camera[1],
                        position_camera[2])

        # Display the resulting frame
        cv2.putText(frame, status, (20, 30), cv2.FONT_HERSHEY_SIMPLEX, .5,
                    (0, 0, 255), 2)
        out.write(frame)

        i += 1
        # Exponentially smoothed processing frequency (Hz).
        freq = .7 * freq + .3 * (1 / (time.time() - tf))
        fps.update()

    # When everything done, release the capture
    out.release()
    vs.stop()
    cv2.destroyAllWindows()
def getPose(im, board, m, d):
    """Estimate and display the ChArUco board pose in image *im*.

    Shows the image with the board axes drawn (0.5 board units long) and
    blocks on a key press. Returns without displaying when no markers,
    corners, or pose can be found.

    :param im: input image (grayscale or BGR)
    :param board: cv2.aruco CharucoBoard object
    :param m: camera intrinsic matrix
    :param d: lens distortion coefficients
    """
    # Work on a grayscale copy; detection requires a single-channel image.
    if len(im.shape) > 2:
        gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    else:
        gray = im

    # parameters = aruco.DetectorParameters_create()
    # corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, dictionary, parameters=parameters)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, dictionary)

    # BUGFIX: originally there were no guards — with zero detections
    # interpolateCornersCharuco raises, and a failed pose estimate fed
    # None vectors into drawAxis.
    if ids is None or len(ids) == 0:
        return

    ret, chcorners, chids = aruco.interpolateCornersCharuco(
        corners, ids, gray, board)
    if not ret:
        return

    ret, rvecs, tvecs = aruco.estimatePoseCharucoBoard(
        chcorners, chids, board, m, d)
    if not ret:
        return

    im = aruco.drawAxis(im, m, d, rvecs, tvecs, 0.5)
    cv2.imshow('markers', im)
    cv2.waitKey()
def estimate_pose_points(self, camera, corners, ids):
    """Estimate the board pose from interpolated ChArUco corner points.

    :param camera: object exposing get_camera_matrix() / get_distortions()
    :param corners: detected corner points (any layout with x,y pairs)
    :param ids: corresponding corner ids
    :return: (rvec, tvec), or (None, None) when fewer than four corners
        (or no corners/ids at all) are available.
    """
    # Bail out early when there is not enough data for a pose estimate.
    if corners is None or ids is None or len(corners) < 4:
        return None, None

    # Normalize the corner layout to the (N, 1, 2) shape expected by
    # estimatePoseCharucoBoard.
    point_count = corners.size // 2
    reshaped = np.reshape(corners, (point_count, 1, 2))

    cam_matrix = camera.get_camera_matrix()
    distortions = camera.get_distortions()
    _, rotation, translation = aruco.estimatePoseCharucoBoard(
        reshaped, ids, self.board, cam_matrix, distortions)
    return rotation, translation
def update_pose(self):
    """Refresh self.rvec / self.tvec from the current ChArUco detections.

    Sets self.pose_valid; rvec/tvec are left untouched when the pose
    cannot be estimated (fewer than 4 corner points, or solver failure).
    """
    # Too few corner points for a reliable pose.
    if self.N_pts < 4:
        self.pose_valid = False
        return

    valid, rot, trans = estimatePoseCharucoBoard(
        self.ccorners, self.cids, self.board, self.K, self.cdist)
    self.pose_valid = valid
    if not valid:
        return

    # Store the solver outputs as flat 3-vectors.
    self.rvec = rot.ravel()
    self.tvec = trans.ravel()
def get_ee_wrt_cam(self, corners, ids, intrinsics, convention='robotic'):
    """Compute the end-effector pose with respect to the camera.

    Estimates the board pose from ChArUco corners, chains it with the fixed
    marker-to-end-effector transform self.M_X_E, and inverts the result.

    :param corners: interpolated ChArUco corner points
    :param ids: corresponding corner ids
    :param intrinsics: dict with "K" and "distortion" entries
    :param convention: 'robotic' applies self.cv2robotics on the way out
    :return: 4x4 transform, or None when the board pose is not found
    """
    found, rot_vec, trans_vec = aruco.estimatePoseCharucoBoard(
        corners, ids, charuco_board,
        np.array(intrinsics["K"]), np.array(intrinsics["distortion"]))
    if not found:
        print('Board Pose not detected')
        return None

    # Camera(OpenCV) -> marker/board homogeneous transform.
    cam_T_marker = np.eye(4)
    cam_T_marker[:3, 3] = trans_vec[:3].reshape(3)
    cam_T_marker[0:3, 0:3] = cv2.Rodrigues(rot_vec[:])[0]

    # Chain with the marker -> end-effector offset, then invert to get the
    # camera expressed in the end-effector frame.
    cam_T_ee = cam_T_marker.dot(self.M_X_E)
    ee_T_cam = np.linalg.inv(cam_T_ee)

    if convention == 'robotic':
        return ee_T_cam.dot(self.cv2robotics)
    return ee_T_cam
def run_charuco_detection(camera: Camera, device_index: any = None):
    """Live ChArUco-board detection loop for *camera*.

    Grabs frames from the capture device, detects board corners, estimates
    and draws the board pose, and shows a scaled debug window until the
    stream ends or 'q' is pressed. The capture device is always released.

    :param camera: Camera wrapper providing intrinsics and a capture device.
    :param device_index: optional index forwarded to get_capture_device().
    """
    # TODO: use charuco context
    aruco_dict, charuco_board = create_default_board()
    camera_matrix, dist_coeff = camera.intrinsic.camera_matrix, camera.intrinsic.dist_coeffs
    cap = camera.get_capture_device(device_index)

    def display(frame):
        # resize so the largest dimension is ~1000 px
        proportion = max(frame.shape) / 1000.0
        im = cv2.resize(frame, (int(
            frame.shape[1] / proportion), int(frame.shape[0] / proportion)))
        # show the debug image
        cv2.imshow('calibration', im)

    try:
        while True:
            more, frame = cap.read()
            if not more:
                return
            corners, ids = detect_charuco(frame, charuco_board, aruco_dict)
            if not (corners is None):
                ret, p_rvec, p_tvec = aruco.estimatePoseCharucoBoard(
                    charucoCorners=corners,
                    charucoIds=ids,
                    board=charuco_board,
                    cameraMatrix=camera_matrix,
                    distCoeffs=dist_coeff,
                    rvec=None,
                    tvec=None)
                # Best-effort overlay: drawAxis may fail (e.g. degenerate
                # pose); in that case the frame is shown without axes.
                try:
                    frame = aruco.drawAxis(frame, camera_matrix,
                                           dist_coeff, p_rvec, p_tvec, 0.1)
                except cv2.error as e:
                    pass
            display(frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        cap.release()
def est_board_pose(self, cv2_img):
    """Estimate the ChArUco-board pose in *cv2_img* and store it.

    On success stores the raw board pose in self.rvecs / self.tvecs
    (rotation vector and translation in the camera frame); when the board
    is not found, stores zero 3x1 vectors and shows the raw frame.
    """
    gray = cv2.cvtColor(cv2_img, cv2.COLOR_BGR2GRAY)
    corners, ids, _ = aruco.detectMarkers(image=gray, dictionary=aruco_dict)
    # Require a handful of markers before interpolating corners.
    if ids is not None and len(ids) > 5:
        _, Chcorners, Chids = aruco.interpolateCornersCharuco(
            corners, ids, gray, charuco_board)
        retval, rvecs, tvecs = aruco.estimatePoseCharucoBoard(
            Chcorners, Chids, charuco_board, self.camera_matrix[:, :],
            self.distCoeffs[:, :])
        #######################
        # Camera -> marker homogeneous transform from the estimated pose.
        X_C_M = np.eye(4)
        X_C_M[0, 3] = tvecs[0]
        X_C_M[1, 3] = tvecs[1]
        X_C_M[2, 3] = tvecs[2]
        R = cv2.Rodrigues(rvecs[:])[0]
        X_C_M[0:3, 0:3] = R
        # Chain with the fixed marker -> end-effector offset self.X_M_E to
        # obtain a second (derived) pose for visualization.
        X_C_E = X_C_M.dot(self.X_M_E)
        tvecs_new = X_C_E[:3, 3]
        rvecs_new = cv2.Rodrigues(X_C_E[0:3, 0:3])[0]
        # #####################
        QueryImg = aruco.drawDetectedCornersCharuco(
            cv2_img, Chcorners, Chids, (0, 0, 255))
        if (retval):
            # Draw both the raw board pose and the derived pose (axes 0.032).
            QueryImg = aruco.drawAxis(cv2_img, self.camera_matrix[:, :],
                                      self.distCoeffs[:, :], rvecs, tvecs,
                                      0.032)
            QueryImg = aruco.drawAxis(QueryImg, self.camera_matrix[:, :],
                                      self.distCoeffs[:, :], rvecs_new,
                                      tvecs_new, 0.032)
            # Display our image
            # cv2.imshow('QueryImage', QueryImg)
            # cv2.waitKey(0)
    else:
        # Board not visible: report a zero pose and show the raw frame.
        rvecs = np.zeros((3, 1))
        tvecs = np.zeros((3, 1))
        cv2.imshow('QueryImage', cv2_img)
        print("board not detected")
        cv2.waitKey(1)
    #print("{} markers detected".format(len(ids)))
    # Publish the estimated (or zero) pose on the instance.
    self.tvecs = tvecs
    self.rvecs = rvecs
def estimate_pose_aruco(gray, intrinsics, board):
    """Estimate the pose of *board* in a grayscale image.

    :param gray: grayscale input image
    :param intrinsics: dict with 'camera_mat' and 'dist_coeff' entries
    :param board: board object (ChArUco or plain marker grid)
    :return: (True, (corners, ids, rvec, tvec)) on success, otherwise
        (False, None) when too few markers are found or the solver fails.
    """
    found_corners, found_ids = detect_aruco(gray, intrinsics, board)
    # Fewer than three markers is not enough for a stable pose.
    if len(found_ids) < 3:
        return False, None

    cam_mat = np.array(intrinsics['camera_mat'])
    dist = np.array(intrinsics['dist_coeff'])

    # ChArUco boards use the corner-based solver; plain grids use the
    # marker-based one. Both share the same call signature.
    if get_board_type(board) == 'charuco':
        solver = aruco.estimatePoseCharucoBoard
    else:
        solver = aruco.estimatePoseBoard
    ok, rvec, tvec = solver(found_corners, found_ids, board, cam_mat, dist)

    if not ok or rvec is None or tvec is None:
        return False, None
    return True, (found_corners, found_ids, rvec, tvec)
def get_cam_poses(images, intr, save_path=None):
    """Estimate a 4x4 camera pose for every image that shows the board.

    Each pose is stored flattened (row-major, 16 values). When *save_path*
    is given, the poses are written to <save_path>/cam_poses.yaml via
    cv2.FileStorage; otherwise they are printed.

    :param images: iterable of RGB images
    :param intr: camera intrinsics handed to get_board()
    :param save_path: optional output directory
    :return: numpy array of flattened poses, one row per detected board
    """
    aruco_dict, board, dist_coeffs, cam_matrix = get_board(intr)

    poses = []
    for img in images:
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        marker_corners, marker_ids, _rejected = cv2.aruco.detectMarkers(
            gray, aruco_dict)
        # Skip frames with no marker detections at all.
        if len(marker_corners) == 0:
            continue

        _retval, ch_corners, ch_ids = aruco.interpolateCornersCharuco(
            marker_corners, marker_ids, gray, board)
        _ok, rvec, tvec = aruco.estimatePoseCharucoBoard(
            ch_corners, ch_ids, board, cam_matrix, dist_coeffs, None, None)

        # Assemble the homogeneous transform from R (Rodrigues) and t.
        pose = np.zeros((4, 4))
        pose[3, 3] = 1
        pose[:3, :3] = cv2.Rodrigues(rvec)[0]
        pose[:3, 3] = tvec[:, 0]
        poses.append(pose.flatten())

    poses = np.array(poses)
    if not save_path:
        print(poses)
    else:
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        out_file = os.path.join(save_path, 'cam_poses.yaml')
        writer = cv2.FileStorage(out_file, cv2.FILE_STORAGE_WRITE)
        writer.write("poses", poses)
        writer.release()
    return poses
def stream_cam_pose(stop_after_image=False):
    """Continuously print the 4x4 camera pose estimated from a depth
    camera's color stream, rate-limited to ~2 Hz.

    :param stop_after_image: when True, wait for <Enter> after each frame.
    """
    cam = DepthCam(depth_frame_height=720, depth_frame_width=1280,
                   color_frame_height=720, color_frame_width=1280)
    intr = cam.get_intrinsics()
    aruco_dict, board, distCoeffs, cameraMatrix = get_board(intr)
    fps = 2  # target loop rate (frames per second)
    while True:
        start = time.time()
        intr = cam.get_intrinsics()
        out = cam.get_frames()
        gray = cv2.cvtColor(out['image'], cv2.COLOR_RGB2GRAY)
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
            gray, aruco_dict)
        imsize = gray.shape
        # NOTE(review): there is no guard for zero detections here —
        # interpolation/pose estimation will fail when the board is not in
        # view; confirm whether that crash is acceptable for this tool.
        charucoretval, charucoCorners, charucoIds = aruco.interpolateCornersCharuco(
            corners, ids, gray, board)
        rvec = None
        tvec = None
        retval, rvec, tvec = aruco.estimatePoseCharucoBoard(
            charucoCorners, charucoIds, board, cameraMatrix, distCoeffs,
            rvec, tvec)
        # Assemble the homogeneous transform from R (Rodrigues) and t.
        rmat = cv2.Rodrigues(rvec)[0]
        trans_mat = np.zeros((4, 4))
        trans_mat[3, 3] = 1
        trans_mat[:3, :3] = rmat
        trans_mat[:3, 3] = tvec[:, 0]
        print(trans_mat)
        # Simple rate limiting to ~fps iterations per second.
        eps = time.time() - start
        if eps < 1 / fps:
            time.sleep(1 / fps - eps)
        if stop_after_image:
            input()
# Fragment of a capture loop: undistort the frame, detect the ChArUco board,
# estimate its pose and draw world-frame axes. NOTE(review): the enclosing
# loop header and the definitions of im_gray, camera_matrix, distortion_coff,
# newcameramtx, aruco_dict, arucoParams, board, img_charuco, base_folder and
# camera are outside this view — indentation below is reconstructed.
dst = cv2.undistort(im_gray, camera_matrix, distortion_coff, None,
                    newcameramtx)
corners, ids, rejectedImgPoints = aruco.detectMarkers(
    dst, aruco_dict, parameters=arucoParams)
cv2.imshow("original", im_gray)
if not corners:
    print("pass")
else:
    # Refine against the board, then interpolate chessboard corners.
    aruco.refineDetectedMarkers(im_gray, board, corners, ids,
                                rejectedImgPoints)
    charucoretval, charucoCorners, charucoIds = aruco.interpolateCornersCharuco(
        corners, ids, im_gray, board)
    image_with_charuco_board = aruco.drawDetectedCornersCharuco(
        img_charuco, charucoCorners, charucoIds, (0, 255, 0))
    status, rvec, tvec = aruco.estimatePoseCharucoBoard(
        charucoCorners, charucoIds, board, camera_matrix, distortion_coff)
    if status != 0:
        # Draw world-frame axes (length 20, in board units).
        img_aruco = aruco.drawAxis(image_with_charuco_board, newcameramtx,
                                   distortion_coff, rvec, tvec, 20)
    else:
        print('no markers detected')
# Save the annotated pattern and leave the enclosing loop on 'q'.
if cv2.waitKey(0) & 0xFF == ord('q'):
    cv2.imwrite(base_folder + 'detected charuco pattern.jpg', img_charuco)
    break
# Presumably executed after the enclosing loop ends — TODO confirm placement.
cv2.imshow("World co-ordinate frame axes", img_charuco)
camera.release()
cv2.destroyAllWindows()
def findCharucoBoard(self, req):
    """Service handler: estimate a robust ChArUco-board pose.

    Captures NUMBER frames from the RealSense pipeline, estimates the board
    pose in each, rejects per-component outliers until the spread is small,
    and returns the averaged rotation/translation in an aruco_infoResponse.
    Also writes the last raw frame and an annotated detection image to
    ``<curr_path>/pic/``.
    """
    # One column per captured frame; zeros mark frames without a pose.
    self.rvecs_arr = np.zeros((3, NUMBER))
    self.tvecs_arr = np.zeros((3, NUMBER))
    res = aruco_infoResponse()
    for order in range(NUMBER):
        # Capturing each frame of our video stream
        # ret, self.QueryImg = self.cam.read()
        frames = self.pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        self.QueryImg = np.asanyarray(color_frame.get_data())
        # grayscale image
        gray = cv2.cvtColor(self.QueryImg, cv2.COLOR_BGR2GRAY)
        # Detect Aruco markers
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, self.aruco_dict, parameters=self.aruco_param)
        # Refine detected markers
        # Eliminates markers not part of our board, adds missing markers to the board
        corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
            image=gray,
            board=self.charuco_board,
            detectedCorners=corners,
            detectedIds=ids,
            rejectedCorners=rejectedImgPoints,
            cameraMatrix=self.cameraMatrix,
            distCoeffs=self.distCoeffs)
        # Require 15 markers before drawing axis
        if ids is not None and len(ids) > 10:
            response, charuco_corners, charuco_ids = aruco.interpolateCornersCharuco(
                markerCorners=corners,
                markerIds=ids,
                image=gray,
                board=self.charuco_board)
            # Require more than 20 squares
            if response is not None and response > 20:
                # Estimate the posture of the charuco board, which is a construction of 3D space based on the 2D video
                pose, rvec, tvec = aruco.estimatePoseCharucoBoard(
                    charucoCorners=charuco_corners,
                    charucoIds=charuco_ids,
                    board=self.charuco_board,
                    cameraMatrix=self.cameraMatrix,
                    distCoeffs=self.distCoeffs)
                if pose:
                    if order == 0:
                        print(
                            "============================================="
                        )
                        print(rvec)
                        print(tvec)
                    # Store this frame's pose components column-wise.
                    for i in range(3):
                        self.rvecs_arr[i][order] = rvec[i][0]
                        self.tvecs_arr[i][order] = tvec[i][0]
                    # self.QueryImg = aruco.drawAxis(self.QueryImg, self.cameraMatrix, self.distCoeffs, rvec, tvec, 0.02)
        cv2.waitKey(10)
    # Display our image
    # print('self.rvecs_arr = ', self.rvecs_arr)
    # print('self.tvecs_arr = ', self.tvecs_arr)
    cv2.destroyAllWindows()
    # --- Outlier-rejected averaging of the collected poses. ---
    r_avg = np.zeros(3)
    t_avg = np.zeros(3)
    # Keep only frames that produced a (nonzero) value for each component.
    ra = self.rvecs_arr[0].nonzero()
    rb = self.rvecs_arr[1].nonzero()
    rc = self.rvecs_arr[2].nonzero()
    tx = self.tvecs_arr[0].nonzero()
    ty = self.tvecs_arr[1].nonzero()
    tz = self.tvecs_arr[2].nonzero()
    ra = self.rvecs_arr[0][ra]
    rb = self.rvecs_arr[1][rb]
    rc = self.rvecs_arr[2][rc]
    tx = self.tvecs_arr[0][tx]
    ty = self.tvecs_arr[1][ty]
    tz = self.tvecs_arr[2][tz]
    ra = np.sort(ra, kind='quicksort')
    rb = np.sort(rb, kind='quicksort')
    rc = np.sort(rc, kind='quicksort')
    tx = np.sort(tx, kind='quicksort')
    ty = np.sort(ty, kind='quicksort')
    tz = np.sort(tz, kind='quicksort')
    r = np.array((ra, rb, rc))
    t = np.array((tx, ty, tz))
    for i in range(3):
        rv, tv = r[i], t[i]
        # Trim whichever extreme (sorted ends) is farther from the mean
        # until the std-dev is small or too few samples remain.
        while np.std(rv) > 0.01 and len(rv) >= NUMBER * 0.2:
            if abs(rv[0] - np.average(rv)) > abs(rv[-1] - np.average(rv)):
                rv = np.delete(rv, 0)
            else:
                rv = np.delete(rv, -1)
        while np.std(tv) > 0.01 and len(tv) >= NUMBER * 0.2:
            if abs(tv[0] - np.average(tv)) > abs(tv[-1] - np.average(tv)):
                tv = np.delete(tv, 0)
            else:
                tv = np.delete(tv, -1)
        r_avg[i] = np.average(rv)
        t_avg[i] = np.average(tv)
    res.rvecs = r_avg
    res.tvecs = t_avg
    print('res.rvecs is ', res.rvecs)
    print('res.tvecs is ', res.tvecs)
    # Stack the averaged pose as result[0] = rvec, result[1] = tvec.
    result = np.array(())
    result = np.append(result, [np.copy(r_avg), np.copy(t_avg)])
    result = result.reshape(2, 3)
    # Save the raw frame, then an annotated copy with markers and axes.
    filename = self.curr_path + "/pic/charucoboard-pic-" + str(
        int(self.frameId)) + ".jpg"
    cv2.imwrite(filename, self.QueryImg)
    # Outline all of the markers detected in our image
    self.QueryImg = aruco.drawDetectedMarkers(self.QueryImg,
                                              corners,
                                              borderColor=(0, 0, 255))
    self.QueryImg = aruco.drawAxis(self.QueryImg, self.cameraMatrix,
                                   self.distCoeffs, result[0], result[1],
                                   0.02)
    # self.curr_path = os.path.dirname(os.path.abspath(__file__))
    filename = self.curr_path + "/pic/detection result-" + str(
        int(self.frameId)) + ".jpg"
    cv2.imwrite(filename, self.QueryImg)
    self.frameId += 1
    print('------')
    return res
# Fragment of a stereo/remapped capture loop. NOTE(review): this chunk
# begins mid-statement — the opening `frame_remapped_gray = cv2.cvtColor(`
# is outside this view; indentation below is reconstructed.
    frame_remapped, cv2.COLOR_BGR2GRAY)  # aruco.detectMarkers() requires gray image
corners, ids, rejectedImgPoints = aruco.detectMarkers(
    frame_remapped_gray, aruco_dict,
    parameters=arucoParams)  # First, detect markers
aruco.refineDetectedMarkers(frame_remapped_gray, board, corners, ids,
                            rejectedImgPoints)
# NOTE(review): `ids != None` performs an elementwise comparison on a numpy
# array (and is deprecated); it should read `ids is not None`. Left
# unchanged here — documentation-only pass.
if ids != None:  # if there is at least one marker detected
    charucoretval, charucoCorners, charucoIds = aruco.interpolateCornersCharuco(
        corners, ids, frame_remapped_gray, board)
    im_with_charuco_board = aruco.drawDetectedCornersCharuco(
        frame_remapped, charucoCorners, charucoIds, (0, 255, 0))
    retval, rvec, tvec = aruco.estimatePoseCharucoBoard(
        charucoCorners, charucoIds, board, camera_matrix,
        dist_coeffs)  # posture estimation from a charuco board
    if retval == True:
        im_with_charuco_board = aruco.drawAxis(
            im_with_charuco_board, camera_matrix, dist_coeffs, rvec, tvec, 100
        )  # axis length 100 can be changed according to your requirement
else:
    im_with_charuco_left = frame_remapped
cv2.imshow("charucoboard", im_with_charuco_board)
if cv2.waitKey(2) & 0xFF == ord('q'):
    break
# NOTE(review): this trailing else most likely belongs to an outer
# `if ret:`-style frame-read check whose header is outside this view.
else:
    break
# Fragment of a capture loop: detect the ChArUco board in `frame`, draw its
# pose, and display until 'q' is pressed. NOTE(review): the loop header and
# the definitions of cap, frame, aruco_dict, aruco_parameters, charuco_board,
# camera_matrix and dist_coeffs are outside this view.
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
corners, ids, rejectedImgPoints = aruco.detectMarkers(
    gray, aruco_dict, parameters=aruco_parameters)
# if enough markers were detected
# then process the board
if( ids is not None ):
    #gray = aruco.drawDetectedMarkers(gray, corners)
    ret, ch_corners, ch_ids = aruco.interpolateCornersCharuco(corners, ids, gray, charuco_board )
    # if there are enough corners to get a reasonable result
    if( ret > 5 ):
        aruco.drawDetectedCornersCharuco(frame,ch_corners,ch_ids,(0,0,255) )
        retval, rvec, tvec = aruco.estimatePoseCharucoBoard( ch_corners, ch_ids, charuco_board, camera_matrix, dist_coeffs )
        # if a pose could be estimated
        if( retval ) :
            # axis length 0.032 (board units)
            frame = aruco.drawAxis(frame,camera_matrix,dist_coeffs,rvec,tvec,0.032)
# imshow and waitKey are required for the window
# to open on a mac.
cv2.imshow('frame', frame)
if( cv2.waitKey(1) & 0xFF == ord('q') ):
    break
# Presumably executed after the enclosing loop ends — TODO confirm placement.
cap.release()
cv2.destroyAllWindows()
# font for displaying text (below) font = cv2.FONT_HERSHEY_SIMPLEX # check if the ids list is not empty # if no check is added the code will crash if np.all(ids != None): # estimate pose of each marker and return the values # rvet and tvec-different from camera coefficients # rvec, tvec, _ = aruco.estimatePoseSingleMarkers(corners, 0.05, mtx, dist) # retval, rvec, tvec = aruco.estimatePoseBoard( # corners, ids, CHARUCO_BOARD, mtx, dist # ) retval, rvec, tvec = aruco.estimatePoseCharucoBoard( charucoCorners, charucoIds, board, cameraMatrix, distCoeffs, rvec, tvec) # (rvec-tvec).any() # get rid of that nasty numpy value array error for i in range(0, ids.size): # draw axis for the aruco markers aruco.drawAxis(frame, mtx, dist, rvec[i], tvec[i], 0.1) # draw a square around the markers aruco.drawDetectedMarkers(frame, corners) # code to show ids of the marker found strg = "" for i in range(0, ids.size): strg += str(ids[i][0]) + ", "
def findCharucoBoard(self, req):
    """Service handler: estimate a robust ChArUco-board pose.

    Estimates the board pose NUMBER times, rejects per-component outliers,
    and returns the averaged rotation/translation plus the flattened camera
    matrix in an aruco_infoResponse.

    NOTE(review): the per-iteration frame capture is commented out, so every
    iteration re-processes the same self.QueryImg — confirm intended.
    """
    # One column per iteration; zeros mark iterations without a pose.
    self.rvecs_arr = np.zeros((3, NUMBER))
    self.tvecs_arr = np.zeros((3, NUMBER))
    res = aruco_infoResponse()
    for order in range(NUMBER):
        # Capturing each frame of our video stream
        # ret, self.QueryImg = self.cam.read()
        # frames = self.pipeline.wait_for_frames()
        # color_frame = frames.get_color_frame()
        # self.QueryImg = np.asanyarray(color_frame.get_data())
        # grayscale image
        gray = cv2.cvtColor(self.QueryImg, cv2.COLOR_BGR2GRAY)
        # Detect Aruco markers
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, ARUCO_DICT, parameters=ARUCO_PARAMETERS)
        # Refine detected markers
        # Eliminates markers not part of our board, adds missing markers to the board
        corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
            image=gray,
            board=CHARUCO_BOARD,
            detectedCorners=corners,
            detectedIds=ids,
            rejectedCorners=rejectedImgPoints,
            cameraMatrix=self.cameraMatrix,
            distCoeffs=self.distCoeffs)
        # Require 15 markers before drawing axis
        if ids is not None and len(ids) > 10:
            response, charuco_corners, charuco_ids = aruco.interpolateCornersCharuco(
                markerCorners=corners,
                markerIds=ids,
                image=gray,
                board=CHARUCO_BOARD)
            # Require more than 20 squares
            if response is not None and response > 20:
                # Estimate the posture of the charuco board, which is a construction of 3D space based on the 2D video
                pose, rvec_, tvec = aruco.estimatePoseCharucoBoard(
                    charucoCorners=charuco_corners,
                    charucoIds=charuco_ids,
                    board=CHARUCO_BOARD,
                    cameraMatrix=self.cameraMatrix,
                    distCoeffs=self.distCoeffs,
                    rvec=np.zeros(3),
                    tvec=np.zeros(3))
                # self.rvecs, self.tvecs = aruco.estimatePoseSingleMarkers(corners, self.markersize, self.cameraMatrix, self.distCoeffs)
                # for _id, rvec, tvec in zip(ids, self.rvecs, self.tvecs):
                if pose:
                    print(rvec_)
                    print(tvec)
                    if order == 0:
                        print(
                            "============================================="
                        )
                        print(rvec_)
                        print(tvec)
                    # Store this iteration's pose components column-wise.
                    for i in range(3):
                        self.rvecs_arr[i][order] = rvec_[i]
                        self.tvecs_arr[i][order] = tvec[i]
                    # self.QueryImg = aruco.drawAxis(self.QueryImg, self.cameraMatrix, self.distCoeffs, rvec, tvec, 0.02)
        cv2.waitKey(10)
    # Display our image
    # print('self.rvecs_arr = ', self.rvecs_arr)
    # print('self.tvecs_arr = ', self.tvecs_arr)
    cv2.destroyAllWindows()
    # --- Outlier-rejected averaging of the collected poses. ---
    r_avg = np.zeros(3)
    t_avg = np.zeros(3)
    # Keep only iterations that produced a (nonzero) value per component.
    ra = self.rvecs_arr[0].nonzero()
    rb = self.rvecs_arr[1].nonzero()
    rc = self.rvecs_arr[2].nonzero()
    tx = self.tvecs_arr[0].nonzero()
    ty = self.tvecs_arr[1].nonzero()
    tz = self.tvecs_arr[2].nonzero()
    ra = self.rvecs_arr[0][ra]
    rb = self.rvecs_arr[1][rb]
    rc = self.rvecs_arr[2][rc]
    tx = self.tvecs_arr[0][tx]
    ty = self.tvecs_arr[1][ty]
    tz = self.tvecs_arr[2][tz]
    ra = np.sort(ra, kind='quicksort')
    rb = np.sort(rb, kind='quicksort')
    rc = np.sort(rc, kind='quicksort')
    tx = np.sort(tx, kind='quicksort')
    ty = np.sort(ty, kind='quicksort')
    tz = np.sort(tz, kind='quicksort')
    r = np.array((ra, rb, rc))
    t = np.array((tx, ty, tz))
    for i in range(3):
        rv, tv = r[i], t[i]
        # Trim whichever sorted extreme is farther from the mean until the
        # std-dev is small or too few samples remain.
        while np.std(rv) > 0.01 and len(rv) >= NUMBER * 0.2:
            if abs(rv[0] - np.average(rv)) > abs(rv[-1] - np.average(rv)):
                rv = np.delete(rv, 0)
            else:
                rv = np.delete(rv, -1)
        while np.std(tv) > 0.01 and len(tv) >= NUMBER * 0.2:
            if abs(tv[0] - np.average(tv)) > abs(tv[-1] - np.average(tv)):
                tv = np.delete(tv, 0)
            else:
                tv = np.delete(tv, -1)
        r_avg[i] = np.average(rv)
        t_avg[i] = np.average(tv)
    # print('[_id, r,t] = ', [_id, r,t])
    # res.ids.append(_id)
    # res.rvecs = np.append(res.rvecs, r_avg)
    # res.tvecs = np.append(res.tvecs, t_avg)
    res.rvecs = r_avg
    res.tvecs = t_avg
    print('res.rvecs is ', res.rvecs)
    print('res.tvecs is ', res.tvecs)
    # Stack the averaged pose as result[0] = rvec, result[1] = tvec.
    result = np.array(())
    result = np.append(result, [np.copy(r_avg), np.copy(t_avg)])
    result = result.reshape(2, 3)
    if self.name == 'test':
        # Outline all of the markers detected in our image
        self.QueryImg = aruco.drawDetectedMarkers(self.QueryImg,
                                                  corners,
                                                  borderColor=(0, 0, 255))
        self.QueryImg = aruco.drawAxis(self.QueryImg, self.cameraMatrix,
                                       self.distCoeffs, result[0], result[1],
                                       0.02)
        curr_path = os.path.dirname(os.path.abspath(__file__))
        filename = curr_path + "/pic/camera-pic-of-charucoboard-" + str(
            int(self.frameId)) + ".jpg"
        cv2.imwrite(filename, self.QueryImg)
        self.frameId += 1
        # cv2.imwrite('./123%d.jpg'%self.cnd, self.QueryImg)
        # self.cnd += 1
        # cv2.namedWindow('Amanda', cv2.WINDOW_AUTOSIZE)
        # self.QueryImg = cv2.imread('./123%d.jpg'%self.cnd)
        # cv2.imshow('Amanda', self.QueryImg)
        # cv2.waitKey(1000)
        # cv2.destroyAllWindows()
        # time.sleep(2)
    print('------')
    # while not cv2.waitKey(1) & 0xFF == ord('q'):
    #     pass
    # cv2.destroyAllWindows()
    print("self.cameraMatrix.reshape(1,9):", self.cameraMatrix.reshape(1, 9)[0])
    res.camera_mat = self.cameraMatrix.reshape(1, 9)[0]
    return res
# Fragment: grab a frame from `camera`, detect the ChArUco board and draw
# its pose. NOTE(review): camera, dictionary, arucoParams, charucoBoard,
# cameraMatrix and distCoeffs are defined outside this view.
camera.getFrames()
cv2.imshow("image", camera.color_img)
# convert frame to grayscale
gray = cv2.cvtColor(camera.color_img, cv2.COLOR_BGR2GRAY)
corners, ids, rejectedImgPoints = aruco.detectMarkers(
    gray, dictionary, parameters=arucoParams)  # First, detect markers
aruco.refineDetectedMarkers(gray, charucoBoard, corners, ids,
                            rejectedImgPoints)
if ids is not None:  # if there is at least one marker detected
    charucoretval, charucoCorners, charucoIds = aruco.interpolateCornersCharuco(
        corners, ids, gray, charucoBoard)
    im_with_charuco_board = aruco.drawDetectedCornersCharuco(
        gray, charucoCorners, charucoIds, (0, 255, 0))
    # NOTE(review): the solver's rvec/tvec outputs are discarded
    # (`retval, _, _`); drawing below relies on the preallocated rvec/tvec
    # arrays being filled in place — confirm this works with the installed
    # OpenCV version.
    rvec, tvec = np.zeros(3), np.zeros(3)
    retval, _, _ = aruco.estimatePoseCharucoBoard(
        charucoCorners, charucoIds, charucoBoard, cameraMatrix, distCoeffs,
        rvec, tvec)  # posture estimation from a charuco board
    if retval == True:
        im_with_charuco_board = aruco.drawAxis(
            im_with_charuco_board, cameraMatrix, distCoeffs, rvec, tvec, 0.05
        )  # axis length 0.05 can be changed according to your requirement
camera.close()
cv2.destroyAllWindows()
# Only try to find CharucoBoard if we found markers if ids is not None and len(ids) > 10: # Get charuco corners and ids from detected aruco markers response, charuco_corners, charuco_ids = aruco.interpolateCornersCharuco( markerCorners=corners, markerIds=ids, image=gray, board=CHARUCO_BOARD) # Require more than 20 squares if response is not None and response > 20: # Estimate the posture of the charuco board, which is a construction of 3D space based on the 2D video pose, rvec, tvec = aruco.estimatePoseCharucoBoard( charucoCorners=charuco_corners, charucoIds=charuco_ids, board=CHARUCO_BOARD, cameraMatrix=cameraMatrix, distCoeffs=distCoeffs) if pose: # Draw the camera posture calculated from the gridboard QueryImg = aruco.drawAxis(QueryImg, cameraMatrix, distCoeffs, rvec, tvec, 0.3) # Display our image cv2.imshow('QueryImage', QueryImg) # Exit at the end of the video on the 'q' keypress if cv2.waitKey(1) & 0xFF == ord('q'): break cv2.destroyAllWindows()