def draw_axis_on_board(self):
    """Continuously grab frames, estimate the grid-board pose and draw its axis.

    Uses the calibration stored in ``self.outputfile``; runs until 'q' is
    pressed in the display window.
    """
    board = aruco.GridBoard_create(self.MarkerX, self.MarkerY, 0.5, 0.01,
                                   self.aruco_dict)
    cam_matrix, dist_coeff, rvecs, tvecs = self.read_cam_matrix(self.outputfile)
    grabber = Imutils_Master()
    while True:
        img = grabber.getframe()
        gray = grabber.gray(img)
        corners, ids, rejected = aruco.detectMarkers(
            gray, self.aruco_dict, parameters=self.parameters)
        # Refinement recovers markers that the first pass rejected.
        corners1, ids1, rejected1, rec_index = aruco.refineDetectedMarkers(
            gray, board, corners, ids, rejected)
        if self.get_marker_count(ids1) > 0:
            ret, rv, tv = aruco.estimatePoseBoard(corners1, ids1, board,
                                                  cam_matrix, dist_coeff)
            # ret is the number of markers used in the estimation.
            if ret == 2:
                aruco.drawAxis(img, cam_matrix, dist_coeff, rv, tv, 0.3)
        cv2.imshow("frame", img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
def reality(self, cp, cp2, sp):
    """Project 3-D control points and a reference box into each camera frame
    and display the augmented image.

    Args:
        cp:  3-D points of the first control set (board coordinates).
        cp2: 3-D points of the second control set.
        sp:  extra points forwarded unprojected to ``self.draw``.

    Loops over frames from ``self.cap`` indefinitely.
    """
    # 8 corners of a 0.3 x 0.4 x 1.0 box anchored at the board origin.
    border = np.float32([[0, 0, 0], [0.3, 0, 0], [0.3, 0.4, 0], [0, 0.4, 0],
                         [0, 0, 1], [0.3, 0, 1], [0.3, 0.4, 1], [0, 0.4, 1]])
    while True:
        _, frame = self.cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2.imshow('frame', frame)
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, self.dictionary, parameters=self.parameters,
            cameraMatrix=self.mtx, distCoeff=self.dist)
        cv2.imshow('corners', frame)
        if len(corners) > 0:
            valid, rvecs, tvecs = aruco.estimatePoseBoard(
                corners, ids, self.board, self.mtx, self.dist)
            if valid:
                # BUG FIX: the original projected the undefined name `cp1`
                # (NameError on first use); the intended source points are
                # the `cp` argument.
                cp1, _ = cv2.projectPoints(cp, rvecs, tvecs, self.mtx,
                                           self.dist)
                cp2_px, _ = cv2.projectPoints(cp2, rvecs, tvecs, self.mtx,
                                              self.dist)
                bdr, _ = cv2.projectPoints(border, rvecs, tvecs, self.mtx,
                                           self.dist)
                img = self.draw(frame, cp1, cp2_px, sp, bdr)
                cv2.imshow('img', img)
        cv2.imshow('img2', frame)
        cv2.waitKey(10)
def set_pose(self, matrix, distortion):
    """Estimate the board pose from the currently detected markers and
    derive the camera position, the field ROI in image coordinates, and the
    min/max expected ball radius on the image.

    Args:
        matrix: camera intrinsic matrix.
        distortion: distortion coefficients.

    Sets ``self.set`` (and ``self.goal.set`` on failure) to reflect whether
    a valid pose is available.
    """
    if self.visible:
        retval, self.rvec, self.tvec = aruco.estimatePoseBoard(
            self.corners, self.ids, self.board, matrix, distortion)
        # BUG FIX: the derived quantities were previously computed BEFORE
        # checking retval; with retval == 0 rvec/tvec are not valid and
        # cv2.Rodrigues would raise. Compute only on success.
        if retval != 0:
            self.rot_mtx, jacobian = cv2.Rodrigues(self.rvec)
            self.inv_rot_mtx = np.linalg.inv(self.rot_mtx)
            self.inv_mtx = np.linalg.inv(matrix)
            # camera coordinate in board coordinate system
            self.tvec_camera = -1 * np.matmul(self.inv_rot_mtx, self.tvec)
            self.roi_points, jacobian = cv2.projectPoints(
                self.field_corners, self.rvec, self.tvec, matrix, distortion)
            self.roi_points = self.roi_points.astype(int)
            # distance from the camera to every field corner
            distances = np.empty(0)
            for center in self.field_corners:
                distance = np.linalg.norm(
                    center - np.reshape(self.tvec_camera, 3))
                distances = np.append(distances, [distance], axis=0)
            index_min = np.argmin(distances)
            index_max = np.argmax(distances)
            # farthest corner bounds the smallest apparent ball radius,
            # nearest corner the largest; widened by the tolerance band
            self.radius_min = radius_on_image(
                self.field_corners[index_max], self.ball.radius,
                self.tvec_camera, self.rvec, self.tvec, matrix,
                distortion) * (1 - self.tolerance)
            self.radius_max = radius_on_image(
                self.field_corners[index_min], self.ball.radius,
                self.tvec_camera, self.rvec, self.tvec, matrix,
                distortion) * (1 + self.tolerance)
            self.set = True
        else:
            self.set = False
            self.goal.set = False
    else:
        self.set = False
        self.goal.set = False
        print('No Marker Visible')
def detect(self, img) -> (tuple, int):
    """Detect the robot's aruco board in ``img`` and return its world pose.

    Args:
        img: BGR camera image.

    Returns:
        Robot((x, y), orientation) on success, ``None`` otherwise.
        ``self.success`` mirrors the estimation result.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    corners, ids, rejected_img_points = aruco.detectMarkers(
        gray, self.marker_dict, parameters=self.parameters,
        cameraMatrix=self.cam_param.camera_matrix,
        distCoeff=self.cam_param.distortion)
    # Robustness: estimatePoseBoard raises on an empty detection set.
    if ids is None or len(corners) == 0:
        self.success = 0
        return None
    self.success, rotation, translation = aruco.estimatePoseBoard(
        corners, ids, self.board, self.cam_param.camera_matrix,
        self.cam_param.distortion)
    if self.success:
        rvec = rotation.copy()
        tvec = translation.copy()
        # np.asscalar() was removed in NumPy 1.23; ndarray.item() is the
        # documented replacement.
        camera_to_robot = Transform.from_parameters(
            tvec[0].item(), tvec[1].item(), tvec[2].item(),
            rvec[0].item(), rvec[1].item(), rvec[2].item())
        world_to_robot = self.coordinate_converter.world_from_camera(
            camera_to_robot)
        robot_info = world_to_robot.to_parameters(True)
        position_x = robot_info[0]
        position_y = robot_info[1]
        orientation = robot_info[5]
        return Robot((position_x, position_y), orientation)
    else:
        return None
def cal_aruco(corners, ids, i):
    """Estimate the pose of board ``i`` from detected marker corners.

    Returns (rvec, tvec, 1) on success, or (0, 0, 0) when the pose could
    not be estimated.
    """
    retval, rvec, tvec = aruco.estimatePoseBoard(
        corners, ids, board[i], camera_matrix, dist_coeffs, None, None)
    return (rvec, tvec, 1) if retval else (0, 0, 0)
def pose_estimation(img, board, aruco_dict, arucoParams, mtx, dist):
    """
    Estimates the R and T matrices from an aruco board.

    Parameters:
        img : The original image.
        board: Aruco board object.
        aruco_dict: Dictionary for aruco markers.
        arucoParams: Parameters for aruco.
        mtx: The camera matrix.
        dist: Distortion coefficients.

    Returns:
        ret: 1 if the pose is estimated, 0 otherwise.
        rvec: Rotation vector of camera (None on failure).
        tvec: Translation vector of camera (None on failure).
        corners: Pixels where the aruco markers' corners were found.
        ids: List of markers' ids.
    """
    term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-3)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    corners, ids, rejected = aruco.detectMarkers(gray, aruco_dict,
                                                 parameters=arucoParams)
    # Refine every detected marker's corners to sub-pixel accuracy
    # (cornerSubPix modifies the corner arrays in place).
    for marker in corners:
        cv2.cornerSubPix(gray, marker, (5, 5), (-1, -1), term)
    if not corners:
        print("No markers detected. Can't estimate pose.")
        return 0, None, None, None, None
    ret, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board, mtx,
                                              dist, None, None)
    return 1, rvec, tvec, corners, ids
def estimatePose(self, corners, ids, cameraMatrix, distCoeffs):
    """Estimate the pose of the area-of-interest board.

    Returns (valid, rvec, tvec); ``valid`` is the number of markers used
    by the estimation (0 means no pose was found).
    """
    rvec = np.zeros(3)
    tvec = np.zeros(3)
    valid, rvec, tvec = aruco.estimatePoseBoard(
        corners, ids, self.__aoi__, cameraMatrix, distCoeffs, rvec, tvec)
    return (valid, rvec, tvec)
def get_board_range(self, board):
    """Return the camera-to-board distance (norm of tvec), drawing the board
    axis onto ``self.frame``; ``None`` when the board is not visible.
    """
    detected_board_ids = []
    detected_board_corners = []
    # Hoisted: get_visible_ids() was previously re-queried on every loop
    # iteration (three calls per pass).
    visible_ids = self.get_visible_ids()
    if visible_ids is not None:
        board_id_list = board.ids.tolist()
        for detected_id, detected_corner in zip(visible_ids, self.corners):
            if detected_id in board_id_list:
                detected_board_ids.append(detected_id)
                detected_board_corners.append(detected_corner)
        if detected_board_ids:
            detected_board_ids = np.asarray(detected_board_ids)
            detected_board_corners = np.asarray(detected_board_corners)
            retval, rvec, tvec = aruco.estimatePoseBoard(
                detected_board_corners, detected_board_ids, board,
                self.camera_matrix, self.dist_coeffs)
            self.frame = aruco.drawAxis(
                self.frame, self.camera_matrix, self.dist_coeffs, rvec,
                tvec, self.axis_length_inches)
            distance_to_board = np.linalg.norm(tvec)
            return distance_to_board
    # board not visible, distance unknown
    return None
def callback(data):
    """ROS callback: estimate the Tello camera pose in the world frame from
    one detected fiducial marker and publish it as TF frames plus a
    PoseStamped.

    ``data`` carries a marker id and its four image-plane corner points.
    """
    global theID, thePoint, cam_tf, cam_pub
    ####for real tello
    #camera_matrix = np.array([[921.170702, 0.000000, 459.904354], [
    #    0.000000, 919.018377, 351.238301], [0.000000, 0.000000, 1.000000]])
    #dist_coeffs = np.array(
    #    [-0.033458, 0.105152, 0.001256, -0.006647, 0.000000])
    ####for sim (ideal pinhole camera, no distortion)
    camera_matrix = np.array([[562, 0.000000, 480.5],
                              [0.000000, 562, 360.5],
                              [0.000000, 0.000000, 1.000000]])
    dist_coeffs = np.array([0, 0, 0, 0, 0])
    aruco_dict = aruco.Dictionary_get(aruco.DICT_ARUCO_ORIGINAL)
    # Build a one-marker "board" located at the known world position
    # (thePoint) of marker theID.
    board_corners = [np.array(thePoint, dtype=np.float32)]
    board_ids = np.array([theID], dtype=np.int32)
    board = aruco.Board_create(
        board_corners,
        aruco.getPredefinedDictionary(aruco.DICT_ARUCO_ORIGINAL), board_ids)
    i = 0
    corners = []
    ids = np.array([data.id], dtype=np.int32)
    corners = np.array([[[data.points[0].x, data.points[0].y],
                         [data.points[1].x, data.points[1].y],
                         [data.points[2].x, data.points[2].y],
                         [data.points[3].x, data.points[3].y]]],
                       dtype=np.float32)
    retval, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board,
                                                 camera_matrix, dist_coeffs,
                                                 None, None)
    if retval == 0:
        # pose could not be estimated for this marker
        return 0, 0, 0
    a = rvec
    b = tvec
    # board -> camera rotation matrix
    rr, _ = cv2.Rodrigues(np.array([a[0][0], a[1][0], a[2][0]]))
    # +0.21 offset on z -- presumably compensates a fixed mounting offset;
    # TODO(review): confirm against the marker setup
    tt = np.array([b[0][0], b[1][0], b[2][0] + 0.21])
    # invert the transform: camera position expressed in the board frame
    cam_r = rr.transpose()
    cam_t = -cam_r.dot(tt)
    cam_x = cam_t[0]
    cam_y = cam_t[1]
    cam_z = cam_t[2]
    cam_q = quaternion_from_matrix(cam_r)
    # fixed quaternion rotates the camera frame into the world convention
    tq = [-0.5, 0.5, 0.5, 0.5]
    q3 = qmulq(tq, cam_q)
    cam_tf.sendTransform((cam_x, cam_y, cam_z), cam_q, rospy.Time.now(),
                         "Tello_cam_from_marker" + str(theID), "world")
    cam_tf_t.sendTransform((0, 0, 0), (0.5, -0.5, 0.5, 0.5),
                           rospy.Time.now(),
                           "Tello_cam_from_marker_world" + str(theID),
                           "Tello_cam_from_marker" + str(theID))
    pubmsg = PoseStamped()
    pubmsg.pose.position.x = cam_x
    pubmsg.pose.position.y = cam_y
    pubmsg.pose.position.z = cam_z
    pubmsg.pose.orientation.w = q3[3]
    pubmsg.pose.orientation.x = q3[0]
    pubmsg.pose.orientation.y = q3[1]
    pubmsg.pose.orientation.z = q3[2]
    pubmsg.header = data.header
    pubmsg.header.frame_id = "world"
    cam_pub.publish(pubmsg)
def talker():
    """Publish aruco grid-board pose estimates on the 'chatter' topic.

    The board is generated deterministically, so it matches the board
    produced by `create_board.py` as long as the parameters in `consts.py`
    don't change. Runs until ROS shuts down or 'q' is pressed.
    """
    board = aruco.GridBoard_create(consts.ROWS, consts.COLS,
                                   consts.markerLength,
                                   consts.markerSeparation,
                                   consts.aruco_dict)
    # init video capture
    cap = cv2.VideoCapture(0)
    # init ros publisher node
    pub = rospy.Publisher('chatter', String, queue_size=10)
    rospy.init_node('talker_1')
    rate = rospy.Rate(10)
    # get existing calibration data for pose estimation
    mtx = extract_calibration.camera_matrix
    dist = extract_calibration.dist_matrix
    font = cv2.FONT_HERSHEY_SIMPLEX
    # Hoisted out of the loop: the dictionary and detector parameters are
    # constant, so there is no need to re-create them every frame.
    aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
    parameters = aruco.DetectorParameters_create()
    while not rospy.is_shutdown():
        ret, frame = cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # we need to detect the markers before trying to detect the board
        corners, ids, rejected = aruco.detectMarkers(gray, aruco_dict,
                                                     parameters=parameters)
        aruco.refineDetectedMarkers(gray, board, corners, ids, rejected)
        if ids is not None:
            # estimate pose
            retval, rvec, tvec = aruco.estimatePoseBoard(
                corners, ids, board, mtx, dist)
            info = [ids, corners, rvec, tvec, retval]
            pub.publish(str(info))
            if DEBUG:
                rospy.loginfo(str(info))
            if DISPLAY:
                aruco.drawAxis(frame, mtx, dist, rvec, tvec, 0.1)
                # BUG FIX: was aruco.drawDetectedmarkers (lowercase 'm'),
                # which raises AttributeError; the OpenCV function is
                # drawDetectedMarkers.
                aruco.drawDetectedMarkers(frame, corners, ids)
                cv2.putText(frame, "ID: " + str(ids), (0, 64), font, 1,
                            (0, 255, 0), 2, cv2.LINE_AA)
                cv2.imshow('frame', frame)
        rate.sleep()
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # release the capture
    cap.release()
    cv2.destroyAllWindows()
def detect_board():
    """Detect a 2x2 aruco grid board over 100 frames from the Pi camera,
    draw its pose axis plus a projected target point, and write the
    annotated frames to the module-level video writer ``out``.

    Relies on module-level ``res``, ``camera_matrix``, ``dist_coeff`` and
    ``out``.
    """
    num_x = 2
    num_y = 2
    dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_250)
    # marker side 0.0932 m, separation 0.01869 m
    board = aruco.GridBoard_create(num_x, num_y, .0932, .01869, dictionary)
    vs = VideoStream(usePiCamera=True, resolution=res).start()
    fps = FPS().start()
    time.sleep(1)  # let the camera warm up
    freq = 0
    i = 0
    target_x = []
    target_y = []
    while (i < 100):
        tf = time.time()
        # Capture frame-by-frame
        frame = vs.read()
        # Our operations on the frame come here
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        parameters = aruco.DetectorParameters_create()
        # print(parameters)
        status = "No Marker Detected"
        position_dict = {}
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, dictionary, parameters=parameters)
        if ids is not None:
            frame = aruco.drawDetectedMarkers(frame, corners, ids)
            l, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board,
                                                    camera_matrix,
                                                    dist_coeff)
            print(rvec)
            print(tvec)
            frame = aruco.drawAxis(frame, camera_matrix, dist_coeff, rvec,
                                   tvec, .1)
            # project the point one unit above the board origin into pixels
            camera_points = cv2.projectPoints(
                np.array([np.array([0, 0, 1.0]).T]), rvec, tvec,
                camera_matrix, dist_coeff)
            camera_points = np.array(camera_points[0][0][0])
            status = "Target detect, x: %.2f, y: %.2f" % (camera_points[0],
                                                          camera_points[1])
            # skip drawing when the projection produced absurd coordinates
            if np.linalg.norm(camera_points) <= 10e6:
                cv2.circle(frame,
                           (int(camera_points[0]), int(camera_points[1])),
                           6, (255, 0, 0))
        # Display the resulting frame
        cv2.putText(frame, status, (20, 30), cv2.FONT_HERSHEY_SIMPLEX, .5,
                    (0, 0, 255), 2)
        out.write(frame)
        i += 1
        # exponentially smoothed operating frequency estimate
        freq = .7 * freq + .3 * (1 / (time.time() - tf))
        fps.update()
    print("Operating frequency: %.2f Hz" % freq)
    # When everything done, release the capture
    out.release()
    vs.stop()
    cv2.destroyAllWindows()
def validate_results(board, aruco_dict, aruco_params):
    """
    Validate calibration results by estimating and drawing the board pose
    on live camera frames.

    :board: aruco.GridBoard obj
    :aruco_dict: aruco.Dictionary obj
    :aruco_params: aruco.DetectorParameters obj
    :returns: None (loops forever displaying annotated frames)
    """
    camera = cv2.VideoCapture(0)
    ret, img = camera.read()
    with open('calibration.yaml') as f:
        # SafeLoader: the calibration file is plain data, and a bare
        # yaml.load() without a Loader is an error in PyYAML >= 6.
        loadeddict = yaml.load(f, Loader=yaml.SafeLoader)
    mtx = np.array(loadeddict.get('camera_matrix'))
    dist = np.array(loadeddict.get('dist_coeff'))
    ret, img = camera.read()
    img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    h, w = img_gray.shape[:2]
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1,
                                                      (w, h))
    print(newcameramtx)
    pose_r, pose_t = [], []
    while True:
        time.sleep(0.1)
        # drop buffered frames so we process a recent one
        for i in range(4):
            camera.grab()
        ret, img = camera.read()
        img_aruco = img
        im_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        h, w = im_gray.shape[:2]
        dst = cv2.undistort(im_gray, mtx, dist, None, newcameramtx)
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            dst, aruco_dict, parameters=aruco_params)
        cv2.imshow("dst", dst)
        # BUG FIX: detectMarkers returns a (possibly empty) sequence, never
        # None, so the original `corners == None` test was always False and
        # estimatePoseBoard could run with zero detections.
        if not corners:
            print("pass")
        else:
            ret, rvec, tvec = aruco.estimatePoseBoard(
                corners, ids, board, newcameramtx, dist)  # For a board
            if ret != 0:
                img_aruco = aruco.drawDetectedMarkers(img, corners, ids,
                                                      (0, 255, 0))
                # axis length 10 can be changed according to your requirement
                img_aruco = aruco.drawAxis(img_aruco, newcameramtx, dist,
                                           rvec, tvec, 10)
        cv2.waitKey(1)
        cv2.imshow("World co-ordinate frame axes", img_aruco)
def aruco():
    """Track an aruco grid board from the global video stream ``vs`` and
    draw its axis until 'q' is pressed.
    """
    # BUG FIX: this function's own name shadows the module-level `aruco`
    # import, so once the module is loaded every `aruco.*` call inside
    # resolved to the function itself and raised AttributeError.
    # Re-importing the submodule locally restores access without renaming
    # the function (which would break callers).
    from cv2 import aruco
    print("getting data from file")
    cam_matrix, dist_coefs, rvecs, tvecs = get_cam_matrix(
        "camera_matrix_aruco.yaml")
    # aruco data
    aruco_dict = aruco.Dictionary_get(aruco.DICT_ARUCO_ORIGINAL)
    parameters = aruco.DetectorParameters_create()
    corner_points = []
    count = 1
    img = vs.read()
    height, width, channels = img.shape
    out = cv2.VideoWriter('outpy1.avi',
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30,
                          (width, height))
    while True:
        count = 1
        img = get_frame(vs)
        gray = convert_gray(img)
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)
        if ids is not None and corners is not None:
            # per-marker pose (result unused beyond the last marker)
            for cor in corners:
                r, t, _objpoints = aruco.estimatePoseSingleMarkers(
                    cor, 0.5, cam_matrix, dist_coefs)
            board = aruco.GridBoard_create(6, 8, 0.05, 0.01, aruco_dict)
            corners, ids, rejectedImgPoints, rec_idx = \
                aruco.refineDetectedMarkers(gray, board, corners, ids,
                                            rejectedImgPoints)
            ret, rv, tv = aruco.estimatePoseBoard(corners, ids, board,
                                                  cam_matrix, dist_coefs)
            if ret:
                aruco.drawAxis(img, cam_matrix, dist_coefs, rv, tv, 0.2)
                obj_points, imgpoints = aruco.getBoardObjectAndImagePoints(
                    board, corners, ids)
        cv2.imshow("Markers", img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    out.release()
    cv2.destroyAllWindows()
def estimate_pose_aruco(gray, intrinsics, board):
    """Estimate the board pose from a grayscale image.

    Returns (False, None) when fewer than 3 markers are detected, otherwise
    (True, (corners, ids, rvec, tvec)).
    """
    corners, ids = detect_aruco(gray, intrinsics, board)
    if len(ids) < 3:
        return False, None
    cam_mat = np.array(intrinsics['camera_mat'])
    dist_coeff = np.array(intrinsics['dist_coeff'])
    ret, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board,
                                              cam_mat, dist_coeff)
    return True, (corners, ids, rvec, tvec)
def draw_board_axis(board, board_corners, board_ids, frame, camera_matrix,
                    dist_coeffs, axis_length):
    """Estimate the pose of ``board``, draw its axis and a range label on
    ``frame``, and return the camera-to-board distance in meters.

    Relies on a module-level ``font`` for the text overlay.
    """
    corners = np.asarray(board_corners)
    ids = np.asarray(board_ids)
    retval, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board,
                                                 camera_matrix, dist_coeffs)
    # BUG FIX: the axis length was the undefined name `axis_length_inches`;
    # the intended value is the `axis_length` parameter.
    frame = aruco.drawAxis(frame, camera_matrix, dist_coeffs, rvec, tvec,
                           axis_length)
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    axis_origin = np.asarray([[0, 0, 0]], dtype=float)
    imgpts, jac = cv2.projectPoints(axis_origin, rvec, tvec, camera_matrix,
                                    dist_coeffs)
    # anchor the range text 100 px above the projected board origin
    text_base = (int(imgpts[0][0][0]), int(imgpts[0][0][1]) - 100)
    #print(text_base)
    board_range = np.linalg.norm(tvec)
    board_range_str = "Range: {:.2f} meters".format(board_range)
    cv2.putText(frame, board_range_str, text_base, font, 1,
                (255, 255, 255), 2, cv2.LINE_AA)
    return board_range
def detect_markers():
    """Detect aruco markers over 50 frames from the Pi camera, estimate the
    board pose, overlay the camera position and write annotated frames to
    the module-level video writer ``out``.
    """
    vs = VideoStream(usePiCamera=True, resolution=res).start()
    fps = FPS().start()
    time.sleep(1)  # camera warm-up
    freq = 0
    i = 0
    target_x = []
    target_y = []
    dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_250)
    while (i < 50):
        tf = time.time()
        # Capture frame-by-frame
        frame = vs.read()
        # Our operations on the frame come here
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
        parameters = aruco.DetectorParameters_create()
        status = "No Marker Detected"
        position_dict = {}
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)
        if ids is not None:
            frame = aruco.drawDetectedMarkers(frame, corners, ids)
            # BUG FIX: estimatePoseBoard returns THREE values
            # (retval, rvec, tvec) -- the original two-name unpack raised a
            # ValueError. A board has a single pose, so the per-marker
            # indexing rvec[i]/tvec[i] (which also clobbered the frame
            # counter `i` and broke the 50-frame loop) is replaced by one
            # axis draw and one camera position.
            retval, rvec, tvec = aruco.estimatePoseBoard(
                corners, ids, board, camera_matrix, dist_coeff)
            if retval:
                frame = aruco.drawAxis(frame, camera_matrix, dist_coeff,
                                       rvec, tvec, .1)
                rmat = cv2.Rodrigues(rvec)[0]
                # camera position expressed in board coordinates
                position_camera = -np.matrix(rmat).T * np.matrix(tvec)
                position_dict[ids[0][0]] = position_camera
                status = ("x: %.2f, y: %.2f, z: %.2f") % (
                    position_camera[0], position_camera[1],
                    position_camera[2])
        # Display the resulting frame
        cv2.putText(frame, status, (20, 30), cv2.FONT_HERSHEY_SIMPLEX, .5,
                    (0, 0, 255), 2)
        out.write(frame)
        i += 1
        freq = .7 * freq + .3 * (1 / (time.time() - tf))
        fps.update()
    print("Operating frequency: %.2f Hz" % freq)
    # When everything done, release the capture
    out.release()
    vs.stop()
    cv2.destroyAllWindows()
def detect(self, img):
    """Detect the stylus aruco board in ``img`` and compute its tip position.

    Args:
        img: BGR camera image; markers and the board axis are drawn on it.

    Returns:
        (annotated_img, (x, y, z, 1)) on success, (img, None) otherwise.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        gray, self.marker_dict, parameters=self.parameters,
        cameraMatrix=self.cam_param.intrinsic_parameters['cameraMatrix'],
        distCoeff=self.cam_param.intrinsic_parameters['distCoef'])
    self.success, rotation, translation_ = aruco.estimatePoseBoard(
        corners, ids, self.board,
        self.cam_param.intrinsic_parameters['cameraMatrix'],
        self.cam_param.intrinsic_parameters['distCoef'])
    if self.success:
        rvec = rotation.copy()
        tvec = translation_.copy()
        img = aruco.drawDetectedMarkers(img, corners, ids)
        img = aruco.drawAxis(
            img, self.cam_param.intrinsic_parameters['cameraMatrix'],
            self.cam_param.intrinsic_parameters['distCoef'], rvec, tvec,
            length=100)
        #print(rvec)
        #print(tvec)
        # np.asscalar() was removed in NumPy 1.23; ndarray.item() is the
        # documented replacement.
        camera_to_stylus = transform.Transform.from_parameters(
            tvec[0].item(), tvec[1].item(), tvec[2].item(),
            rvec[0].item(), rvec[1].item(), rvec[2].item())
        camera_to_tip = camera_to_stylus.combine(self.stylus_to_tip, True)
        # TODO return position + orientation of the stylus
        world_to_tip = self.world_to_camera.combine(camera_to_tip, True)
        tip_info = world_to_tip.to_parameters(True)
        position_x = tip_info[0]
        position_y = tip_info[1]
        position_z = tip_info[2]
        return img, (position_x, position_y, position_z, 1)
    else:
        return img, None
def detect_markers(img, params, root_dir, file_name):
    """Detect the 5x7 aruco grid board in ``img`` and recover its pose.

    Writes a debug image with the detected markers and board axis to
    ``<root_dir>/tmp/<file_name>_marker_detected.png``.

    Args:
        img: BGR input image.
        params: dict with "intrinsic" camera matrix and "dist" coefficients.
        root_dir: directory containing board_size.txt (marker length and
            separation) and receiving the tmp/ output.
        file_name: basename used for the debug image.

    Returns:
        marker_coordinates: (35, 4, 2) pixel corners indexed by marker id.
        board_objPoints: the board's 3-D object points.
        R: board-to-camera rotation matrix.
        tvec: board-to-camera translation vector.
    """
    o_path = os.path.join(root_dir, "tmp")
    if not os.path.exists(o_path):
        os.makedirs(o_path)
    #######
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_1000)
    path_board_size = os.path.join(root_dir, "board_size.txt")
    assert os.path.exists(path_board_size), path_board_size
    board_size = np.loadtxt(path_board_size)
    print("[*] board_size : {}".format(board_size))
    markerLength = board_size[0]
    markerSeparation = board_size[1]
    board = aruco.GridBoard_create(5, 7, markerLength, markerSeparation,
                                   aruco_dict)
    ####
    intrinsic = params["intrinsic"]
    dist = params["dist"]
    arucoParams = aruco.DetectorParameters_create()
    img = cv2.undistort(img, intrinsic, dist)
    # [:, :, ::-1] swaps the channel order before grayscale conversion
    img_gray = cv2.cvtColor(img[:, :, ::-1], cv2.COLOR_BGR2GRAY)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        img_gray, aruco_dict, parameters=arucoParams)
    corners, ids, rej, recovered_ids = aruco.refineDetectedMarkers(
        img_gray, board, corners, ids, rejectedImgPoints)
    # The image was already undistorted above, so estimate the pose with
    # zero distortion. NOTE(review): this mutates the caller's `dist`
    # array in place -- confirm callers expect that.
    dist[:] = 0
    retval, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board,
                                                 intrinsic, dist)
    R, _ = cv2.Rodrigues(rvec)
    tmp_img = aruco.drawDetectedMarkers(img[:, :, ::-1].copy(), corners,
                                        ids, (0, 255, 0))
    tmp_img = aruco.drawAxis(tmp_img, intrinsic, dist, rvec, tvec, 100)
    cv2.imwrite(
        os.path.join(o_path, "{}_marker_detected.png".format(file_name)),
        tmp_img)
    ########
    # Re-index the detected corners by marker id (5*7 board -> 35 slots);
    # missing markers keep zeroed corners.
    marker_coordinates = np.zeros(shape=(5 * 7, 4, 2))
    for index in range(len(ids)):
        try:
            marker_coordinates[ids[index], :, :] = corners[index].reshape(4, 2)
        except Exception as e:
            print("ERROR: ", end=" ")
            print(ids[index], corners[index])
    board_objPoints = np.array(board.objPoints)
    return marker_coordinates, board_objPoints, R, tvec
def board_pose(self, board, corners, ids):
    """Estimate the pose of ``board`` and return
    (retval, rvec, tvec corrected to the robot origin).
    """
    retval, rvec, tvec = aruco.estimatePoseBoard(
        corners, ids, board, self.camera_matrix, self.dist_coeffs)
    # Draw the board's origin axis. The length only affects the drawing,
    # not any pose estimation.
    draw_axis_length_m = 0.02
    self.processed_frame = aruco.drawAxis(
        self.processed_frame, self.camera_matrix, self.dist_coeffs,
        rvec, tvec, draw_axis_length_m)
    # Translate tvec from the camera origin to the robot origin.
    CAMERA_OFFSET_M = [[0], [0.085], [0.084]]
    corrected_tvec = tvec + np.asarray(CAMERA_OFFSET_M)
    return retval, rvec, corrected_tvec
def postEst(corners, ids, camMat, distCoeffs, markerLength=ARUCO_MARKER_LENGTH,
            arucoBoardHeight=0.2884, arucoBoardWidth=0.5932):
    """Estimate the pose of the aruco corner board.

    Returns (rvec, tvec) -- the board's rotation and translation vectors.
    """
    boardObj, _ = arucoBoard(markerLength, arucoBoardHeight, arucoBoardWidth)
    _, rvec, tvec = aruco.estimatePoseBoard(
        corners, ids, boardObj, camMat, distCoeffs, None, None)
    return rvec, tvec
def find_aruco(img):
    """Estimate the board pose from ``img``.

    Returns (rvec, tvec, 1) on success, or (0, 0, 0) when the board pose
    could not be estimated.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # blur suppresses sensor noise before marker detection
    gus = cv2.GaussianBlur(gray, (11, 11), 0)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gus, aruco_dict)
    retval, rvec, tvec = aruco.estimatePoseBoard(
        corners, ids, board, camera_matrix, dist_coeffs, None, None)
    if not retval:
        return 0, 0, 0
    return rvec, tvec, 1
    # NOTE: the original version had drawAxis / drawDetectedMarkers /
    # imshow calls AFTER the returns above -- unreachable dead code,
    # removed here.
def image_callback(self, data):
    """ROS image callback: estimate the aruco board pose in the incoming
    frame and publish it as a Pose plus a Bool success flag.

    A default (all-zero) Pose is published when nothing is detected;
    consumers must check the Bool flag.
    """
    # Define variables for messages to publish
    pose_msg = Pose()
    bool_msg = Bool()
    # Convert from ROS message to OpenCV image
    cv_image = self.bridge.imgmsg_to_cv2(data,
                                         desired_encoding="passthrough")
    # Find corners and IDs of aruco markers
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        cv_image, self.aruco_dict, parameters=self.aruco_param)
    aruco.refineDetectedMarkers(cv_image, self.aruco_board, corners, ids,
                                rejectedImgPoints, self.cmatx, self.dist)
    # If markers found, estimate pose
    if ids is not None:
        retval, rvec, tvec = aruco.estimatePoseBoard(
            corners, ids, self.aruco_board, self.cmatx, self.dist)
        bool_msg.data = retval
        # If successful, convert rvec from rpy to quaternion and fill the
        # pose message
        if retval:
            quat = tf.transformations.quaternion_from_euler(
                rvec[0], rvec[1], rvec[2])
            pose_msg.position.x = tvec[0]
            pose_msg.position.y = tvec[1]
            pose_msg.position.z = tvec[2]
            pose_msg.orientation.x = quat[0]
            pose_msg.orientation.y = quat[1]
            pose_msg.orientation.z = quat[2]
            pose_msg.orientation.w = quat[3]
    else:
        bool_msg.data = False
    # Publish messages
    self.pose_pub.publish(pose_msg)
    self.bool_pub.publish(bool_msg)
def get_board_in_frame(self, frame, search_aruco_dict):
    """Estimate the pose of a 7x5 grid board in ``frame``.

    Args:
        frame: image to search.
        search_aruco_dict: aruco dictionary the board was generated from.

    Returns:
        (tvecs, rvecs) on success, or two zero vectors when no marker of
        the dictionary is found.
    """
    markerLength = 0.04      # marker side, meters
    markerSeparation = 0.01  # gap between markers, meters
    markers_w = 7
    markers_h = 5
    grid_board = cv2.aruco.GridBoard_create(
        markers_w, markers_h, markerLength, markerSeparation,
        search_aruco_dict)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        frame, search_aruco_dict)
    if ids is not None and len(ids) > 0:
        # BUG FIX: the optional rvec/tvec output arguments must be arrays
        # or None; the original passed the integers 0, 0 which OpenCV
        # rejects.
        retval, rvecs, tvecs = aruco.estimatePoseBoard(
            corners, ids, grid_board,
            self.calibration_data["cam_mtx"],
            self.calibration_data["dist_coef"], None, None)
        return tvecs, rvecs
    return np.array([0.0, 0.0, 0.0]), np.array([0.0, 0.0, 0.0])
def estimate_pose(gray, intrinsics, board):
    """Estimate the board pose from a grayscale frame.

    Returns (False, None) when fewer than 3 markers are detected,
    otherwise (True, (detectedCorners, detectedIds, rvec, tvec)).
    The rotation is sign-flipped when needed so all cameras report a
    consistent board orientation.
    """
    detectedCorners, detectedIds = detect_aruco(gray, intrinsics, board)
    if len(detectedIds) < 3:
        return False, None
    INTRINSICS_K = np.array(intrinsics['camera_mat'])
    INTRINSICS_D = np.array(intrinsics['dist_coeff'])
    ret, rvec, tvec = aruco.estimatePoseBoard(detectedCorners, detectedIds,
                                              board, INTRINSICS_K,
                                              INTRINSICS_D)
    # flip the orientation as needed to make all the cameras align
    rotmat, _ = cv2.Rodrigues(rvec)
    # sign of the rotated z-axis decides whether this pose is mirrored
    # relative to the reference convention
    test = np.dot([1, 1, 0], rotmat).dot([0, 0, 1])
    if test > 0:
        rvec[1, 0] = -rvec[1, 0]
        rvec[0, 0] = -rvec[0, 0]
    return True, (detectedCorners, detectedIds, rvec, tvec)
def update_frame(self, frame):
    """Process a new camera frame: detect single markers, tracker markers,
    and estimate the tracker-board pose.

    All results are stored on ``self`` (corners/ids/rvecs/tvecs for both
    dictionaries, plus rvec_tracker/tvec_tracker for the board pose).
    """
    self.frame = frame
    self.error = ''
    # individual markers from the main dictionary
    self.corners, self.ids, self.rejectedImgPoints = aruco.detectMarkers(
        self.frame, self.aruco_dict, parameters=self.parameters)
    self.rvecs, self.tvecs, self._corners = aruco.estimatePoseSingleMarkers(
        self.corners, self.markerLength, self.cameraMatrix, self.distCoeffs)
    # tracker markers use their own dictionary and marker length
    self.corners_tracker, self.ids_tracker, _ = aruco.detectMarkers(
        self.frame, self.tracker_dict, parameters=self.parameters)
    self.rvecs_tracker, self.tvecs_tracker, _ = \
        aruco.estimatePoseSingleMarkers(
            self.corners_tracker, markerLength_tracker, self.cameraMatrix,
            self.distCoeffs)
    # rebuild the tracker grid board and estimate its single combined pose
    size = self.arucoTrackerBoard.getBoardSize()
    board = aruco.GridBoard_create(size[0], size[1], markerLength_tracker,
                                   markerSeparation_tracker,
                                   self.tracker_dict)
    _, self.rvec_tracker, self.tvec_tracker = aruco.estimatePoseBoard(
        self.corners_tracker, self.ids_tracker, board, self.cameraMatrix,
        self.distCoeffs)
def eye_arm_calibrate(images, urs, mtx, dist, eye_in_hand=False, show=True):
    """Hand-eye calibration from aruco-board images and robot tool poses.

    Args:
        images: RGB images, each showing the 2x2 calibration board.
        urs: robot tool->base poses, one per image.
        mtx, dist: camera intrinsics and distortion coefficients.
        eye_in_hand: True when the camera is mounted on the arm.
        show: display the result via ``display``.

    Returns:
        4x4 camera-to-gripper (eye-in-hand) / camera-to-base transform.
    """
    # Build the calibration-board model
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_50)
    parameters = aruco.DetectorParameters_create()
    board = aruco.GridBoard_create(2, 2, 0.08, 0.01, aruco_dict)
    # Get the board-frame -> camera-frame transform from every image
    cam = []
    for img in images:
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)
        retval, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board,
                                                     mtx, dist, None, None)
        cam.append(to_matrix(np.squeeze(np.r_[tvec, rvec])))
    # `urs` are tool->base transforms; for eye-to-hand the solver needs
    # base->tool, so invert them.
    urs = [to_matrix(s) for s in urs]
    if not eye_in_hand:
        urs = [np.linalg.inv(s) for s in urs]
    # calibrateHandEye takes rotation matrices and translation vectors
    # as separate inputs.
    R_gripper2base = np.array([pos[:3, :3] for pos in urs])
    t_gripper2base = np.array([pos[:3, 3] for pos in urs])
    R_target2cam = np.array([pos[:3, :3] for pos in cam])
    t_target2cam = np.array([pos[:3, 3] for pos in cam])
    R_cam2gripper, t_cam2gripper = cv2.calibrateHandEye(
        R_gripper2base, t_gripper2base, R_target2cam, t_target2cam,
        method=cv2.CALIB_HAND_EYE_PARK)
    # Reassemble the output into a 4x4 homogeneous transform
    cam2base = np.eye(4)
    cam2base[:3, :3] = np.squeeze(R_cam2gripper)
    cam2base[:3, 3] = np.squeeze(t_cam2gripper)
    print(cam2base)
    if show:
        display(cam2base, eye_in_hand)
    return cam2base
def detect_marker(imshow: bool = True):
    """Grab a frame from the global ``camera``, detect the aruco board and
    estimate its pose.

    Returns:
        (rvec, tvec) as flat arrays -- tvec converted from cm to m -- or
        (None, None) when no marker corners are found. Optionally shows
        the annotated frame.
    """
    global camera, mtx, dist, h, w, newcameramtx, roi, pose_r, pose_t
    # read twice to flush a possibly stale buffered frame
    ret, img = camera.read()
    ret, img = camera.read()
    img_aruco = img
    im_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    h, w = im_gray.shape[:2]
    dst = cv2.undistort(im_gray, mtx, dist, None, newcameramtx)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        dst, board_params["aruco_dict"], parameters=arucoParams)
    # cv2.imshow("original", img_gray)
    if not len(corners):
        # raise Exception( "No aruco corners found!" )
        print('No corners')
        if imshow:
            cv2.imshow("World co-ordinate frame axes", img)
        return None, None
    else:
        rvec = None
        tvec = None
        ret, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board,
                                                  newcameramtx, dist, rvec,
                                                  tvec)  # For a board
        print("Rotation ", rvec, "\nTranslation", tvec, "\n=======")
        if ret != 0:
            img_aruco = aruco.drawDetectedMarkers(img, corners, ids,
                                                  (0, 255, 0))
            img_aruco = aruco.drawAxis(
                img_aruco, newcameramtx, dist, rvec, tvec, 10
            )  # axis length 10; change according to your requirement
        if imshow:
            cv2.imshow("World co-ordinate frame axes", img_aruco)
        rvec = np.array(rvec).flatten()
        tvec = np.array(tvec).flatten()
        tvec = tvec / 100  # from [cm] to [m]
        return rvec, tvec
def estimate_pose_aruco(gray, intrinsics, board):
    """Estimate the board pose, dispatching to the charuco or plain-board
    estimator based on the board type.

    Returns (False, None) when fewer than 3 markers are found or the
    estimation fails, else (True, (corners, ids, rvec, tvec)).
    """
    corners, ids = detect_aruco(gray, intrinsics, board)
    if len(ids) < 3:
        return False, None
    K = np.array(intrinsics['camera_mat'])
    D = np.array(intrinsics['dist_coeff'])
    if get_board_type(board) == 'charuco':
        ret, rvec, tvec = aruco.estimatePoseCharucoBoard(
            corners, ids, board, K, D)
    else:
        ret, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board, K, D)
    if not ret or rvec is None or tvec is None:
        return False, None
    return True, (corners, ids, rvec, tvec)
def eye_arm_calibrate(images, urs, mtx, dist, eye_in_hand=False):
    """Hand-eye calibration from aruco-board images and robot tool poses.

    Also prints the camera pose expressed in the world frame using the
    module-level ``base2world`` and displays it via ``display``.

    Args:
        images: RGB images, each showing the 2x2 calibration board.
        urs: robot tool->base poses, one per image.
        mtx, dist: camera intrinsics and distortion coefficients.
        eye_in_hand: True when the camera is mounted on the arm.

    Returns:
        4x4 camera-to-gripper (eye-in-hand) / camera-to-base transform.
    """
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_50)
    parameters = aruco.DetectorParameters_create()
    board = aruco.GridBoard_create(2, 2, 0.08, 0.01, aruco_dict)
    # board-frame -> camera-frame transform for every calibration image
    cam = []
    for img in images:
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)
        retval, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board,
                                                     mtx, dist, None, None)
        cam.append(to_matrix(np.squeeze(np.r_[tvec, rvec])))
    # `urs` are tool->base transforms; for eye-to-hand the solver needs
    # base->tool, so invert them.
    urs = [to_matrix(s) for s in urs]
    if not eye_in_hand:
        urs = [np.linalg.inv(s) for s in urs]
        # cam = [np.linalg.inv(s) for s in cam]
    # calibrateHandEye takes rotations and translations separately
    R_gripper2base = np.array([pos[:3, :3] for pos in urs])
    t_gripper2base = np.array([pos[:3, 3] for pos in urs])
    R_target2cam = np.array([pos[:3, :3] for pos in cam])
    t_target2cam = np.array([pos[:3, 3] for pos in cam])
    R_cam2gripper, t_cam2gripper = cv2.calibrateHandEye(
        R_gripper2base, t_gripper2base, R_target2cam, t_target2cam,
        method=cv2.CALIB_HAND_EYE_PARK)
    # reassemble into a 4x4 homogeneous transform
    cam2base = np.eye(4)
    cam2base[:3, :3] = np.squeeze(R_cam2gripper)
    cam2base[:3, 3] = np.squeeze(t_cam2gripper)
    print(cam2base)
    # report the camera pose and its z-axis direction in the world frame
    cam2world = base2world.dot(cam2base)
    print(cam2world)
    zz = cam2world.dot([0, 0, 1, 1])[:3]
    print(zz / np.linalg.norm(zz))
    display(cam2world)
    return cam2base
# --- live validation loop: estimate and draw the board pose per frame ---
dist = np.array(loadeddict.get('dist_coeff'))
ret, img = camera.read()
img_gray = cv2.cvtColor(img, cv2.COLOR_RGBA2GRAY)
h, w = img_gray.shape[:2]
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
pose_r, pose_t = [], []
while True:
    ret, img = camera.read()
    img_aruco = img
    im_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    h, w = im_gray.shape[:2]
    dst = cv2.undistort(im_gray, mtx, dist, None, newcameramtx)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        dst, aruco_dict, parameters=arucoParams)
    # BUG FIX: detectMarkers returns a (possibly empty) sequence, never
    # None, so `corners == None` was always False and the pose estimation
    # ran even with zero detections.
    if not corners:
        print("pass")
    else:
        ret, rvec, tvec = aruco.estimatePoseBoard(
            corners, ids, board, newcameramtx, dist)  # For a board
        print("Rotation ", rvec, "Translation", tvec)
        if ret != 0:
            img_aruco = aruco.drawDetectedMarkers(img, corners, ids,
                                                  (0, 255, 0))
            # axis length 10 can be changed according to your requirement
            img_aruco = aruco.drawAxis(img_aruco, newcameramtx, dist, rvec,
                                       tvec, 10)
    if cv2.waitKey(0) & 0xFF == ord('q'):
        break
    cv2.imshow("World co-ordinate frame axes", img_aruco)
cv2.destroyAllWindows()