Example 1
def estimate_camera_pose(camera_matrix: list[list[float]],
                         dist_matrix: list[float], image: Image.Image,
                         marker_size: float) -> dict[int, Pose]:

    camera_matrix_arr, dist_matrix_arr, gray, corners, ids = detect_corners(
        camera_matrix, dist_matrix, image, refine=True)

    ret: dict[int, Pose] = {}

    if ids is None:
        return ret

    # TODO do not perform estimation for un-configured markers
    rvec, tvec, _ = aruco.estimatePoseSingleMarkers(corners, marker_size,
                                                    camera_matrix_arr,
                                                    dist_matrix_arr)

    rvec = rvec.reshape(len(ids), 3)
    tvec = tvec.reshape(len(ids), 3)

    if __debug__:
        backtorgb = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
        aruco.drawDetectedMarkers(backtorgb,
                                  corners)  # Draw A square around the markers

        for idx in range(len(ids)):
            aruco.drawAxis(backtorgb, camera_matrix_arr, dist_matrix_arr,
                           rvec[idx], tvec[idx], 0.15)

        cv2.imwrite("marker.jpg", backtorgb)

    for idx, mid in enumerate(ids):

        # convert pose of the marker wrt camera to pose of camera wrt marker
        # based on https://stackoverflow.com/a/51515560/3142796
        marker_rot_matrix, _ = cv2.Rodrigues(rvec[idx])

        assert np.allclose(np.linalg.inv(marker_rot_matrix),
                           marker_rot_matrix.transpose())
        assert math.isclose(np.linalg.det(marker_rot_matrix), 1)

        camera_rot_matrix = marker_rot_matrix.transpose()

        camera_trans_vector = np.matmul(-camera_rot_matrix,
                                        tvec[idx].reshape(3, 1)).flatten()

        o = Orientation.from_quaternion(
            quaternion.from_rotation_matrix(camera_rot_matrix))
        ret[mid[0]] = Pose(
            Position(camera_trans_vector[0], camera_trans_vector[1],
                     camera_trans_vector[2]), o)

    return ret
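The loop above inverts the marker-to-camera transform to obtain the camera pose in the marker frame. A minimal standalone sketch of the same math (the function name is ours), assuming rvec/tvec come from estimatePoseSingleMarkers:

import numpy as np
import cv2


def invert_marker_pose(rvec, tvec):
    # rotation taking marker coordinates into the camera frame
    R_marker, _ = cv2.Rodrigues(rvec)
    # the inverse of a rotation matrix is its transpose
    R_camera = R_marker.T
    # t' = -R^T t gives the camera position in the marker frame
    t_camera = -R_camera @ np.asarray(tvec).reshape(3, 1)
    return R_camera, t_camera.flatten()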
Example 2
def aruco_demo():  # renamed from "aruco()", which would shadow the cv2.aruco module used below

	print("getting data from file")
	cam_matrix,dist_coefs,rvecs,tvecs = get_cam_matrix("camera_matrix_aruco.yaml")

	# aruco data

	aruco_dict = aruco.Dictionary_get(aruco.DICT_ARUCO_ORIGINAL)
	parameters =  aruco.DetectorParameters_create()


	corner_points = [] 
	count =1
	img = vs.read()
	height, width, channels = img.shape
	out = cv2.VideoWriter('outpy1.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 30, (width,height))
	while True:
		count =1

		img = get_frame(vs)
		gray = convert_gray(img)
		corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)

		if ids is not None and corners is not None:
			# obj = aruco.drawDetectedMarkers(img, corners,ids,(255,0,0))

			for cor in corners:
				r, t, _objpoints = aruco.estimatePoseSingleMarkers(cor, 0.5, cam_matrix, dist_coefs)
				# aruco.drawAxis(img,cam_matrix,dist_coefs,r,t,0.2)

			# if len(corners) == len(ids):
			# board pose estimation is only attempted when markers were found
			board = aruco.GridBoard_create(6, 8, 0.05, 0.01, aruco_dict)
			corners, ids, rejectedImgPoints, rec_idx = aruco.refineDetectedMarkers(gray, board, corners, ids, rejectedImgPoints)
			ret, rv, tv = aruco.estimatePoseBoard(corners, ids, board, cam_matrix, dist_coefs)
			if ret:
				aruco.drawAxis(img, cam_matrix, dist_coefs, rv, tv, 0.2)
				obj_points, imgpoints = aruco.getBoardObjectAndImagePoints(board, corners, ids)

				# imgpts = np.int32(imgpoints).reshape(-1,2)
				# # draw ground floor in green
				# img = cv2.drawContours(img, [imgpts[:4]],-1,(0,255,0),3)


		

		# out.write(img)
		cv2.imshow("Markers",img)
		if cv2.waitKey(1) & 0xFF == ord('q'):
			break

	out.release()
	cv2.destroyAllWindows()
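The Dictionary_get/DetectorParameters_create factories used throughout these examples belong to the legacy API and were removed in OpenCV 4.7. A sketch of the class-based replacement, assuming OpenCV >= 4.7:

import cv2.aruco as aruco

dictionary = aruco.getPredefinedDictionary(aruco.DICT_ARUCO_ORIGINAL)
params = aruco.DetectorParameters()
detector = aruco.ArucoDetector(dictionary, params)
# gray is a grayscale frame, as in the loop above
corners, ids, rejected = detector.detectMarkers(gray)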
Example 3
    def image_processing_callback(self, msg):
        frame = self.bridge.imgmsg_to_cv2(msg, "rgb8")
        height = msg.height
        width = msg.width
        self.number_of_pixels = height
        matrix_coefficients = np.mat([[1.0, 0.0, width / 2.0],
                                      [0.0, 1.0, height / 2.0],
                                      [0.0, 0.0, 1.0]])  # principal point at the image centre
        distortion_coefficients = np.mat([0.0, 0.0, 0.0, 0.0])

        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)  # convert to grayscale (frame was decoded as rgb8)
        # Use 5x5 dictionary to find markers
        aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_250)
        # Marker detection parameters
        parameters = aruco.DetectorParameters_create()
        # lists of ids and the corners beloning to each id
        corners, ids, _ = aruco.detectMarkers(
                            gray,
                            aruco_dict,
                            parameters=parameters,
                            cameraMatrix=matrix_coefficients,
                            distCoeff=distortion_coefficients)
        print(ids, ": ID of AR-tag")
        if ids is not None:  # if the detector found any markers
            for i in range(0, len(ids)):  # Iterate in markers
                rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
                                    corners[i],
                                    0.1,
                                    matrix_coefficients,
                                    distortion_coefficients)
                # Draw A square around the markers
                aruco.drawDetectedMarkers(frame, corners)
                aruco.drawAxis(frame, matrix_coefficients,
                               distortion_coefficients,
                               rvec,
                               tvec,
                               0.1)  # Draw Axis
                top = corners[i][0][0][0]
                bottom = corners[i][0][2][0]
                if abs(top - bottom) > self.reach_threshold:
                    self.reached = True
                    self.get_logger().info('Reached the goal')
                else:
                    self.reached = False
                self.translation_x = tvec[0][0][0]
                self.stop = False
        else:
            self.translation_x = 0
            self.stop = True
        self.processedImage_publish.publish(self.bridge.cv2_to_imgmsg(frame,
                                                                      "rgb8"))
        self.regulate_direction()
Example 4
def talker():
    pub2 = rospy.Publisher('aruco_pose', Pose, queue_size=30)
    rospy.init_node('talker', anonymous=True)
    rate = rospy.Rate(30)  # 30hz
    j = 0
    while not rospy.is_shutdown():
        ret, frame = cap.read()
        j = j + 1
        print(j, "j")
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
        parameters = aruco.DetectorParameters_create()
        parameters.adaptiveThreshConstant = 10
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)
        font = cv2.FONT_HERSHEY_SIMPLEX

        if ids is not None:
            rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
                corners, marker_length, mtx, dist)
            position_data[j, :] = tvec[0].flatten()

            for i in range(0, ids.size):
                aruco.drawAxis(frame, mtx, dist, rvec[i], tvec[i], 5)

            aruco.drawDetectedMarkers(frame, corners)

            strg = ''
            for i in range(0, ids.size):
                strg += str(ids[i][0]) + ', '

            cv2.putText(frame, "Id: " + strg, (0, 64), font, 1, (0, 255, 0), 2,
                        cv2.LINE_AA)

            aruco_pose = Pose()
            aruco_pose.position = Point(x=tvec[0][0][0],
                                        y=tvec[0][0][1],
                                        z=tvec[0][0][2])
            aruco_pose.orientation = Quaternion(x=1, y=0, z=0, w=0)
            rospy.loginfo(aruco_pose)
            pub2.publish(aruco_pose)
        else:
            cv2.putText(frame, "No Ids", (0, 64), font, 1, (0, 255, 0), 2,
                        cv2.LINE_AA)
        # out.write(frame)

        # display the resulting frame
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        rate.sleep()
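The Pose above is published with a fixed orientation (x=1, y=0, z=0, w=0). If the actual marker attitude is wanted, the rotation vector can be converted to a quaternion; a sketch assuming SciPy is available:

import numpy as np
from scipy.spatial.transform import Rotation
from geometry_msgs.msg import Quaternion


def rvec_to_quaternion_msg(rvec):
    # OpenCV rotation vector -> (x, y, z, w) quaternion
    qx, qy, qz, qw = Rotation.from_rotvec(np.asarray(rvec).flatten()).as_quat()
    return Quaternion(x=qx, y=qy, z=qz, w=qw)

# e.g. aruco_pose.orientation = rvec_to_quaternion_msg(rvec[0])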
Example 5
def inspectFrame(corners, ids, theID, markerLength, camMatrix, distCoef,
                 frame):
    for i in range(len(ids)):
        if ids[i][0] == theID:
            Rvec, Tvec, _ = aruco.estimatePoseSingleMarkers(
                corners[i], markerLength, camMatrix, distCoef)
            aruco.drawDetectedMarkers(frame, [corners[i]])
            aruco.drawAxis(frame, camMatrix, distCoef, Rvec, Tvec, 5)
            R_ct = np.matrix(cv.Rodrigues(Rvec)[0])
            R_tc = R_ct.T
            #x_rot,y_rot,z_rot = rotationMatrixToEulerAngles(np.eye(3,dtype=np.float32)*R_tc)
    cv.imshow('frame', frame)
def get_M_Cmp(gray, M_Cm0, M_m0mP, visualize=False):
    markerLength = 0.063
    M_Cmp = []
    for tmp_M in M_m0mP:
        tmp_M_Cmp = np.dot(M_Cm0, tmp_M)
        if visualize:
            rvec, _ = cv2.Rodrigues(tmp_M_Cmp[:3, :3])
            tvec = tmp_M_Cmp[:3, 3]
            aruco.drawAxis(gray, cameraMatrix, distCoeffs, rvec, tvec,
                           markerLength)
        M_Cmp.append(tmp_M_Cmp)
        if OBJ_POSE_ONLY: break
    return M_Cmp
def acha_aruco(gray):

    # Function whose goal is to identify the IDs of the ArUco markers on the track

    gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        gray, aruco_dict, parameters=parameters)
    print(ids)

    if ids is not None:
        #-- ret = [rvec, tvec, ?]
        #-- rvec = [[rvec_1], [rvec_2], ...] rotation vectors
        #-- tvec = [[tvec_1], [tvec_2], ...] translation vectors
        ret = aruco.estimatePoseSingleMarkers(corners, marker_size,
                                              camera_matrix, camera_distortion)
        rvec, tvec = ret[0][0, 0, :], ret[1][0, 0, :]
        #-- Draw a rectangle and display the ID of the detected marker
        aruco.drawDetectedMarkers(temp_image, corners, ids)
        aruco.drawAxis(temp_image, camera_matrix, camera_distortion, rvec,
                       tvec, 1)
        #-- Print tvec, the translation vector, in x y z
        str_position = "Marker x=%4.0f  y=%4.0f  z=%4.0f" % (tvec[0], tvec[1],
                                                             tvec[2])
        print(str_position)
        cv2.putText(temp_image, str_position, (0, 100), font, 1, (0, 255, 0),
                    1, cv2.LINE_AA)
        ##############----- Axis reference ------###########################
        # Reference line along X
        cv2.line(temp_image,
                 (int(temp_image.shape[1] / 2), int(temp_image.shape[0] / 2)),
                 ((int(temp_image.shape[1] / 2) + 50),
                  (int(temp_image.shape[0] / 2))), (0, 0, 255), 5)
        # Reference line along Y
        cv2.line(temp_image,
                 (int(temp_image.shape[1] / 2), int(temp_image.shape[0] / 2)),
                 ((int(temp_image.shape[1] / 2)),
                  (int(temp_image.shape[0] / 2) + 50)), (0, 255, 0), 5)

        #####################---- Euclidean distance ----#####################
        # Compute the distance using only tvec, the translation vector
        # Either of the two forms below can be used
        distance = np.sqrt(tvec[0]**2 + tvec[1]**2 + tvec[2]**2)
        distancenp = np.linalg.norm(tvec)
        #-- Print distance
        str_dist = "Dist aruco=%4.0f  dis.np=%4.0f" % (distance, distancenp)
        print(str_dist)
        cv2.putText(temp_image, str_dist, (0, 15), font, 1, (0, 255, 0), 1,
                    cv2.LINE_AA)

        return distance, ids
    return 1000, [[-1]]
Example 8
def track_aruco(frame):
    # operations on the frame
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # set dictionary size depending on the aruco marker selected
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)

    # detector parameters can be set here (List of detection parameters[3])
    parameters = aruco.DetectorParameters_create()
    parameters.adaptiveThreshConstant = 10

    # lists of ids and the corners belonging to each id
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        gray, aruco_dict, parameters=parameters)

    # font for displaying text (below)
    font = cv2.FONT_HERSHEY_SIMPLEX

    # check that the ids list is not empty;
    # without this check the code would crash
    if ids is not None:
        # estimate pose of each marker and return the values
        # rvet and tvec-different from camera coefficients
        rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
            corners, 0.05, mtx, dist)

        for i in range(0, ids.size):
            # draw axis for the aruco markers
            aruco.drawAxis(frame, mtx, dist, rvec[i], tvec[i], 0.1)

        # draw a square around the markers
        aruco.drawDetectedMarkers(frame, corners)

        # code to show ids of the marker found
        strg = ''
        for i in range(0, ids.size):
            strg += str(ids[i][0]) + ', '

        cv2.putText(frame, "Id: " + strg, (0, 64), font, 1, (0, 255, 0), 2,
                    cv2.LINE_AA)

    else:
        # no markers found: show 'No Ids' and return empty vectors
        rvec, tvec = None, None
        cv2.putText(frame, "No Ids", (0, 64), font, 1, (0, 255, 0), 2,
                    cv2.LINE_AA)

    # display the resulting frame
    cv2.imshow('frame', frame)
    cv2.waitKey(1)
    return rvec, tvec
def aruco_routine(img, drone_tvec, drone_rvec):
    valid_ids = set(range(250))
    corners, ids, rvecs, tvecs = utils.find_markers(img, camera_matrix, dist_coef) #3d
    print(len(corners))
    aruc3d = img.copy()  # "frame" is not defined in this scope
    if len(corners) > 0:
        utils.process_markers(drone_tvec, drone_rvec, corners, ids, rvecs, tvecs, valid_ids) #store
        rvec, tvec, _ = aruco.estimatePoseSingleMarkers(corners, 0.33, camera_matrix, dist_coef)

        # Draw it
        for r, t in zip(rvec, tvec):  # avoid shadowing the rvecs/tvecs above
            aruco.drawAxis(aruc3d, camera_matrix, dist_coef, r, t, 0.33)
    
    cv2.imwrite("detection.jpg", aruc3d)
    def run(self):

        global dist_2_target

        while self._running:
            #-- Read the camera frame
            frame = self.vs.read()
            #frame = cv2.flip(image, 1) # Flip the picture horizontally
            #-- Convert in gray scale
            gray = cv2.cvtColor(
                frame, cv2.COLOR_BGR2GRAY
            )  #-- remember, OpenCV stores color images in Blue, Green, Red
            #-- Find all the aruco markers in the image
            corners, ids, rejected = aruco.detectMarkers(
                image=gray,
                dictionary=self.aruco_dict,
                parameters=self.parameters,
                cameraMatrix=self.camera_matrix,
                distCoeff=self.camera_distortion)
            if ids is not None and ids[0] == self.id_to_find:
                ret = aruco.estimatePoseSingleMarkers(corners,
                                                      self.marker_size,
                                                      self.camera_matrix,
                                                      self.camera_distortion)
                #-- Unpack the output, get only the first
                rvec, tvec = ret[0][0, 0, :], ret[1][0, 0, :]
                #-- Draw the detected marker and put a reference frame over it
                aruco.drawDetectedMarkers(frame, corners)
                aruco.drawAxis(frame, self.camera_matrix,
                               self.camera_distortion, rvec, tvec, 10)
                #-- Obtain the rotation matrix tag->camera
                R_ct = np.matrix(cv2.Rodrigues(rvec)[0])
                R_tc = R_ct.T
                #-- Now get the position and attitude of the camera with respect to the marker
                pos_camera = -R_tc * np.matrix(tvec).T
                str_position = "CAMERA Position x=%4.0f  y=%4.0f  z=%4.0f" % (
                    pos_camera[0], pos_camera[1], pos_camera[2])
                cv2.putText(frame, str_position, (0, 200), font, 1,
                            (0, 255, 0), 2, cv2.LINE_AA)
                dist_2_target = int(
                    pos_camera[2][0, 0])  # distance to marker in cm

            #--- Display the frame
            cv2.imshow('frame', frame)

            #--- use 'q' to quit
            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                cv2.destroyAllWindows()
                break
Example 11
def estimate_aruco_marker_pose(img_list,
                               calibration_data,
                               marker_size=50,
                               aruco_dict=aruco.DICT_4X4_250):
    """
    :param img_list:
    :param calibration_data: could be a yamlfilpath or a list [mtx, dist, rvecs, tvecs]
    :param marker_size:
    :param aruco_dict:
    :return:
    author: weiwei
    date: 2018, 20210516
    """
    if isinstance(calibration_data, str):
        mtx, dist, _, _, _ = yaml.load(open(calibration_data, 'r'),
                                       Loader=yaml.UnsafeLoader)
    elif isinstance(calibration_data, list):
        if len(calibration_data) == 4:
            mtx, dist, _, _ = calibration_data
        elif len(calibration_data) == 2:
            mtx, dist = calibration_data
        else:
            raise ValueError(
                "Calibration data must include the camera matrix and the distortion coefficients."
            )
    else:
        raise ValueError(
            "Calibration data must be a str (file path) or a list.")
    aruco_dict = aruco.Dictionary_get(aruco_dict)
    parameters = aruco.DetectorParameters_create()
    pos_list = []
    rotmat_list = []
    for img in img_list:
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            img, aruco_dict, parameters=parameters)
        if ids is not None:
            aruco.drawDetectedMarkers(img, corners, borderColor=[255, 255, 0])
            marker_rvecs, marker_tvecs, _objPoints = aruco.estimatePoseSingleMarkers(
                corners, marker_size, mtx, dist)
            aruco.drawAxis(img, mtx, dist, marker_rvecs[0],
                           marker_tvecs[0] / 1000.0, 0.1)
            pos = marker_tvecs[0][0].ravel()
            rotmat = cv2.Rodrigues(marker_rvecs[0])[0]
            pos_list.append(pos)
            rotmat_list.append(rotmat)
    average_pos = rm.posvec_average(pos_list)
    average_rotmat = rm.rotmat_average(rotmat_list)
    return average_pos, average_rotmat
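rm.posvec_average and rm.rotmat_average come from an external module that is not shown. One possible implementation, assuming SciPy >= 1.4 (whose Rotation.mean computes the chordal L2 mean):

import numpy as np
from scipy.spatial.transform import Rotation


def posvec_average(pos_list):
    # element-wise mean of the stacked position vectors
    return np.mean(np.asarray(pos_list), axis=0)


def rotmat_average(rotmat_list):
    # average the rotations via their quaternion (chordal) mean
    return Rotation.from_matrix(np.asarray(rotmat_list)).mean().as_matrix()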
Example 12
    def real_time_poseEstimation(self):
        """
        Method that display real time detection of the aruco markers with the pose estimation and id of each
        Returns:
        """
        cv2.namedWindow("Aruco")
        #frame = self.kinect.get_color()
        frame = self.kinect.get_color()  #[270:900,640:1400]
        rval = True

        while rval:
            gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
            parameters = aruco.DetectorParameters_create()
            aruco_dict = aruco.Dictionary_get(self.aruco_dict)
            corners, ids, rejectedImgPoints = aruco.detectMarkers(
                gray, aruco_dict, parameters=parameters)
            if ids is not None:
                frame = aruco.drawDetectedMarkers(frame, corners, ids)
                # side length of the marker in meters
                rvecs, tvecs, trash = aruco.estimatePoseSingleMarkers(
                    corners, self._size_of_marker, self.mtx, self.dist)
                for i in range(len(tvecs)):
                    frame = aruco.drawAxis(frame, self.mtx, self.dist,
                                           rvecs[i], tvecs[i],
                                           self._length_of_axis)

            cv2.imshow("Aruco", cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
            #frame = self.kinect.get_color()
            frame = self.kinect.get_color()  #[270:900,640:1400]

            key = cv2.waitKey(20)
            if key == 27:  # exit on ESC
                break

        cv2.destroyWindow("Aruco")
def detect():
    cameraMatrix = np.array([[8.2177218160147447e+02, 0., 3.4694289806342221e+02],[0., 8.2177218160147447e+02, 2.4795144956871457e+02],[0., 0., 1.]])
    distCoeffs = np.array([4.4824308523616324e-02, -7.4951985348854000e-01, 3.7539708742088725e-03, 8.8931335565222442e-03, 3.7214188475390984e+00])
    # type declarations
    rvecs = np.array([])
    tvecs = np.array([])
    
    # Capture frame-by-frame
    ret, frame = cap.read()
    #print(frame.shape) #480x640
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters =  aruco.DetectorParameters_create()
    #print(parameters)
 
    '''detectMarkers(...)
    detectMarkers(image, dictionary[, corners[, ids[, parameters[, rejectedImgPoints]]]]) -> corners, ids, rejectedImgPoints
    '''
    # lists of ids and the corners belonging to each id
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
    #print(corners)
 
    # It's working.
    # my problem was that the cellphone put black all around it. The algorithm
    # depends very much upon finding rectangular black blobs

    if corners:
        rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(corners, 0.05, cameraMatrix, distCoeffs)
        frame = aruco.drawAxis(frame, cameraMatrix, distCoeffs, rvecs, tvecs, 0.1)
        frame = aruco.drawDetectedMarkers(frame, corners, ids)
    return frame, rvecs, tvecs
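estimatePoseSingleMarkers was removed in OpenCV 4.7 along with the rest of the legacy API. A sketch of a solvePnP-based replacement for one marker, assuming the standard ArUco corner order (top-left, top-right, bottom-right, bottom-left):

import numpy as np
import cv2


def estimate_single_marker_pose(corner, marker_size, camera_matrix, dist_coeffs):
    # corner: one entry of the `corners` list returned by detectMarkers
    half = marker_size / 2.0
    obj_points = np.array([[-half,  half, 0],
                           [ half,  half, 0],
                           [ half, -half, 0],
                           [-half, -half, 0]], dtype=np.float32)
    img_points = corner.reshape(4, 2).astype(np.float32)
    ok, rvec, tvec = cv2.solvePnP(obj_points, img_points, camera_matrix,
                                  dist_coeffs, flags=cv2.SOLVEPNP_IPPE_SQUARE)
    return rvec, tvec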
Example 14
 def est_marker_pose(self, cv2_img):
     gray = cv2.cvtColor(cv2_img, cv2.COLOR_BGR2GRAY)
     corners, ids, _ = aruco.detectMarkers(gray,
                                           aruco_dict,
                                           parameters=parameters)
     vis_img = aruco.drawDetectedMarkers(cv2_img,
                                         corners,
                                         ids,
                                         borderColor=(0, 0, 255))
     if ids is not None and len(ids) > 3:
         n_ids = len(ids)
         rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
             corners, PAD_CFG[ARGS.target]["marker_size"], K, DIST)
         rvecs, tvecs = rvecs.reshape((n_ids, 3)), tvecs.reshape((n_ids, 3))
         Ccv_X_Ts = np.zeros((4, 4, 4))
         X_Ts = np.zeros((4, 4, 4))
         n_pads = 0
         for i in range(4):
             Ccv_X_T_temp = self.cmpt_target_pose(rvecs, tvecs, ids,
                                                  self.pad_ids[str(i)])
             if Ccv_X_T_temp is not None:
                 n_pads += 1
                 X_T_temp = X_Cr.dot(Cr_X_Ccv).dot(Ccv_X_T_temp)
                 # print(X_T_temp[2, 3])
                 vis_img = aruco.drawAxis(
                     vis_img, K, DIST,
                     cv2.Rodrigues(Ccv_X_T_temp[0:3, 0:3])[0],
                     Ccv_X_T_temp[0:3, 3], 0.04)
                 vis_img = draw_BB(vis_img, X_T_temp)
                 #
                 Ccv_X_Ts[i, :, :] = Ccv_X_T_temp
                 X_Ts[i, :, :] = X_T_temp
         rospy.sleep(0.05)
         # Display our image
         return bridge.cv2_to_imgmsg(vis_img, 'rgb8')
def estimatePoseToMarker(_img, cameraMatrix, distCoeffs):

    img = _img.copy()

    # grayscale image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Detect Aruco markers
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        gray, ARUCO_DICT, parameters=ARUCO_PARAMETERS)

    ## Keep ID 49 (the robot marker)
    corners, ids = keepMarkerById(corners, ids, 49)

    img = aruco.drawDetectedMarkers(img,
                                    corners,
                                    ids=ids,
                                    borderColor=(0, 0, 255))

    rvec, tvec, _ = aruco.estimatePoseSingleMarkers(corners=corners,
                                                    markerLength=0.1965,
                                                    cameraMatrix=cameraMatrix,
                                                    distCoeffs=distCoeffs)

    img = aruco.drawAxis(img, cameraMatrix, distCoeffs, rvec, tvec, 2)
    cv2.imwrite('calib_marker.png', img)

    return cv2.Rodrigues(rvec)[0], tvec.reshape(3, 1)
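keepMarkerById is a helper that is not shown in the snippet. A hypothetical implementation consistent with how it is called above (keep only the detections whose ID matches):

import numpy as np


def keepMarkerById(corners, ids, marker_id):
    if ids is None:
        return [], None
    kept = [(c, i) for c, i in zip(corners, ids) if i[0] == marker_id]
    if not kept:
        return [], None
    kept_corners, kept_ids = zip(*kept)
    return list(kept_corners), np.array(kept_ids)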
Example 16
    def update(self, frame):
        # transform image to grayscale
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, self.dict, parameters=self.ar_params)

        markers = aruco.drawDetectedMarkers(gray, corners, ids)

        rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
            corners, self.markerLength, self.calibration_params[0],
            self.calibration_params[1])

        if rvecs is not None and tvecs is not None:
            print(rvecs)
            print(tvecs)
            self.debug_img = aruco.drawAxis(markers,
                                            self.calibration_params[0],
                                            self.calibration_params[1], rvecs,
                                            tvecs, 1)
            self.markerPose.append(tvecs)
        else:
            self.debug_img = markers

        # clear the stream in preparation for the next frame
        self.cap.truncate(0)
Example 17
def main():
    # Read Built-in Camera Feed
    cap = cv2.VideoCapture(0)
    while cap.isOpened():
        ret, im = cap.read()
        if (im is not None):

            # im = cv2.imread('images/marker.jpg', cv2.IMREAD_COLOR)
            marker_sz = 11.1    #cm (same units used to report tvecs)
            camera_matrix = np.array([[1776,0,762],[0,1780,1025],[0,0,1]],dtype=float)  #cx,cy ~= im.shape[1],im.shape[0]
            dist_coeffs = np.array([[0,0,0,0]],dtype=float) #need to be float type

            aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
            aruco_params = aruco.DetectorParameters_create()
            corners, ids, rejectedPts = aruco.detectMarkers(im, aruco_dict, parameters=aruco_params) #print np.squeeze(corners[0]).shape

            im = aruco.drawDetectedMarkers(im, corners, ids=ids, borderColor=1)

            rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(corners, marker_sz, camera_matrix, dist_coeffs)  #print rvecs
            if rvecs is not None:
                im = aruco.drawAxis(im, camera_matrix, dist_coeffs, rvecs, tvecs, marker_sz)

            cvDrawWindow('im',im, 1)
            if cv2.waitKey(1)==27:
                break

    cv2.destroyAllWindows()
Example 18
    def table_callback(self, rgb_img,ros_cloud):
        try:
            rgb_img = self.bridge.imgmsg_to_cv2(rgb_img,'bgr8')
            gray = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
            aruco_dict = aruco.Dictionary_get(aruco.DICT_7X7_250)
            parameters =  aruco.DetectorParameters_create()
            corners, ids, rejectedImgPoints = aruco.detectMarkers(gray,aruco_dict, parameters=parameters)
            criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.0001)
            for corner in corners:
                cv2.cornerSubPix(gray, corner, winSize = (3,3), zeroZone = (-1,-1), criteria = criteria)
            frame_markers = aruco.drawDetectedMarkers(rgb_img.copy(), corners, ids)
            markerLength = 0.8
            flags = (cv2.CALIB_USE_INTRINSIC_GUESS + cv2.CALIB_RATIONAL_MODEL + cv2.CALIB_FIX_ASPECT_RATIO)
            rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(corners, markerLength, cameramatrix, dist_coeffs)
            # result_matrix = np.zeros(shape=[4,4])
            # rotation_matrix = eulerAnglesToRotationMatrix(rvecs[0][0])
            # result_matrix[:3,:3] = rotation_matrix
            # result_matrix[:3,3] = tvecs[0]
            # result_matrix[3,3] = 1.
            length_of_axis = 0.6
            total_center = np.zeros(shape = [4,1,2])
            for index in range(len(ids)):
                center = np.array(np.mean(corners[index][0], axis=0), dtype=int)
                total_center[ids[index]-1] = center
            imaxis = aruco.drawDetectedMarkers(rgb_img.copy(), corners, ids)
            for i in range(len(tvecs)):
                imaxis = aruco.drawAxis(imaxis, cameramatrix, dist_coeffs, rvecs[i], tvecs[i], length_of_axis)
            # Display the resulting frame
            self.ar_pub.publish(self.bridge.cv2_to_imgmsg(imaxis,'bgr8'))

            print('height: {}'.format(ros_cloud.height))
            print('width: {}'.format(ros_cloud.width))
Example 19
    def draw_axes(self, frame, detections: ArucoPoseDetections):
        if self.camera is None:
            raise MissingParametersError()

        rvecs = detections.rvecs
        tvecs = detections.tvecs

        if rvecs is None:
            return frame

        for i in range(len(rvecs)):
            rvec = rvecs[i]
            tvec = tvecs[i]

            try:
                frame = aruco.drawAxis(image=frame,
                                       cameraMatrix=self.camera.camera_matrix,
                                       distCoeffs=self.camera.dist_coeffs,
                                       rvec=rvec,
                                       tvec=tvec,
                                       length=self.context.marker_length)
            except cv2.error:
                # print('error drawing axis')
                pass

        return frame
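aruco.drawAxis was dropped from recent OpenCV releases, which is one reason a call like the one above may raise cv2.error. The equivalent core function, assuming OpenCV >= 4.x (variable names below are placeholders for the same arguments):

import cv2

# same arguments as aruco.drawAxis, with the axis length last
frame = cv2.drawFrameAxes(frame, camera_matrix, dist_coeffs, rvec, tvec, length)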
Example 20
def detect(frame, camera_matrix, dist_coefs):
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters = aruco.DetectorParameters_create()
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        frame_gray, aruco_dict, parameters=parameters)

    rendered_img = frame
    rvecs = []
    tvecs = []
    rvec = []
    tvec = []
    if corners:
        rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
            corners, size_marker, camera_matrix, dist_coefs)

        # Hypothesis: we have only the chessboard marker, so we take only the first result
        rvec = rvecs[0]
        tvec = tvecs[0]
        rendered_img = aruco.drawAxis(rendered_img, camera_matrix, dist_coefs,
                                      rvec, tvec, 10.0)

        rvec, tvec = getCorrectVectors(rvec, tvec)
    else:
        rvec, tvec = getCorrectVectors(rvec, tvec)

    return (np.array([rvec]), np.array([tvec]), rendered_img)
def detect_marker_pose(img, corners, ids, camera_type, display=False):

    distCoeffs = np.zeros((5,1))
    if camera_type == "D415": 
        # default camera_matrix from realsense ros wrapper
        camera_mtx = np.array([[931.7362060546875, 0.0, 622.6597900390625],
                                [0.0, 931.1814575195312, 354.47479248046875],
                                [0.0, 0.0, 1.0]])

    if camera_type == "D435":
        camera_mtx = np.array([[617.0361328125, 0.0, 327.0294189453125],
                                [0.0, 617.2791137695312, 237.9000701904297],
                                [0.0, 0.0, 1.0]])
    size_of_marker = 0.04  # side length of the marker in meters
    length_of_axis = 0.1
    rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(corners, size_of_marker, camera_mtx, distCoeffs)
    img_axis = aruco.drawDetectedMarkers(img.copy(), corners, ids)
    
    for i in range(len(tvecs)):
        img_axis = aruco.drawAxis(img_axis, camera_mtx, distCoeffs, rvecs[i], tvecs[i], length_of_axis)

    if display:
        plt.figure()
        plt.imshow(img_axis)
        plt.grid()
        plt.show()

    return img_axis, rvecs, tvecs
Example 22
    def callback(self, data):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
        global camera_angle_x, camera_angle_y
        markerLength = 5

        #(rows,cols,channels) = cv_image.shape
        # if cols > 60 and rows > 60 :
        #  cv2.circle(cv_image, (50,50), 10, 255)

        #cv2.imshow("Image window", cv_image)
        # cv2.waitKey(3)

        # Load the camera coefficients etc
        with np.load('B.npz') as X:
            mtx, dist, _, _ = [X[i] for i in ('mtx', 'dist', 'rvecs', 'tvecs')]

        # Make the image gray for ArUco tag detection
        gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        # Assign the dictionary we wish to use (6 bit, with 250 separate tags)
        aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
        parameters = aruco.DetectorParameters_create()

        # lists of ids and the corners belonging to each id
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)
        #aruco.drawDetectedMarkers(cv_image, corners, ids)

        # this if statement lets us check if we have detected a tag by checking the size
        # of the corners list to see if it's greater than 0. If it is, then we want to
        # find the center position of the tag and then correct the camera angle to center
        # it by publishing back a new angle to the control_camera topic.
        if (len(corners) != 0):
            # Draw on the markers so we can see they've been detected
            gray = aruco.drawDetectedMarkers(cv_image, corners, ids)
            rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
                corners, markerLength, mtx, dist)  # For a single marker
            gray = aruco.drawAxis(gray, mtx, dist, rvec, tvec, 10)
            aruco_code_center_pixel = self.tag_center(corners)
            camera_angle_x, camera_angle_y = self.camera_angle_correction(
                aruco_code_center_pixel, camera_angle_x, camera_angle_y)

            self.publish_the_message(
                camera_angle_x, camera_angle_y
            )  # publish the desired camera angle to the /bebop/camera_control topic

        # Display the video feed frames every 5 ms.
        cv2.imshow("Image window", cv_image)
        cv2.imshow("Gray window", gray)
        cv2.waitKey(5)

        # Publish the image back into ROS image message type (not sure why, I guess it's if you
        # want to do something else with it after using OpenCV).
        try:
            self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))

        except CvBridgeError as e:
            print(e)
Example 23
    def draw_pose(self, image, matrix, distortion):
        if self.set:
            image_draw = aruco.drawAxis(image, matrix, distortion, self.rvec, self.tvec, 0.1)
        else:
            image_draw = image

        return image_draw
Example 24
def image_callback(ros_image):
    global initialized, initial_rot, initial_trans

    bridge = CvBridge()
    image = bridge.imgmsg_to_cv2(ros_image, desired_encoding='bgr8')

    #converting to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    corners = []
    parameters = aruco.DetectorParameters_create()
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        gray, aruco_dict, parameters=parameters)
    if corners:
        frame_markers = aruco.drawDetectedMarkers(image.copy(), corners, ids)

        size_of_marker = 0.181  # side length of the marker in meters
        rvecs, tvecs, trash = aruco.estimatePoseSingleMarkers(
            corners, size_of_marker, mtx, dist)

        frame_markers = aruco.drawAxis(frame_markers, mtx, dist, rvecs, tvecs,
                                       size_of_marker)
        aruco_image = bridge.cv2_to_imgmsg(frame_markers, encoding="bgr8")
        aruco_image_publisher.publish(aruco_image)

        rvecs = np.reshape(rvecs, (3,))  # assumes a single detected marker
        tvecs = np.reshape(tvecs, (3,))

        cam_to_aruco_R = Rotation.from_rotvec(rvecs)
        cam_to_aruco_dcm = cam_to_aruco_R.as_dcm()
        aruco_to_cam_dcm = cam_to_aruco_dcm.transpose()
        aruco_to_cam_position = -np.matmul(aruco_to_cam_dcm, tvecs)
        aruco_to_cam_R = Rotation.from_dcm(aruco_to_cam_dcm)

        if not initialized:
            initial_trans = aruco_to_cam_position
            initial_rot = aruco_to_cam_dcm

        aruco_to_cam_position = np.matmul(
            aruco_to_cam_dcm, aruco_to_cam_position - initial_trans)
        aruco_to_cam_R = Rotation.from_dcm(
            np.matmul(initial_rot, aruco_to_cam_dcm.transpose()))

        aruco_to_cam_quat = aruco_to_cam_R.as_quat()

        camera_pose.header.frame_id = "aruco_link"
        camera_pose.header.stamp = rospy.Time.now()

        camera_pose.pose.position.x = aruco_to_cam_position[0]
        camera_pose.pose.position.y = aruco_to_cam_position[1]
        camera_pose.pose.position.z = aruco_to_cam_position[2]

        camera_pose.pose.orientation.x = aruco_to_cam_quat[0]
        camera_pose.pose.orientation.y = aruco_to_cam_quat[1]
        camera_pose.pose.orientation.z = aruco_to_cam_quat[2]
        camera_pose.pose.orientation.w = aruco_to_cam_quat[3]

        initialized = True

        return initialized
 def aruco(self, frame):
     #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
     corners, ids, rejectedImgPoints = aruco.detectMarkers(
         frame, self.aruco_dict, parameters=self.parameters)
     frame_markers = aruco.drawDetectedMarkers(frame.copy(), corners, ids)
     rvecs, tvecs, trash = aruco.estimatePoseSingleMarkers(
         corners, self.size_of_marker, self.mtx, self.dist)
     imaxis = aruco.drawDetectedMarkers(frame.copy(), corners, ids)
     if tvecs is not None:
         for i in range(len(tvecs)):
             imaxis = aruco.drawAxis(imaxis, self.mtx, self.dist, rvecs[i],
                                     tvecs[i], self.length_of_axis)
             rvec = np.squeeze(rvecs[0], axis=None)
             tvec = np.squeeze(tvecs[0], axis=None)
             tvec = np.expand_dims(tvec, axis=1)
             rvec_matrix = cv2.Rodrigues(rvec)[0]
             proj_matrix = np.hstack((rvec_matrix, tvec))
             euler_angles = cv2.decomposeProjectionMatrix(proj_matrix)[6]
             cv2.putText(imaxis, 'X: ' + str(int(euler_angles[0])),
                         (10, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,
                         (0, 0, 255))
             cv2.putText(imaxis, 'Y: ' + str(int(euler_angles[1])),
                         (115, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,
                         (0, 255, 0))
             cv2.putText(imaxis, 'Z: ' + str(int(euler_angles[2])),
                         (200, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,
                         (255, 0, 0))
     cv2.imshow('Aruco', imaxis)
def main():
    im = cv2.imread('images/IMG_0081.jpg', cv2.IMREAD_COLOR)
    marker_sz = 11.1  #cm (same units used to report tvecs)
    camera_matrix = np.array([[1776, 0, 762], [0, 1780, 1025], [0, 0, 1]],
                             dtype=float)  #cx,cy ~= im.shape[1],im.shape[0]
    dist_coeffs = np.array([[0, 0, 0, 0]], dtype=float)  #need to be float type

    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    aruco_params = aruco.DetectorParameters_create()
    corners, ids, rejectedPts = aruco.detectMarkers(
        im, aruco_dict,
        parameters=aruco_params)  #print np.squeeze(corners[0]).shape

    im = aruco.drawDetectedMarkers(im, corners, ids=ids, borderColor=1)

    rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(corners, marker_sz,
                                                      camera_matrix, dist_coeffs)
    #print rvecs
    if rvecs is not None:
        im = aruco.drawAxis(im, camera_matrix, dist_coeffs, rvecs, tvecs,
                            marker_sz)

    cvDrawWindow('im', im, .3, 1)

    cv2.destroyAllWindows()
Example 27
    def get_board_range(self, board):
        detected_board_ids     = list()
        detected_board_corners = list()

        visible_ids = self.get_visible_ids()
        if visible_ids is not None:
            for i in range(len(visible_ids)):
                detected_id     = visible_ids[i]
                detected_corner = self.corners[i]

                if(detected_id in board.ids.tolist()):
                    detected_board_ids.append(detected_id)
                    detected_board_corners.append(detected_corner)

        if(detected_board_ids):
            detected_board_ids     = np.asarray(detected_board_ids)
            detected_board_corners = np.asarray(detected_board_corners)
            retval, rvec, tvec = aruco.estimatePoseBoard(detected_board_corners,
                                                         detected_board_ids, board,
                                                         self.camera_matrix, self.dist_coeffs)
            self.frame = aruco.drawAxis(
                self.frame,
                self.camera_matrix,
                self.dist_coeffs,
                rvec,
                tvec,
                self.axis_length_inches)
            distance_to_board = np.linalg.norm(tvec)
            return distance_to_board
        # board not visible, distance unknown
        return None
Example 28
def gen():
    """Video streaming generator function."""
    while True:

        # read camera frame
        ret, frame = cap.read()
        
        # convert gray scale
        gray    = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #-- remember, OpenCV stores color images in Blue, Green, Red
        #-- Find all the aruco markers in the image
        corners, ids, rejected = aruco.detectMarkers(image=gray, dictionary=aruco_dict, parameters=parameters,
                              cameraMatrix=camera_matrix, distCoeff=camera_distortion)

        if ids is not None and ids[0] == id_to_find:

            ret1 = aruco.estimatePoseSingleMarkers(corners, marker_size, camera_matrix, camera_distortion)

            #-- Unpack the output, get only the first
            rvec, tvec = ret1[0][0,0,:], ret1[1][0,0,:]

            #-- Get the attitude
            aruco.drawDetectedMarkers(frame, corners)
            aruco.drawAxis(frame, camera_matrix, camera_distortion, rvec, tvec, 10)
            str_position = "MARKER Position x=%4.0f  y=%4.0f  z=%4.0f"%(tvec[0], tvec[1], tvec[2])
            cv2.putText(frame, str_position, (0, 100), font, 1, (0, 255, 0), 2, cv2.LINE_AA)
        
            #-- Obtain the rotation matrix tag->camera
            R_ct    = np.matrix(cv2.Rodrigues(rvec)[0])
            R_tc    = R_ct.T

            pos_camera = -R_tc*np.matrix(tvec).T

            str_position = "CAMERA Position x=%4.0f  y=%4.0f  z=%4.0f"%(pos_camera[0], pos_camera[1], pos_camera[2])
            cv2.putText(frame, str_position, (0, 200), font, 1, (0, 255, 0), 2, cv2.LINE_AA)

        encode_return_code, image_buffer = cv2.imencode('.jpg', frame)
        io_buf = io.BytesIO(image_buffer)
        yield (b'--frame\r\n'
              b'Content-Type: image/jpeg\r\n\r\n' + io_buf.read() + b'\r\n')
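gen() yields a multipart MJPEG stream, so it is presumably wired to a web framework. A sketch of the typical Flask wiring (the route name is ours), matching the --frame boundary used above:

from flask import Flask, Response

app = Flask(__name__)


@app.route('/video_feed')
def video_feed():
    return Response(gen(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')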
Example 29
    def trackAruco(self):
        corners, ids, rejected = aruco.detectMarkers(image=self.gray, dictionary=self.aruco_dict, parameters=self.parameters)
        

        self.pose.marker_id = -1
        self.pose.distance = 0
        self.pose.roll = 0
        self.pose.pitch = 0
        self.pose.yaw = 0

        if ids is not None:
            rvec = []
            tvec = []
            ret = aruco.estimatePoseSingleMarkers(corners, self.marker_size, self.camera_matrix, self.camera_distortion)
            for markers in ret[0]:
                rvec.append(markers[0])
            for markers in ret[1]:
                tvec.append(markers[0])     
            #print(rvec)
            # only for video
            aruco.drawDetectedMarkers(self.frame, corners)
            for i in range(len(rvec)):
                aruco.drawAxis(self.frame, self.camera_matrix, self.camera_distortion, rvec[i], tvec[i], 5)

            for i in range(len(ids)):
                x = tvec[i][0]
                z = tvec[i][2]
                    
                R_ct = np.matrix(cv2.Rodrigues(rvec[i])[0])
                R_tc = R_ct.T

                roll_camera, pitch_camera, yaw_camera = self.rotationMatrixToEulerAngles(self.R_flip*R_tc)

                self.pose.marker_id = int(ids[i][0])
                self.pose.distance = int(z)
                self.pose.roll = int(math.degrees(roll_camera))
                self.pose.pitch = int(math.degrees(pitch_camera))
                self.pose.yaw = int(math.degrees(yaw_camera))

            str_attitude = 'CAMERA Attitude r=%4.0f p=%4.0f y=%4.0f'%(math.degrees(roll_camera),math.degrees(pitch_camera), math.degrees(yaw_camera))
            cv2.putText(self.frame, str_attitude, (0,250),self.font,1,(0,255,0),2,cv2.LINE_AA)

            str_position = 'Marker Position x=%4.0f y=%4.0f z=%4.0f'%(tvec[0][0], tvec[0][1], tvec[0][2])
            cv2.putText(self.frame, str_position, (0,150),self.font,1,(0,255,0),2,cv2.LINE_AA)
        
            marker = 'Marker ID %4.0f'%(ids[i][0])
            cv2.putText(self.frame, marker, (0,200), self.font,1,(0,255,0),2,cv2.LINE_AA)
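rotationMatrixToEulerAngles is not shown in the snippet. A common implementation of that helper, written here as a standalone function (roll, pitch, yaw from a rotation matrix, with the gimbal-lock case handled separately):

import math


def rotationMatrixToEulerAngles(R):
    sy = math.sqrt(R[0, 0] ** 2 + R[1, 0] ** 2)
    if sy > 1e-6:
        x = math.atan2(R[2, 1], R[2, 2])   # roll
        y = math.atan2(-R[2, 0], sy)       # pitch
        z = math.atan2(R[1, 0], R[0, 0])   # yaw
    else:  # singular: pitch is close to +-90 degrees
        x = math.atan2(-R[1, 2], R[1, 1])
        y = math.atan2(-R[2, 0], sy)
        z = 0.0
    return x, y, z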
    def run_flann_matcher(self):

        # FLANN parameters
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)  # or pass empty dictionary
        flann = cv2.FlannBasedMatcher(index_params, search_params)

        matches = flann.knnMatch(np.asarray(self.__marker_desc, np.float32),
                                 np.asarray(self.__input_desc, np.float32),
                                 k=2)
        # Need to draw only good matches, so create a mask
        matchesMask = [[0, 0] for i in range(len(matches))]

        # store all the good matches as per Lowe's ratio test.
        good = []
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)

        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([
                self.__marker_kp[m.queryIdx].pt for m in good
            ]).reshape(-1, 1, 2)
            dst_pts = np.float32([
                self.__input_kp[m.trainIdx].pt for m in good
            ]).reshape(-1, 1, 2)

            H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

            matchesMask = mask.ravel().tolist()

            h, w, _ = self.in_image.shape
            pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                              [w - 1, 0]]).reshape(-1, 1, 2)
            dst = cv2.perspectiveTransform(pts, H)

            num, self.r_vec, self.t_vec, Ns = cv2.decomposeHomographyMat(
                H, self.__cam_mat)
            print(self.__in_image.shape)

            if (self.__json_params["debug_draw"] == True):

                aruco.drawAxis(self.in_image, self.__cam_mat, self.__dist_mat,
                               self.r_vec[1], self.t_vec[1], 0.1)  # Draw Axis
                self.in_image = cv2.polylines(self.in_image, [np.int32(dst)],
                                              True, 255, 3, cv2.LINE_AA)
def estimatePoseToBoard(_img, cameraMatrix, distCoeffs):

    img = _img.copy()

    # grayscale image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Detect Aruco markers
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        gray, ARUCO_DICT, parameters=ARUCO_PARAMETERS)

    # Refine detected markers
    # Eliminates markers not part of our board, adds missing markers to the board
    corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
        image=gray,
        board=CHARUCO_BOARD,
        detectedCorners=corners,
        detectedIds=ids,
        rejectedCorners=rejectedImgPoints,
        cameraMatrix=cameraMatrix,
        distCoeffs=distCoeffs)

    ## REMOVE ID 49 (the robot marker)
    corners, ids = removeMarkerById(corners, ids, 49)

    img = aruco.drawDetectedMarkers(img,
                                    corners,
                                    ids=ids,
                                    borderColor=(0, 0, 255))

    rvec, tvec = None, None

    # Only try to find CharucoBoard if we found markers
    if ids is not None and len(ids) > 10:

        # Get charuco corners and ids from detected aruco markers
        response, charuco_corners, charuco_ids = aruco.interpolateCornersCharuco(
            markerCorners=corners,
            markerIds=ids,
            image=gray,
            board=CHARUCO_BOARD)

        # Require more than 20 squares
        if response is not None and response > 20:
            # Estimate the posture of the charuco board
            pose, rvec, tvec = aruco.estimatePoseCharucoBoard(
                charucoCorners=charuco_corners,
                charucoIds=charuco_ids,
                board=CHARUCO_BOARD,
                cameraMatrix=cameraMatrix,
                distCoeffs=distCoeffs)

            img = aruco.drawAxis(img, cameraMatrix, distCoeffs, rvec, tvec, 2)
            cv2.imwrite('calib_board.png', img)

    else:
        raise RuntimeError('Calibration board is not fully visible')

    return cv2.Rodrigues(rvec)[0], tvec
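ARUCO_DICT, ARUCO_PARAMETERS and CHARUCO_BOARD are module-level globals that are not shown. They might be set up along these lines with the legacy API (the board geometry values are illustrative, not from the source):

import cv2.aruco as aruco

ARUCO_DICT = aruco.Dictionary_get(aruco.DICT_5X5_50)
ARUCO_PARAMETERS = aruco.DetectorParameters_create()
# 5x7 squares, 4 cm squares, 3 cm markers (illustrative values)
CHARUCO_BOARD = aruco.CharucoBoard_create(5, 7, 0.04, 0.03, ARUCO_DICT)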
Example 32
def getPose(im, board, m, d):
    if len(im.shape) > 2:
        gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    else:
        gray = im
    # parameters = aruco.DetectorParameters_create()
    # corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, dictionary, parameters=parameters)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, dictionary)
    ret, chcorners, chids = aruco.interpolateCornersCharuco(corners, ids, gray, board)
    ret, rvecs, tvecs = aruco.estimatePoseCharucoBoard(chcorners, chids, board, m, d)
    if ret:
        im = aruco.drawAxis(im, m, d, rvecs, tvecs, 0.5)
    cv2.imshow('markers', im)
    cv2.waitKey()