Example #1
cam.set(cv2.CAP_PROP_FPS, FPS)
arudict = aruco.custom_dictionary(8, 3)
logitech_cam_matrix = np.array([6.6419013281274511e+02, 0., 3.3006505461506646e+02, 0.,
                                6.6587003740196826e+02, 2.3030870519881907e+02, 0., 0., 1.])
logitech_cam_matrix = logitech_cam_matrix.reshape((3, 3))
logitech_dist_coeffs = np.array([2.0806786957819065e-01, -3.6839605435678208e-01,
                                 -1.2078578161331370e-02, 6.4294407773653481e-03,
                                 4.2723310854154123e-01])
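# NOTE: objPoints is used below but never defined in this snippet. A minimal,
# hypothetical stand-in: four coplanar markers in a row, one (4, 3) float32
# corner array per marker (marker_len and gap are assumed values).
marker_len = 0.05
gap = 0.01
objPoints = []
for i in range(4):
    x0 = i * (marker_len + gap)
    objPoints.append(np.array([[x0, 0, 0],
                               [x0 + marker_len, 0, 0],
                               [x0 + marker_len, marker_len, 0],
                               [x0, marker_len, 0]], dtype=np.float32))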

car_board0 = aruco.Board_create(objPoints, arudict, np.arange(0, 4))
car_board1 = aruco.Board_create(objPoints, arudict, np.arange(4, 8))

while True:
    ret, frame = cam.read()
    corners, ids, rejected = aruco.detectMarkers(frame, arudict)
    corners, ids, rejected, recovered = aruco.refineDetectedMarkers(frame, car_board0, corners, ids, rejected,
                                                                    logitech_cam_matrix, logitech_dist_coeffs)

    corners, ids, rejected, recovered = aruco.refineDetectedMarkers(frame, car_board1, corners, ids, rejected,
                                                                    logitech_cam_matrix, logitech_dist_coeffs)
    aruco.drawDetectedMarkers(frame, corners, ids)
    # rvecs, tvecs = aruco.estimatePoseSingleMarkers(corners, sd, camera_matrix, dist_coeffs)
    N0, rvec0, tvec0 = aruco.estimatePoseBoard(corners, ids, car_board0, logitech_cam_matrix, logitech_dist_coeffs)
    N1, rvec1, tvec1 = aruco.estimatePoseBoard(corners, ids, car_board1, logitech_cam_matrix, logitech_dist_coeffs)
    if N0:
        # for i in range(len(ids)):
        #     aruco.drawAxis(frame, camera_matrix, dist_coeffs, rvecs[i], tvecs[i], 0.4)
        mat, jacob = cv2.Rodrigues(rvec0)
        dash = np.full((400, 600), 255, dtype=np.uint8)
        putTextMultiline(dash, repr(mat), (20, 20))
        putTextMultiline(dash, repr(tvec0), (400, 200))
        putTextMultiline(dash, repr(rvec0), (20, 200))
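putTextMultiline is not a cv2 function; presumably it is a small helper that splits text on newlines, since cv2.putText ignores them. A minimal sketch (the font and line height are assumptions):

def putTextMultiline(img, text, org, line_height=18):
    # Draw each line separately because cv2.putText does not handle '\n'.
    x, y = org
    for i, line in enumerate(text.splitlines()):
        cv2.putText(img, line, (x, y + i * line_height),
                    cv2.FONT_HERSHEY_PLAIN, 1.0, 0, 1, cv2.LINE_AA)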
Example #2
def detection_markers():
    """
    Detect ArUco markers from the webcam, refine them against a grid
    board, and draw a cube when exactly one marker is visible.
    """
    # Constant parameters used in Aruco methods
    ARUCO_PARAMETERS = aruco.DetectorParameters_create()
    ARUCO_DICT = aruco.Dictionary_get(aruco.DICT_5X5_1000)

    pckl_file_exist = False
    if not os.path.exists('calibration.pckl'):
        print("You need to calibrate the camera you'll be using. "
              "See the calibration project directory for details.")
        exit()
    else:
        pckl_file_exist = True
        with open('calibration.pckl', 'rb') as f:
            (cameraMatrix, distCoeffs) = pickle.load(f)
        if cameraMatrix is None or distCoeffs is None:
            print("Calibration issue. Remove ./calibration.pckl and "
                  "recalibrate your camera with CalibrateCamera.py.")
            exit()

    # Create grid board object we're using in our stream
    board = aruco.GridBoard_create(markersX=5,
                                   markersY=7,
                                   markerLength=0.04,
                                   markerSeparation=0.01,
                                   dictionary=ARUCO_DICT)

    # Create vectors we'll be using for rotations and translations for postures
    rvecs, tvecs = None, None

    cam = cv2.VideoCapture(0)

    while (cam.isOpened() and pckl_file_exist):

        # Exit on the 'q' keypress
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        # Capturing each frame of our video stream
        ret, img = cam.read()
        if ret:
            # grayscale image
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            # Detect Aruco markers
            corners, ids, rejectedImgPoints = aruco.detectMarkers(
                gray, ARUCO_DICT, parameters=ARUCO_PARAMETERS)

            # Recover markers that were missed, using the already detected
            # markers and the known board layout.
            corners, ids, _, _ = aruco.refineDetectedMarkers(
                image=gray,
                board=board,
                detectedCorners=corners,
                detectedIds=ids,
                rejectedCorners=rejectedImgPoints,
                cameraMatrix=cameraMatrix,
                distCoeffs=distCoeffs)

            side_len = 0.5
            axis = np.float32([[-side_len, -side_len, 0],
                               [-side_len, side_len, 0],
                               [side_len, side_len, 0],
                               [side_len, -side_len, 0],
                               [-side_len, -side_len, 1],
                               [-side_len, side_len, 1],
                               [side_len, side_len, 1],
                               [side_len, -side_len, 1]])

            # Draw cube only when there is only one marker in a frame
            if ids is not None and len(ids) == 1:
                # The second argument is the marker side length
                # (0.04 m, matching the board created above)
                rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
                    corners, 0.04, cameraMatrix, distCoeffs)

                # cv2.projectPoints maps the 3D cube corners onto the image
                # plane using the intrinsic and extrinsic camera parameters;
                # imgpts holds the resulting 2D pixel coordinates.
                imgpts, _ = cv2.projectPoints(axis, rvecs[0], tvecs[0],
                                              cameraMatrix, distCoeffs)

                # Display image with a cube
                draw(img, imgpts, corners)
                cv2.waitKey(2)

    cv2.destroyAllWindows()
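The draw helper is not shown in this example. A plausible sketch, modeled on the standard OpenCV pose-estimation tutorial (colors and the window name are assumptions): the first four projected points form the cube base, the last four the top.

def draw(img, imgpts, corners):
    # Join the base points, the vertical edges, then the top points.
    imgpts = np.int32(imgpts).reshape(-1, 2)
    cv2.drawContours(img, [imgpts[:4]], -1, (0, 255, 0), 3)
    for i, j in zip(range(4), range(4, 8)):
        cv2.line(img, tuple(imgpts[i]), tuple(imgpts[j]), (255, 0, 0), 3)
    cv2.drawContours(img, [imgpts[4:]], -1, (0, 0, 255), 3)
    cv2.imshow('cube', img)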
Example #3
    def run(self):
        # Initialise subscribing topics
        self.init_ros_topics()

        # Wait for cameras to be ready before going ahead
        while (self.cam1_state != "ready") and (not rospy.is_shutdown()):  # or (self.cam2_state != "ready")
            rospy.sleep(0.1)  # avoid busy-waiting at full CPU

        if rospy.is_shutdown():
            return

        # load camera calibration info from cameras
        calib = rospy.wait_for_message('/camera/color/camera_info', CameraInfo)
        # calib2 = rospy.wait_for_message('/camera2/color/camera_info', CameraInfo)

        mtx = np.reshape(calib.K, [3,3])
        dist = np.array([calib.D])

        # marker size in meters.
        board_square_length = 0.032 # 0.042
        board_marker_size = 0.0265 # 0.034
        robot_square_length = 0.161
        robot_marker_size = 0.140
        robot_left_square_length = 0.134
        robot_left_marker_size = 0.104
        watch_square_length = 0.07 #0.0595
        watch_marker_size = 0.0535#0.0455#0.043

        # robot on the right
        # initialize array to store location data for base markers in the camera's frame.
        base_marker1 = np.array([0,0,0])
        base_marker2 = np.array([0,0,0])
        base_marker3 = np.array([0,0,0])

        # robot on the left
        #initialize location data for base markers in the camera's frame.
        base_marker_l1 = np.array([0,0,0])
        base_marker_l2 = np.array([0,0,0])
        base_marker_l3 = np.array([0,0,0])

        # watch
        #initialize location data for base markers in the camera's frame.
        watch_base_marker1 = np.array([0,0,0])
        watch_base_marker2 = np.array([0,0,0])
        watch_base_marker3 = np.array([0,0,0])
        watch_base_marker4 = np.array([0,0,0])

        # initialize location data for the charuco board
        board_origin = np.array([0,0,0])

        # defining different dictionaries to use for different coordinate systems
        aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_1000)
        aruco_dict_robot_l = aruco.Dictionary_get(aruco.DICT_6X6_250)
        aruco_dict_robot_r = aruco.Dictionary_get(aruco.DICT_4X4_250)
        aruco_dict_watch = aruco.Dictionary_get(aruco.DICT_7X7_250)

        board = aruco.CharucoBoard_create(9, 6, board_square_length, board_marker_size, aruco_dict)
        charuco_robot_r = aruco.CharucoBoard_create(3, 3, robot_square_length, robot_marker_size, aruco_dict_robot_r)
        charuco_robot_l = aruco.CharucoBoard_create(3, 3, robot_left_square_length, robot_left_marker_size, aruco_dict_robot_l)
        charuco_watch = aruco.CharucoBoard_create(3, 3, watch_square_length, watch_marker_size, aruco_dict_watch)
        parameters = aruco.DetectorParameters_create()

        # filter
        a_filter = 0.95

        # Initialization of variables
        board_to_cam_rot_mtx = np.identity(3)  # defined up front so early frames cannot hit a NameError
        Tb_r = np.identity(4)
        rot_mat = np.identity(3)
        robot_origin = np.array([[0, 0, 0]])
        Tb_l = np.identity(4)
        rot_mat_l = np.identity(3)
        robot_origin_l = np.array([[0, 0, 0]])
        Tb_w = np.identity(4)
        rot_mat_w = np.identity(3)
        watch_origin = np.array([[0, 0, 0]])

        rate = rospy.Rate(30)
        while not rospy.is_shutdown():
            QueryImg = copy.deepcopy(self.cam1['rgb'])
            # cv2.imwrite('./out/imgA_{}.png'.format(self.fr1), QueryImg)

            # grayscale image
            gray = cv2.cvtColor(QueryImg, cv2.COLOR_BGR2GRAY)

            # Detect Aruco markers; _r refers to the robot on the right, _l refers to the robot on the left.
            corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
            corners_r, ids_r, rejectedImgPoints_r = aruco.detectMarkers(gray, aruco_dict_robot_r, parameters=parameters)
            corners_l, ids_l, rejectedImgPoints_l = aruco.detectMarkers(gray, aruco_dict_robot_l,  parameters=parameters)

            watch_corners, watch_ids, watch_rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict_watch,  parameters=parameters)

            # Refine detected markers
            # Rejects markers not part of our board and recovers missed ones from the board layout
            corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
                    image = gray,
                    board = board,
                    detectedCorners = corners,
                    detectedIds = ids,
                    rejectedCorners = rejectedImgPoints,
                    cameraMatrix = mtx,
                    distCoeffs = dist)

            corners_r, ids_r, rejectedImgPoints_r, recoveredIds_r = aruco.refineDetectedMarkers(
                    image = gray,
                    board = charuco_robot_r,
                    detectedCorners = corners_r,
                    detectedIds = ids_r,
                    rejectedCorners = rejectedImgPoints_r,
                    cameraMatrix = mtx,
                    distCoeffs = dist)
            corners_l, ids_l, rejectedImgPoints_l, recoveredIds_l = aruco.refineDetectedMarkers(
                    image = gray,
                    board = charuco_robot_l,
                    detectedCorners = corners_l,
                    detectedIds = ids_l,
                    rejectedCorners = rejectedImgPoints_l,
                    cameraMatrix = mtx,
                    distCoeffs = dist)

            watch_corners, watch_ids, watch_rejectedImgPoints, watch_recoveredIds = aruco.refineDetectedMarkers(
                    image = gray,
                    board = charuco_watch,
                    detectedCorners = watch_corners,
                    detectedIds = watch_ids,
                    rejectedCorners = watch_rejectedImgPoints,
                    cameraMatrix = mtx,
                    distCoeffs = dist)

            # Outline all of the markers detected in our image
            # left camera view
            QueryImg = aruco.drawDetectedMarkers(QueryImg, corners, borderColor=(0, 0, 255))
            QueryImg = aruco.drawDetectedMarkers(QueryImg, corners_r, borderColor=(0, 0, 255))
            QueryImg = aruco.drawDetectedMarkers(QueryImg, corners_l, borderColor=(0, 0, 255))
            QueryImg = aruco.drawDetectedMarkers(QueryImg, watch_corners, borderColor=(0, 0, 255))

            # Valid flags
            valid_board = False
            valid_robot_r = False
            valid_robot_l = False
            valid_watch = False


            # Detect the board, requires more than 3 markers on the board to be visible
            if ids is not None and len(ids) > 3:
                # Estimate the pose of the charuco board as seen from the camera.
                pose, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board, mtx, dist)
                if pose:
                    QueryImg = aruco.drawAxis(QueryImg, mtx, dist, rvec, tvec, board_marker_size)
                    board_to_cam_rot_mtx, board_origin = transform_from_board_to_camera(rvec, tvec, a_filter, board_origin)
                    valid_board = True

            # detect robot base markers
            if ids_r is not None and len(ids_r) >= 3:
                rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(corners_r, robot_marker_size, mtx, dist)
                for ident, rvec, tvec in zip(ids_r, rvecs, tvecs):
                    if ident[0] in (0, 1, 3):
                        t = transform_from_base_to_camera(rvec, tvec)
                        base_marker1, base_marker2, base_marker3 = apply_filter(a_filter, ident[0], t, base_marker1, base_marker2, base_marker3)
                        QueryImg = aruco.drawAxis(QueryImg, mtx, dist, rvec, tvec, 0.02)
                x_axis, y_axis, z_axis, robot_origin = construct_new_axis_robot(base_marker2, base_marker1, base_marker3, False)
                # rot_mat is from robot to camera
                rot_mat = np.vstack([x_axis, y_axis, z_axis]).T
                board_origin_in_robot_frame = compute_board_origin_in_base_frame(board_origin, rot_mat, robot_origin)
                try:
                    Tb_r = compute_transformation_from_board_to_base(board_to_cam_rot_mtx, rot_mat, board_origin_in_robot_frame)
                    valid_robot_r = True
                except Exception:
                    pass

            # detect left robot base markers
            if ids_l is not None and len(ids_l) >= 3:
                rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(corners_l, robot_left_marker_size, mtx, dist)
                for ident, rvec, tvec in zip(ids_l, rvecs, tvecs):
                    #QueryImg = aruco.drawAxis(QueryImg, mtx, dist, rvec, tvec, 0.04)
                    if ident[0] in (0, 2, 3):
                        t = transform_from_base_to_camera(rvec, tvec)
                        base_marker_l1, base_marker_l2, base_marker_l3 = apply_filter(a_filter, ident[0], t, base_marker_l1, base_marker_l2, base_marker_l3)
                        QueryImg = aruco.drawAxis(QueryImg, mtx, dist, rvec, tvec, 0.02)
                x_axis, y_axis, z_axis, robot_origin_l = construct_new_axis_robot(base_marker_l2, base_marker_l1, base_marker_l3, True)
                # rot_mat_l is from robot to camera
                rot_mat_l = np.vstack([x_axis, y_axis, z_axis]).T
                board_origin_in_robot_frame = compute_board_origin_in_base_frame(board_origin, rot_mat_l, robot_origin_l)
                try:
                    Tb_l =  compute_transformation_from_board_to_base(board_to_cam_rot_mtx, rot_mat_l, board_origin_in_robot_frame)
                    valid_robot_l = True
                except Exception:
                    pass

            # detect watch face markers
            if watch_ids is not None and len(watch_ids) >= 4:
                rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(watch_corners, watch_marker_size, mtx, dist)
                for ident, rvec, tvec in zip(watch_ids, rvecs, tvecs):
                    if ident[0] in (0, 1, 2, 3):
                        t = transform_from_base_to_camera(rvec, tvec)
                        watch_base_marker1, watch_base_marker2, watch_base_marker3, watch_base_marker4 = apply_filter_watch(a_filter, ident[0], t, watch_base_marker1, watch_base_marker2, watch_base_marker3, watch_base_marker4)
                        QueryImg = aruco.drawAxis(QueryImg, mtx, dist, rvec, tvec, 0.02)
                BA = watch_base_marker2 - watch_base_marker1
                DA = watch_base_marker3 - watch_base_marker1
                normal = np.cross(DA, BA)
                nnorm = np.linalg.norm(normal)
                if nnorm > 1e-6:
                    normal /= nnorm
                # compute the center point of all four markers
                watch_origin = compute_center_from_four_points(watch_base_marker2, watch_base_marker3, watch_base_marker1, watch_base_marker4)
                x_axis = watch_base_marker1 - watch_origin
                x_axis /= np.linalg.norm(x_axis)
                y_axis = np.cross(normal, x_axis)
                # rot_mat_w maps the watch frame to the camera frame (axes as columns)
                rot_mat_w = np.vstack([x_axis, y_axis, normal]).T
                board_origin_in_watch_frame = compute_board_origin_in_base_frame(board_origin, rot_mat_w, watch_origin)
                try:
                    Tb_w = compute_transformation_from_board_to_base(board_to_cam_rot_mtx, rot_mat_w, board_origin_in_watch_frame)
                    valid_watch = True
                except Exception:
                    pass

            self.pub_msg.header.stamp = rospy.Time.now()
            self.pub2_msg.header.stamp = rospy.Time.now()
            self.pub_watch_msg.header.stamp = rospy.Time.now()

            if valid_robot_r:
                Tb_r = np.linalg.inv(Tb_r)
                T = np.zeros((4,4))
                rad = 45.*np.pi/180.
                c = np.cos(rad)
                s = np.sin(rad)
                T[0,0] = c
                T[0,1] = -s
                T[1,0] = s
                T[1,1] = c
                T[2,2] = 1
                T[3,3] = 1
                # print(T)
                Tb_r = np.matmul(Tb_r, T)
            else:
                print(str(self.fr1) + ': Invalid right robot transformation. Using previous valid.')

            self.pub_msg.pose.position.x = Tb_r[0,3]
            self.pub_msg.pose.position.y = Tb_r[1,3]
            self.pub_msg.pose.position.z = Tb_r[2,3]
            T = copy.deepcopy(Tb_r)
            T[0:3,3] = np.array([0,0,0])
            quat_br = tf.transformations.quaternion_from_matrix(T)
            quat_br = quat_br/np.linalg.norm(quat_br)
            self.pub_msg.pose.orientation.x = quat_br[0]
            self.pub_msg.pose.orientation.y = quat_br[1]
            self.pub_msg.pose.orientation.z = quat_br[2]
            self.pub_msg.pose.orientation.w = quat_br[3]

            if valid_robot_l:
                Tb_l = np.linalg.inv(Tb_l)
                T = np.zeros((4,4))
                rad = 45.*np.pi/180.
                c = np.cos(rad)
                s = np.sin(rad)
                T[0,0] = c
                T[0,1] = -s
                T[1,0] = s
                T[1,1] = c
                T[2,2] = 1
                T[3,3] = 1
                # print(T)
                Tb_l = np.matmul(Tb_l, T)
            else:
                print(str(self.fr1) + ': Invalid left robot transformation. Using previous valid.')

            self.pub2_msg.pose.position.x = Tb_l[0,3]
            self.pub2_msg.pose.position.y = Tb_l[1,3]
            self.pub2_msg.pose.position.z = Tb_l[2,3]
            T = copy.deepcopy(Tb_l)
            T[0:3,3] = np.array([0,0,0])
            quat_bl = tf.transformations.quaternion_from_matrix(T)
            quat_bl = quat_bl/np.linalg.norm(quat_bl)
            self.pub2_msg.pose.orientation.x = quat_bl[0]
            self.pub2_msg.pose.orientation.y = quat_bl[1]
            self.pub2_msg.pose.orientation.z = quat_bl[2]
            self.pub2_msg.pose.orientation.w = quat_bl[3]

            if valid_watch:
                Tb_w = np.linalg.inv(Tb_w)
                # T = np.zeros((4,4))
                # rad = np.pi
                # c = np.cos(rad)
                # s = np.sin(rad)
                # T[0,0] = c
                # T[0,1] = -s
                # T[1,0] = s
                # T[1,1] = c
                # T[2,2] = 1
                # T[3,3] = 1
                # # print(T)
                # Tb_w = np.matmul(Tb_w, T)
            else:
                print(str(self.fr1) + ': Invalid watch face transformation. Using previous valid.')

            self.pub_watch_msg.pose.position.x = Tb_w[0,3]
            self.pub_watch_msg.pose.position.y = Tb_w[1,3]
            self.pub_watch_msg.pose.position.z = Tb_w[2,3]
            T = copy.deepcopy(Tb_w)
            T[0:3,3] = np.array([0,0,0])
            quat_bw = tf.transformations.quaternion_from_matrix(T)
            quat_bw = quat_bw/np.linalg.norm(quat_bw)
            self.pub_watch_msg.pose.orientation.x = quat_bw[0]
            self.pub_watch_msg.pose.orientation.y = quat_bw[1]
            self.pub_watch_msg.pose.orientation.z = quat_bw[2]
            self.pub_watch_msg.pose.orientation.w = quat_bw[3]

            self.pub.publish(self.pub_msg)
            self.pub2.publish(self.pub2_msg)
            self.pub_watch.publish(self.pub_watch_msg)

            if not valid_board:
                print(str(self.fr1) + ': Invalid board transformation. Using previous valid.')

            self.br.sendTransform(Tb_r[0:3,3], quat_br, rospy.Time.now(), 'robot', 'board')
            self.br.sendTransform(Tb_l[0:3,3], quat_bl, rospy.Time.now(), 'robot_left', 'board')
            self.br.sendTransform(Tb_w[0:3,3], quat_bw, rospy.Time.now(), 'watch_face', 'board')

            # # plot coordinate frame in 3D for debugging
            # try:
            #     QueryImg = draw_axis(QueryImg, rot_mat, robot_origin, mtx, dist)
            # except Exception as ex:
            #     print(ex)
            #     pass
            # try:
            #     QueryImg = draw_axis(QueryImg, rot_mat_l, robot_origin_l, mtx, dist)
            # except Exception as ex:
            #     print(ex)
            #     pass
            # try:
            #     QueryImg = draw_axis(QueryImg, rot_mat_w, watch_origin, mtx, dist)
            # except Exception as ex:
            #     print(ex)
            #     pass
            # # cv2.imwrite('./out/img.png', QueryImg)
            # cv2.imwrite('./out/imgA_{}.png'.format(self.fr1), QueryImg)
            # cv2.imwrite('./out/imgB_{}.png'.format(self.fr2), QueryImg1)
            rate.sleep()
Example #4
    def calibrateWithBoard(self, imgs, sensor, draw=False):

        # Constant parameters used in Aruco methods
        ARUCO_PARAMETERS = aruco.DetectorParameters_create()
        ARUCO_DICT = aruco.Dictionary_get(aruco.DICT_4X4_50)

        # Create the charuco board object we're using in our stream
        CHARUCO_BOARD = aruco.CharucoBoard_create(
            squaresX=10,
            squaresY=6,
            squareLength=0.04,  #in meters
            markerLength=0.03,  #in meters
            dictionary=ARUCO_DICT)

        # grayscale image
        gray = cv2.cvtColor(imgs[sensor], cv2.COLOR_BGR2GRAY)

        # Detect Aruco markers
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, ARUCO_DICT, parameters=ARUCO_PARAMETERS)

        # Refine detected markers
        # Rejects markers not part of our board and recovers missed ones from the board layout
        corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
            image=gray,
            board=CHARUCO_BOARD,
            detectedCorners=corners,
            detectedIds=ids,
            rejectedCorners=rejectedImgPoints,
            cameraMatrix=self.intrinsic[sensor],
            distCoeffs=self.distCoeffs)

        #print('Found {} corners in C{} sensor {}'.format(len(corners), self.camId, sensor))
        imgs[sensor] = aruco.drawDetectedMarkers(imgs[sensor],
                                                 corners,
                                                 ids=ids,
                                                 borderColor=(0, 0, 255))

        # Only try to find CharucoBoard if we found markers
        if ids is not None and len(ids) > 10:

            # Get charuco corners and ids from detected aruco markers
            response, charuco_corners, charuco_ids = aruco.interpolateCornersCharuco(
                markerCorners=corners,
                markerIds=ids,
                image=gray,
                board=CHARUCO_BOARD)

            # Require more than 20 interpolated charuco corners
            if response is not None and response > 20:
                # Estimate the pose of the charuco board from the interpolated corners
                pose, self.extrinsic[sensor]['rvec'], self.extrinsic[sensor][
                    'tvec'] = aruco.estimatePoseCharucoBoard(
                        charucoCorners=charuco_corners,
                        charucoIds=charuco_ids,
                        board=CHARUCO_BOARD,
                        cameraMatrix=self.intrinsic[sensor],
                        distCoeffs=self.distCoeffs)
                if draw:
                    imgs[sensor] = aruco.drawAxis(
                        imgs[sensor], self.intrinsic[sensor], self.distCoeffs,
                        self.extrinsic[sensor]['rvec'],
                        self.extrinsic[sensor]['tvec'], 2)
                    cv2.imwrite(
                        './data/out/calib_C{}_{}.png'.format(
                            self.camId, sensor), imgs[sensor])
        else:
            print('Calibration board is not fully visible for C{} sensor: {}'.
                  format(self.camId, sensor))
            raise RuntimeError('calibration board not sufficiently visible')

        self.extrinsic[sensor]['rvec'] = cv2.Rodrigues(
            self.extrinsic[sensor]['rvec'])[0]
        self.extrinsic[sensor]['projMatrix'] = np.matmul(
            self.intrinsic[sensor],
            np.concatenate((self.extrinsic[sensor]['rvec'],
                            self.extrinsic[sensor]['tvec']),
                           axis=1))
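run() above depends on several helper functions that are not part of the snippet. As one example, transform_from_board_to_camera plausibly converts the board pose into a rotation matrix and low-pass filters the board origin with a_filter; a hypothetical sketch (the exponential-smoothing form is an assumption):

def transform_from_board_to_camera(rvec, tvec, a_filter, prev_origin):
    # Rotation matrix from the Rodrigues vector, plus exponential smoothing
    # of the board origin in the camera frame.
    rot_mtx, _ = cv2.Rodrigues(rvec)
    origin = a_filter * np.asarray(prev_origin, dtype=float) \
             + (1.0 - a_filter) * np.asarray(tvec, dtype=float).flatten()
    return rot_mtx, origin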
Example #5
    # Capture a frame from the video stream
    ret, QueryImg = cam.read()

    if ret:
        # Convert to grayscale
        gray = cv2.cvtColor(QueryImg, cv2.COLOR_BGR2GRAY)

        # Detect ArUco markers
        corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, ARUCO_DICT, parameters=ARUCO_PARAMETERS)

        # Refine the detected markers:
        # drop markers that are not part of our board and recover ones missed earlier
        corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
                image = gray,
                board = board,
                detectedCorners = corners,
                detectedIds = ids,
                rejectedCorners = rejectedImgPoints,
                cameraMatrix = cameraMatrix,
                distCoeffs = distCoeffs)

        # Outline the detected markers
        QueryImg = aruco.drawDetectedMarkers(QueryImg, corners, borderColor=(0, 0, 255))

        # If at least one marker was detected...
        if ids is not None and len(ids) > 0:
            # Estimate its pose; the second argument is the marker side
            # length in meters (0.05 here is an assumed value)
            rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(corners, 0.05, cameraMatrix, distCoeffs)

            # Draw a cube on the frame
            cube = draw(QueryImg, rvecs, tvecs, corners)
            cv2.imshow('cube', cube)
Example #6
def Video2():
    # Load Calibrated Parameters
    calibrationFile = "calibrationFileName.xml"
    calibrationParams = cv2.FileStorage(calibrationFile, cv2.FILE_STORAGE_READ)
    camera_matrix = calibrationParams.getNode("cameraMatrix").mat()
    dist_coeffs = calibrationParams.getNode("distCoeffs").mat()

    r = calibrationParams.getNode("R").mat()
    new_camera_matrix = calibrationParams.getNode("newCameraMatrix").mat()

    image_size = (1920, 1080)
    #map1, map2 = cv2.fisheye.initUndistortRectifyMap(camera_matrix, dist_coeffs, r, new_camera_matrix, image_size, cv2.CV_16SC2)

    aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_6X6_1000)
    markerLength = 20  # measurement unit: centimetres
    markerSeparation = 4  # measurement unit: centimetres
    board = aruco.GridBoard_create(1, 1, markerLength, markerSeparation,
                                   aruco_dict)
    arucoParams = aruco.DetectorParameters_create()
    img = board.draw(outSize=(800, 800))
    #ShowImage(img)

    videoFile = 0
    cap = cv2.VideoCapture(videoFile)

    while True:
        ret, frame = cap.read()  # Capture frame-by-frame
        if ret:
            #frame_remapped = cv2.remap(frame, map1, map2, cv2.INTER_LINEAR, cv2.BORDER_CONSTANT)     # for fisheye remapping
            frame_remapped_gray = cv2.cvtColor(
                frame, cv2.COLOR_BGR2GRAY
            )  # aruco.detectMarkers() requires gray image

            corners, ids, rejectedImgPoints = aruco.detectMarkers(
                frame_remapped_gray, aruco_dict,
                parameters=arucoParams)  # First, detect markers
            # keep the refined detections returned by refineDetectedMarkers
            corners, ids, rejectedImgPoints, _ = aruco.refineDetectedMarkers(
                frame_remapped_gray, board, corners, ids, rejectedImgPoints)

            if ids is not None:  # if there is at least one marker detected
                im_with_aruco_board = aruco.drawDetectedMarkers(
                    frame, corners, ids, (0, 255, 0))

                retval, rvec, tvec = aruco.estimatePoseBoard(
                    corners, ids, board, camera_matrix,
                    dist_coeffs)  # pose estimation from an aruco board
                if retval != 0:
                    x = np.array([[10], [-10], [0]], np.int32)
                    new_tvec = tvec + x
                    im_with_aruco_board = aruco.drawAxis(
                        im_with_aruco_board, camera_matrix, dist_coeffs, rvec,
                        new_tvec, 50
                    )  # the axis length (50) can be changed as required
            else:
                im_with_aruco_board = frame

            cv2.imshow("arucoboard", im_with_aruco_board)

            if cv2.waitKey(2) & 0xFF == ord('q'):
                break
        else:
            break

    cap.release()  # When everything done, release the capture
    cv2.destroyAllWindows()
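Video2() expects calibrationFileName.xml to contain cameraMatrix, distCoeffs, R and newCameraMatrix nodes. For reference, such a file could be produced along these lines (the identity/zero values are placeholders, not real calibration results):

fs = cv2.FileStorage("calibrationFileName.xml", cv2.FILE_STORAGE_WRITE)
fs.write("cameraMatrix", np.eye(3))        # replace with calibrated intrinsics
fs.write("distCoeffs", np.zeros((5, 1)))   # replace with distortion coefficients
fs.write("R", np.eye(3))                   # rectification rotation (fisheye path)
fs.write("newCameraMatrix", np.eye(3))     # rectified camera matrix
fs.release()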
Example #7
while True:
    # Capture frame
    ret, img = cap.read()
    ret2, img2 = cap2.read()

    # Optional grayscale conversion (disabled; detectMarkers also accepts color)
    # img = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

    # detect markers
    corners, ids, rejectedImgPoints = aruco.detectMarkers(img,
                                                          aruco_dict,
                                                          parameters=param)
    corners2, ids2, rejectedImgPoints2 = aruco.detectMarkers(img2,
                                                             aruco_dict,
                                                             parameters=param)
    # keep the refined detections returned by refineDetectedMarkers
    corners, ids, rejectedImgPoints, _ = aruco.refineDetectedMarkers(
        img, board, corners, ids, rejectedImgPoints,
        cameraMatrix_left, distCoeff_left)
    corners2, ids2, rejectedImgPoints2, _ = aruco.refineDetectedMarkers(
        img2, board, corners2, ids2, rejectedImgPoints2,
        cameraMatrix_right, distCoeff_right)

    # estimate pose from Aruco board
    if (ids is not None) and (ids2 is not None):
        if img_count <= 1:
            cv.imwrite(IMG_PATH + "left/left_aruco_" + id_label + '.png', img)
            cv.imwrite(IMG_PATH + "right/right_aruco_" + id_label + '.png',
                       img2)
            img_count += 1
        img = aruco.drawDetectedMarkers(img, corners)
        img2 = aruco.drawDetectedMarkers(img2, corners2)
        retval, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board,
                                                     cameraMatrix_left,
                                                     distCoeff_left)
Example #8
    def estimate_pose_board(self, img_gray, corners, ids, rejectedImgPoints, board):
        # A first board pose from the raw detections, then a second pass
        # after refining the detections against the board layout.
        nmarkers, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board, camera_matrix,
                                                       dist_coeffs, None, None, useExtrinsicGuess=False)
        if nmarkers > 0:
            corners2, ids2, _, _ = aruco.refineDetectedMarkers(img_gray, board, corners, ids, rejectedImgPoints,
                                                               camera_matrix, dist_coeffs, parameters=arucoParams)
            nmarkers, rvec, tvec = aruco.estimatePoseBoard(corners2, ids2, board, camera_matrix,
                                                           dist_coeffs, None, None, useExtrinsicGuess=False)
        return nmarkers, rvec, tvec
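The method above references camera_matrix, dist_coeffs and arucoParams without defining them; presumably they are module-level globals along these lines (placeholder values, not from the original code):

camera_matrix = np.eye(3)                        # calibrated intrinsics go here
dist_coeffs = np.zeros(5)                        # calibrated distortion goes here
arucoParams = aruco.DetectorParameters_create()  # default detector parameters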
Example #9
def get_webcam_reference(videoFilePath,
                         cParamsFilePath,
                         dictionary,
                         markerSize,
                         board,
                         pose_timeout=5,
                         show_video=False):
    """
        Function that returns the position and orientation of a marker
        relative to its initial position. The input is a video file
        containing the marker.
    """
    # Open video file and get number of frames
    print('Preprocessing: counting number of frames')
    n_frames = count_frames_manual(videoFilePath)
    cap = cv2.VideoCapture(videoFilePath)

    # Parameters from camera calibration
    cal = pickle.load(open("tst_chessboard_lab.p", "rb"))
    #    cal = pickle.load(open(cParamsFilePath, "rb" ))
    cMat = cal[0]
    dist = cal[1]

    # Initialize parameters used to determine the initial position
    pose_0_set = False
    pose_0 = ()
    t_start = time.time()
    t_set_pose_0 = t_start + pose_timeout

    # Initialize collections
    all_tvec = np.array([[], [], []])
    all_rvec = np.array([[], [], []])
    print('Postprocessing: tracking marker')
    # Define the codec and create VideoWriter object
    size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    fourcc = cv2.VideoWriter_fourcc(*'DIVX')
    out = cv2.VideoWriter('output.avi', fourcc, 29.0, size,
                          False)  # 'False' for 1-ch instead of 3-ch for color
    # Obtain detection parameters
    parameters = aruco.DetectorParameters_create()
    #    parameters.minCornerDistanceRate = 0.1
    #    parameters.minOtsuStdDev = 0.1
    #    parameters.maxErroneousBitsInBorderRate = 0.5
    #    parameters.adaptiveThreshConstant = 30
    #    parameters.perspectiveRemovePixelPerCell = 1000
    #    parameters.polygonalApproxAccuracyRate = 0.1
    # Capture frame-by-frame
    for i in range(n_frames):

        ret, frame = cap.read()
        # Convert to grayscale and keep it: the VideoWriter above was opened
        # with isColor=False, so the frames written out must be single-channel
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        #        print(help(parameters))
        # lists of ids and the corners belonging to each id
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, dictionary, parameters=parameters)
        corners, ids, rejectedImgPoints, recoveredIdxs = aruco.refineDetectedMarkers(
            gray,
            board,
            corners,
            ids,
            rejectedCorners=rejectedImgPoints,
            cameraMatrix=cMat,
            distCoeffs=dist)

        if ids is not None:

            # Obtain rotation and translation vectors
            # corners, marker size in meters, [3x3] intrinsics, 5 distortion coefficients
            rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
                corners, markerSize, cameraMatrix=cMat, distCoeffs=dist)
            rvec = np.array([rvec.item(0), rvec.item(1), rvec.item(2)])
            tvec = np.array([tvec.item(0), tvec.item(1), tvec.item(2)])

            if time.time() >= t_set_pose_0 and not pose_0_set:
                pose_0 = (rvec, tvec)
                pose_0_set = True

            if pose_0_set:

                rvec_0 = pose_0[0]
                tvec_0 = pose_0[1]

                tvec_n = tvec - tvec_0
                rvec_n = rvec - rvec_0

                t0 = np.append(all_tvec[0], tvec[0])
                t1 = np.append(all_tvec[1], tvec[1])
                t2 = np.append(all_tvec[2], tvec[2])
                all_tvec = np.array([t0, t1, t2])

                r0 = np.append(all_rvec[0], rvec[0])
                r1 = np.append(all_rvec[1], rvec[1])
                r2 = np.append(all_rvec[2], rvec[2])
                all_rvec = np.array([r0, r1, r2])

                r = np.sqrt(np.power(tvec_n[0], 2) + np.power(tvec_n[1], 2))

                # print('Translation from initial position: ', tvec_n)
                # print('Rotation from initial position: ', rvec_n)
                # print('Total distance from initial position: ', r)

            # Show information on the image
            frameWithMarkers = aruco.drawDetectedMarkers(
                gray, corners)  # Draw marker borders
            cv2.putText(frameWithMarkers, "ID: " + str(ids), (0, 64),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2,
                        cv2.LINE_AA)  # Show detected IDs
            aruco.drawAxis(frameWithMarkers, cMat, dist, rvec, tvec,
                           0.1)  # Draw axes

            # Display the resulting frame
            if show_video:
                cv2.imshow('frame', frameWithMarkers)
                out.write(gray)

        else:
            # Display: no IDs
            cv2.putText(gray, "No IDs", (0, 64), cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (0, 255, 0), 2, cv2.LINE_AA)
            if show_video:
                cv2.imshow('frame', gray)
                out.write(gray)

        # Stop when q is pressed
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything done, release the capture
    cap.release()
    out.release()
    cv2.destroyAllWindows()

    return all_tvec, all_rvec
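count_frames_manual is not defined in the snippet; a straightforward sketch that matches its use above, reading the file once (slower than CAP_PROP_FRAME_COUNT, but reliable for containers that misreport their length):

def count_frames_manual(path):
    cap = cv2.VideoCapture(path)
    n = 0
    while True:
        ret, _ = cap.read()
        if not ret:
            break
        n += 1
    cap.release()
    return n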
Example #10
while cam.isOpened():
    # Capture current frame
    ret, QueryImg = cam.read()
    if ret:
        # Convert image to grayscale
        dst = cv2.cvtColor(QueryImg, cv2.COLOR_BGR2GRAY)

        # Detect Aruco markers
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            dst, ARUCO_DICT, parameters=ARUCO_PARAMETERS)

        # Refine detected markers
        corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
            image=dst,
            board=CHARUCO_BOARD,
            detectedCorners=corners,
            detectedIds=ids,
            rejectedCorners=rejectedImgPoints,
            cameraMatrix=cameraMatrix,
            distCoeffs=distCoeffs)

        # Outline the markers
        QueryImg = aruco.drawDetectedMarkers(QueryImg,
                                             corners,
                                             borderColor=(0, 0, 255))

        # Only try to find CharucoBoard if markers were detected
        if ids is not None and len(ids) > 10:

            # Get charuco corners and ids from detected aruco markers
            response, charuco_corners, charuco_ids = aruco.interpolateCornersCharuco(
                markerCorners=corners,
                markerIds=ids,
                image=dst,
                board=CHARUCO_BOARD)
Example #11
    def findCharucoBoard(self, req):
        self.rvecs_arr = np.zeros((3, NUMBER))
        self.tvecs_arr = np.zeros((3, NUMBER))
        res = aruco_infoResponse()

        for order in range(NUMBER):
            # Capturing each frame of our video stream
            # ret, self.QueryImg = self.cam.read()
            frames = self.pipeline.wait_for_frames()
            color_frame = frames.get_color_frame()
            self.QueryImg = np.asanyarray(color_frame.get_data())
            # grayscale image
            gray = cv2.cvtColor(self.QueryImg, cv2.COLOR_BGR2GRAY)

            # Detect Aruco markers
            corners, ids, rejectedImgPoints = aruco.detectMarkers(
                gray, self.aruco_dict, parameters=self.aruco_param)

            # Refine detected markers
            # Rejects markers not part of our board and recovers missed ones from the board layout
            corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
                image=gray,
                board=self.charuco_board,
                detectedCorners=corners,
                detectedIds=ids,
                rejectedCorners=rejectedImgPoints,
                cameraMatrix=self.cameraMatrix,
                distCoeffs=self.distCoeffs)

            # Require more than 10 detected markers before interpolating corners
            if ids is not None and len(ids) > 10:
                response, charuco_corners, charuco_ids = aruco.interpolateCornersCharuco(
                    markerCorners=corners,
                    markerIds=ids,
                    image=gray,
                    board=self.charuco_board)

                # Require more than 20 interpolated charuco corners
                if response is not None and response > 20:
                    # Estimate the pose of the charuco board from the interpolated corners
                    pose, rvec, tvec = aruco.estimatePoseCharucoBoard(
                        charucoCorners=charuco_corners,
                        charucoIds=charuco_ids,
                        board=self.charuco_board,
                        cameraMatrix=self.cameraMatrix,
                        distCoeffs=self.distCoeffs)
                    if pose:
                        if order == 0:
                            print(
                                "============================================="
                            )
                            print(rvec)
                            print(tvec)
                        for i in range(3):
                            self.rvecs_arr[i][order] = rvec[i][0]
                            self.tvecs_arr[i][order] = tvec[i][0]

                #     self.QueryImg = aruco.drawAxis(self.QueryImg, self.cameraMatrix, self.distCoeffs, rvec, tvec, 0.02)
            cv2.waitKey(10)
            # Display our image
        # print('self.rvecs_arr = ', self.rvecs_arr)
        # print('self.tvecs_arr = ', self.tvecs_arr)
        cv2.destroyAllWindows()
        r_avg = np.zeros(3)
        t_avg = np.zeros(3)

        # Drop the zero (missing) entries in each axis, then sort.
        r = [np.sort(row[row.nonzero()], kind='quicksort') for row in self.rvecs_arr]
        t = [np.sort(row[row.nonzero()], kind='quicksort') for row in self.tvecs_arr]
        for i in range(3):
            rv, tv = r[i], t[i]

            while np.std(rv) > 0.01 and len(rv) >= NUMBER * 0.2:
                if abs(rv[0] - np.average(rv)) > abs(rv[-1] - np.average(rv)):
                    rv = np.delete(rv, 0)
                else:
                    rv = np.delete(rv, -1)
            while np.std(tv) > 0.01 and len(tv) >= NUMBER * 0.2:
                if abs(tv[0] - np.average(tv)) > abs(tv[-1] - np.average(tv)):
                    tv = np.delete(tv, 0)
                else:
                    tv = np.delete(tv, -1)

            r_avg[i] = np.average(rv)
            t_avg[i] = np.average(tv)

        res.rvecs = r_avg
        res.tvecs = t_avg
        print('res.rvecs is ', res.rvecs)
        print('res.tvecs is ', res.tvecs)
        result = np.vstack((np.copy(r_avg), np.copy(t_avg)))  # row 0: rvec, row 1: tvec

        filename = self.curr_path + "/pic/charucoboard-pic-" + str(
            int(self.frameId)) + ".jpg"
        cv2.imwrite(filename, self.QueryImg)
        # Outline all of the markers detected in our image
        self.QueryImg = aruco.drawDetectedMarkers(self.QueryImg,
                                                  corners,
                                                  borderColor=(0, 0, 255))
        self.QueryImg = aruco.drawAxis(self.QueryImg, self.cameraMatrix,
                                       self.distCoeffs, result[0], result[1],
                                       0.02)
        # self.curr_path = os.path.dirname(os.path.abspath(__file__))
        filename = self.curr_path + "/pic/detection result-" + str(
            int(self.frameId)) + ".jpg"
        cv2.imwrite(filename, self.QueryImg)
        self.frameId += 1
        print('------')

        return res
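The per-axis while loops above implement a simple symmetric outlier trim: whichever end of the sorted sample lies farther from the mean is dropped until the spread falls below a threshold. The same logic as a reusable helper (a sketch; NUMBER is assumed to be in scope as above):

def trim_outliers(v, max_std=0.01, min_keep=NUMBER * 0.2):
    # Repeatedly drop the end value farther from the mean.
    while np.std(v) > max_std and len(v) >= min_keep:
        if abs(v[0] - np.average(v)) > abs(v[-1] - np.average(v)):
            v = np.delete(v, 0)
        else:
            v = np.delete(v, -1)
    return v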
Example #12
def aruco_fun():
    global pipe, cameraMatrix, distCoeffs, depth_intrinsics
    axis = np.float32([[3, 0, 0], [0, 3, 0], [0, 0, -3]]).reshape(-1, 3)
    if True:  # depth-based branch (enabled)

        frames = pipe.wait_for_frames()
        align_to = rs.stream.color
        align = rs.align(align_to)
        aligned_frames = align.process(frames)
        depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()
        color_img = np.array(color_frame.get_data())
        color_img_temp = copy.deepcopy(color_img)
        depth_img = np.array(depth_frame.get_data())
        frame = color_img_temp
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
        parameters = aruco.DetectorParameters_create()
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)
        #print(corners,ids)
        #center_p(corners, depth_img,cameraMatrix)
        #if False:
        if ids is not None and len(ids) > 0:
            global wheel_chair_translation_vectors, find_wheel_chair
            #print("len",len(ids[0]))
            num = int(len(ids))
            for id_obj in range(num):

                if ids[id_obj] == 6:
                    find_wheel_chair = 1
                    wheel_chair_translation_vectors = get_xyz(
                        corners[id_obj], depth_img, cameraMatrix,
                        depth_intrinsics)
                else:
                    if find_wheel_chair == 0:
                        continue
                    obj_translation_vectors = get_xyz(corners[id_obj],
                                                      depth_img, cameraMatrix,
                                                      depth_intrinsics)
                    p = obj_translation_vectors - wheel_chair_translation_vectors
                    print(ids[id_obj], " ", "position vector is", p)
                    #process_data(ids[id_obj],p)
        gray = aruco.drawDetectedMarkers(gray, corners)
        cv2.imshow('frame', gray)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            #break
    if False:  # pose-estimation branch (disabled)
        #while(True):
        frames = pipe.wait_for_frames()

        color_frame = frames.get_color_frame()
        # Convert images to numpy arrays
        color_image = np.asanyarray(color_frame.get_data())
        gray = cv2.cvtColor(color_image.copy(), cv2.COLOR_RGB2GRAY)
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, ARUCO_DICT, parameters=ARUCO_PARAMETERS)
        corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
            image=gray,
            board=board,
            detectedCorners=corners,
            detectedIds=ids,
            rejectedCorners=rejectedImgPoints,
            cameraMatrix=cameraMatrix,
            distCoeffs=distCoeffs)
        ProjectImage = color_image.copy()
        ProjectImage = aruco.drawDetectedMarkers(ProjectImage,
                                                 corners,
                                                 borderColor=(0, 0, 255))
        print(ids)
        if ids is not None and len(ids) > 0:
            # Estimate the posture per each Aruco marker
            rotation_vectors, translation_vectors, _objPoints = aruco.estimatePoseSingleMarkers(
                corners, 1, cameraMatrix, distCoeffs)
            #global wheel_chair_rotation_vectors,  wheel_chair_translation_vectors, find_wheel_chair ,w2c
            print("len", len(ids[0]))
            num = int(len(ids))
            for id_obj in range(num):

                if ids[id_obj] == 6:
                    find_wheel_chair = 1
                    wheel_chair_rotation_vectors = cv2.Rodrigues(
                        rotation_vectors[id_obj][0])[0]
                    wheel_chair_translation_vectors = translation_vectors[id_obj]
                    c2w = np.c_[wheel_chair_rotation_vectors,
                                wheel_chair_translation_vectors[0]]
                    temp = np.array([0, 0, 0, 1])
                    c2w = np.r_[c2w, [temp]]
                    #w2c = np.linalg.inv(c2w)
                    w2c = np.c_[wheel_chair_rotation_vectors.T,
                                -wheel_chair_rotation_vectors.T.
                                dot(wheel_chair_translation_vectors[0])]
                    w2c = np.r_[w2c, [temp]]
                else:
                    if find_wheel_chair == 0:
                        continue
                    obj_rotation_vectors = cv2.Rodrigues(
                        rotation_vectors[id_obj][0])[0]
                    obj_translation_vectors = translation_vectors[id_obj]
                    c2o = np.c_[obj_rotation_vectors,
                                obj_translation_vectors[0]]

                    temp = np.array([0, 0, 0, 1])
                    c2o = np.r_[c2o, [temp]]
                    w2o = w2c.dot(c2o)
                    #p = w2o[:,3][:3]
                    p = compute_w2o(wheel_chair_rotation_vectors,
                                    wheel_chair_translation_vectors,
                                    obj_rotation_vectors,
                                    obj_translation_vectors)

                    print(ids[id_obj], " ", "position vector is", p)
                    #process_data(ids[id_obj],p)

            # This loop merely counts the markers; rotation_mat goes unused.
            ids = 1
            for rvec in rotation_vectors:
                rotation_mat = cv2.Rodrigues(rvec[0])[0]
                ids = ids + 1

            for rvec, tvec in zip(rotation_vectors, translation_vectors):
                if len(sys.argv) == 2 and sys.argv[1] == 'cube':
                    try:
                        imgpts, jac = cv2.projectPoints(
                            axis, rvec, tvec, cameraMatrix, distCoeffs)
                        ProjectImage = drawCube(ProjectImage, corners, imgpts)
                    except Exception:
                        continue
                else:
                    ProjectImage = aruco.drawAxis(ProjectImage, cameraMatrix,
                                                  distCoeffs, rvec, tvec, 1)
        cv2.imshow('ProjectImage', ProjectImage)
        #frame_markers = aruco.drawDetectedMarkers(frame.copy(), corners, ids)
        #cv2.imshow('frame',frame_markers)
        #end = datetime.now()
        #exec_time = end - start
        #print("aruco control")
        #print(exec_time,exec_time.total_seconds())
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
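compute_w2o is not shown, but from the surrounding algebra (w2o = w2c.dot(c2o), with p taken from its translation column) it plausibly reduces to expressing the object position in the wheelchair frame. A hypothetical sketch:

def compute_w2o(w_rot, w_trans, o_rot, o_trans):
    # p = R_w^T (t_o - t_w): the object translation in the wheelchair frame.
    return w_rot.T.dot(o_trans[0] - w_trans[0])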
Example #13
    def findCharucoBoard(self, req):
        self.rvecs_arr = np.zeros((3, NUMBER))
        self.tvecs_arr = np.zeros((3, NUMBER))
        res = aruco_infoResponse()

        for order in range(NUMBER):
            # Capturing each frame of our video stream
            # ret, self.QueryImg = self.cam.read()
            # frames = self.pipeline.wait_for_frames()
            # color_frame = frames.get_color_frame()
            # self.QueryImg = np.asanyarray(color_frame.get_data())
            # grayscale image
            gray = cv2.cvtColor(self.QueryImg, cv2.COLOR_BGR2GRAY)

            # Detect Aruco markers
            corners, ids, rejectedImgPoints = aruco.detectMarkers(
                gray, ARUCO_DICT, parameters=ARUCO_PARAMETERS)

            # Refine detected markers
            # Rejects markers not part of our board and recovers missed ones from the board layout
            corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
                image=gray,
                board=CHARUCO_BOARD,
                detectedCorners=corners,
                detectedIds=ids,
                rejectedCorners=rejectedImgPoints,
                cameraMatrix=self.cameraMatrix,
                distCoeffs=self.distCoeffs)

            # Require more than 10 detected markers before interpolating corners
            if ids is not None and len(ids) > 10:
                response, charuco_corners, charuco_ids = aruco.interpolateCornersCharuco(
                    markerCorners=corners,
                    markerIds=ids,
                    image=gray,
                    board=CHARUCO_BOARD)

                # Require more than 20 interpolated charuco corners
                if response is not None and response > 20:
                    # Estimate the pose of the charuco board from the interpolated corners
                    pose, rvec_, tvec = aruco.estimatePoseCharucoBoard(
                        charucoCorners=charuco_corners,
                        charucoIds=charuco_ids,
                        board=CHARUCO_BOARD,
                        cameraMatrix=self.cameraMatrix,
                        distCoeffs=self.distCoeffs,
                        rvec=np.zeros(3),
                        tvec=np.zeros(3))

                    # self.rvecs, self.tvecs = aruco.estimatePoseSingleMarkers(corners, self.markersize, self.cameraMatrix, self.distCoeffs)
                    # for _id, rvec, tvec in zip(ids, self.rvecs, self.tvecs):
                    if pose:
                        print(rvec_)
                        print(tvec)
                        if order == 0:
                            print(
                                "============================================="
                            )
                            print(rvec_)
                            print(tvec)
                        for i in range(3):
                            self.rvecs_arr[i][order] = rvec_[i]
                            self.tvecs_arr[i][order] = tvec[i]

                #     self.QueryImg = aruco.drawAxis(self.QueryImg, self.cameraMatrix, self.distCoeffs, rvec, tvec, 0.02)
            cv2.waitKey(10)
            # Display our image
        # print('self.rvecs_arr = ', self.rvecs_arr)
        # print('self.tvecs_arr = ', self.tvecs_arr)
        cv2.destroyAllWindows()
        r_avg = np.zeros(3)
        t_avg = np.zeros(3)

        # Drop the zero (missing) entries in each axis, then sort.
        r = [np.sort(row[row.nonzero()], kind='quicksort') for row in self.rvecs_arr]
        t = [np.sort(row[row.nonzero()], kind='quicksort') for row in self.tvecs_arr]
        for i in range(3):
            rv, tv = r[i], t[i]

            while np.std(rv) > 0.01 and len(rv) >= NUMBER * 0.2:
                if abs(rv[0] - np.average(rv)) > abs(rv[-1] - np.average(rv)):
                    rv = np.delete(rv, 0)
                else:
                    rv = np.delete(rv, -1)
            while np.std(tv) > 0.01 and len(tv) >= NUMBER * 0.2:
                if abs(tv[0] - np.average(tv)) > abs(tv[-1] - np.average(tv)):
                    tv = np.delete(tv, 0)
                else:
                    tv = np.delete(tv, -1)

            r_avg[i] = np.average(rv)
            t_avg[i] = np.average(tv)

        # print('[_id, r,t] = ', [_id, r,t])
        # res.ids.append(_id)
        # res.rvecs = np.append(res.rvecs, r_avg)
        # res.tvecs = np.append(res.tvecs, t_avg)
        res.rvecs = r_avg
        res.tvecs = t_avg
        print('res.rvecs is ', res.rvecs)
        print('res.tvecs is ', res.tvecs)
        result = np.vstack((np.copy(r_avg), np.copy(t_avg)))  # row 0: rvec, row 1: tvec

        if self.name == 'test':
            # Outline all of the markers detected in our image
            self.QueryImg = aruco.drawDetectedMarkers(self.QueryImg,
                                                      corners,
                                                      borderColor=(0, 0, 255))
            self.QueryImg = aruco.drawAxis(self.QueryImg, self.cameraMatrix,
                                           self.distCoeffs, result[0],
                                           result[1], 0.02)
            curr_path = os.path.dirname(os.path.abspath(__file__))
            filename = curr_path + "/pic/camera-pic-of-charucoboard-" + str(
                int(self.frameId)) + ".jpg"
            cv2.imwrite(filename, self.QueryImg)
            self.frameId += 1
            # cv2.imwrite('./123%d.jpg'%self.cnd, self.QueryImg)
            # self.cnd += 1
            # cv2.namedWindow('Amanda', cv2.WINDOW_AUTOSIZE)
            # self.QueryImg = cv2.imread('./123%d.jpg'%self.cnd)
            # cv2.imshow('Amanda', self.QueryImg)
            # cv2.waitKey(1000)
            # cv2.destroyAllWindows()

            # time.sleep(2)
            print('------')
            # while not cv2.waitKey(1) & 0xFF == ord('q'):
            #     pass
            # cv2.destroyAllWindows()
        print("self.cameraMatrix.reshape(1,9):",
              self.cameraMatrix.reshape(1, 9)[0])
        res.camera_mat = self.cameraMatrix.reshape(1, 9)[0]
        return res
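
The two while loops above implement a simple iterative outlier rejection: whichever extreme value lies farther from the mean is dropped until the standard deviation falls below 0.01 or too few samples remain. A minimal standalone sketch of the same idea (the function name and thresholds here are illustrative, not part of the original code):

import numpy as np

def trim_outliers(values, max_std=0.01, min_keep=5):
    # Drop the extreme farther from the mean until the spread is small enough
    v = np.sort(np.asarray(values, dtype=float))
    while v.size > min_keep and np.std(v) > max_std:
        if abs(v[0] - v.mean()) > abs(v[-1] - v.mean()):
            v = v[1:]   # leftmost sample is the worst outlier
        else:
            v = v[:-1]  # rightmost sample is the worst outlier
    return float(v.mean())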
Example #14
import cv2
import cv2.aruco as aruco
from tqdm import trange


def get_corners_aruco(fname, board, skip=20):
    cap = cv2.VideoCapture(fname)

    length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    allCorners = []
    allIds = []

    go = int(skip / 2)

    board_type = get_board_type(board)
    board_size = get_board_size(board)

    max_size = get_expected_corners(board)

    for framenum in trange(length, ncols=70):
        ret, frame = cap.read()
        if not ret:
            break

        if framenum % skip != 0 and go <= 0:
            continue

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        params = aruco.DetectorParameters_create()
        params.cornerRefinementMethod = aruco.CORNER_REFINE_CONTOUR
        params.adaptiveThreshWinSizeMin = 100
        params.adaptiveThreshWinSizeMax = 700
        params.adaptiveThreshWinSizeStep = 50
        params.adaptiveThreshConstant = 5

        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, board.dictionary, parameters=params
        )

        if corners is None or len(corners) <= 2:
            go = max(0, go - 1)
            continue

        detectedCorners, detectedIds, rejectedCorners, recoveredIdxs = aruco.refineDetectedMarkers(
            gray, board, corners, ids, rejectedImgPoints, parameters=params
        )

        if board_type == "charuco" and len(detectedCorners) > 0:
            ret, detectedCorners, detectedIds = aruco.interpolateCornersCharuco(
                detectedCorners, detectedIds, gray, board
            )

        if (
            detectedCorners is not None
            and len(detectedCorners) >= 2
            and len(detectedCorners) <= max_size
        ):
            allCorners.append(detectedCorners)
            allIds.append(detectedIds)
            go = int(skip / 2)

        go = max(0, go - 1)

    cap.release()

    return allCorners, allIds
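
A hypothetical call of the function above (the board construction, dictionary, and file name are placeholders; get_board_type and the other helpers come from the surrounding project):

dictionary = aruco.Dictionary_get(aruco.DICT_4X4_50)
board = aruco.GridBoard_create(5, 7, 0.04, 0.01, dictionary)
allCorners, allIds = get_corners_aruco("calibration_video.mp4", board, skip=20)
print(len(allCorners), "frames with usable detections")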
Example #15
import pickle

import cv2
import cv2.aruco as aruco
import numpy as np


def get_webcam_reference(video_file, cam_params_file, dictionary, marker_size, board, show_video=False, save_output=False, output_file_name='output.avi', webcam_stream=False):
    """
        Function that returns the position and orientation of a marker from its
        initial position.

        INPUTS:
            - video_file: path to a video file to be processed
            - cam_params_file: pickle file containing parameters from camera calibration
            - dictionary: aruco predefined dictionary used to generate markers
            - board: aruco marker board
            - marker_size: size of marker to detect in meters
            - show_video (default=False): play video with detection results. Video playback can be stopped by pressing q.
            - save_output (default=False): save the detection output to a video file
            - output_file_name (default='output.avi'): name of output video file
            - webcam_stream (default=False): read frames from the default webcam instead of video_file

        OUTPUTS:
            - all_tvec: marker coordinates of each frame [x, y, z]
            - all_rvec: marker orientations of each frame [x, y, z]

    """

    # Open video file and get number of frames

    if webcam_stream:
        cap = cv2.VideoCapture(0)
    else:
        print('Preprocessing: counting number of frames')
        n_frames = count_frames_manual(video_file)
        cap = cv2.VideoCapture(video_file)

    loop = True  # Stop condition for the while loop
    i = 0  # Current iteration; the loop stops once all frames of the video file have been processed

    # Parameters from camera calibration
    with open(cam_params_file, "rb") as f:
        cal = pickle.load(f)
    cMat = cal[0]
    dist = cal[1][0]

    # Initialize collections
    all_tvec = list()
    all_rvec = list()
    print('Tracking marker')

    if save_output:
        # Define the codec and create VideoWriter object
        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        fourcc = cv2.VideoWriter_fourcc(*'DIVX')
        out = cv2.VideoWriter(output_file_name, fourcc, 29.0, size, True)  # last arg True: 3-channel color frames (False would expect 1-channel)

    parameters =  aruco.DetectorParameters_create() # Obtain detection parameters

    while loop:

        ret, frame = cap.read()
        if not ret:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # grayscale copy for detection; drawing stays on the color frame

        # lists of ids and the corners belonging to each id
        corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, dictionary, parameters=parameters)
        corners, ids, rejectedImgPoints, recoveredIdxs = aruco.refineDetectedMarkers(gray, board, corners, ids, rejectedCorners=rejectedImgPoints, cameraMatrix=cMat, distCoeffs=dist)

        if ids is not None:

            # Obtain rotation and translation vectors
            rObject, tObject, _objPoints = aruco.estimatePoseSingleMarkers(corners, marker_size, cameraMatrix=cMat, distCoeffs=dist) # corners, size of markers in meters, [3x3] intrinsic camera parameters, 5 distortion coefficients
            rvec = [rObject.item(0), rObject.item(1), rObject.item(2)]
            tvec = [tObject.item(0), tObject.item(1), tObject.item(2)]

            all_rvec.append(rvec)
            all_tvec.append(tvec)

            # show information on image
            frameWithMarkers = aruco.drawDetectedMarkers(frame, corners, ids=ids) # Draw marker borders
            cv2.putText(frameWithMarkers, "ID: " + str(ids), (0,64), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 2, cv2.LINE_AA)  # Show detected IDs
            aruco.drawAxis(frameWithMarkers, cMat, dist, np.array(rvec), np.array(tvec), 0.1) #Draw Axis

            # Display the resulting frame
            if show_video:
                cv2.imshow('frame',frameWithMarkers)

        else:
            # Display: no IDs
            cv2.putText(frame, "No IDs", (0,64), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0),2,cv2.LINE_AA)
            if show_video:
                cv2.imshow('frame',frame)

        if save_output:
            out.write(frame)

        # Stop when q is pressed
        if cv2.waitKey(1) & 0xFF == ord('q'):
            loop=False

        i += 1
        if not webcam_stream:
            if i == n_frames:
                loop = False

    # When everything done, release the capture
    cap.release()
    if save_output:
        out.release()
    cv2.destroyAllWindows()

    return np.array(all_tvec), np.array(all_rvec)
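
A hypothetical invocation (file names, dictionary, and marker size are placeholders; the board must match the markers that appear in the video):

dictionary = aruco.Dictionary_get(aruco.DICT_5X5_1000)
board = aruco.GridBoard_create(5, 7, 0.04, 0.01, dictionary)
all_tvec, all_rvec = get_webcam_reference('recording.avi', 'calibration.pckl',
                                          dictionary, 0.04, board, show_video=True)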
Example #16
    ret, frame = cap.read()  # Capture frame-by-frame

    # operations on the frame
    if ret:
        print(frame.shape)

        gray = cv2.cvtColor(
            frame,
            cv2.COLOR_BGR2GRAY)  # aruco.detectMarkers() requires gray image

        CHARUCO_PARAMS.adaptiveThreshConstant = 10
        # lists of ids and the corners belonging to each id
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, CHARUCO_DICT, parameters=CHARUCO_PARAMS)

        corners, ids, rejectedImgPoints, _ = aruco.refineDetectedMarkers(
            gray, CHARUCO_BOARD, corners, ids, rejectedImgPoints)  # keep the refined detections

        if ids is not None:  # if there is at least one marker detected
            charucoretval, charucoCorners, charucoIds = aruco.interpolateCornersCharuco(
                corners, ids, gray, CHARUCO_BOARD)

            # try to create a birds eye view of the scene based on the charuco recognition
            try:

                main_corners = get_boundaries(charucoCorners, frame.shape[0])
                # pbar.set_description(f"corners: {main_corners}")
                for item in main_corners:
                    draw_point(frame, (int(item[0, 0]), int(item[0, 1])))
                print(f"corners: {main_corners}")

                new_corners = get_box(main_corners)
Example #17
    def get_camera_pose_from_single_markerboard(self, msg):

        n_frame = msg.n_frame
        rospy.loginfo("Get camera pose of {} from markerboard".format(self.camera_name))
        header_frame_id = "{}_rgb_camera_link".format(self.camera_name)

        # basic parameters
        dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)   # dictionary id
        board = aruco.GridBoard_create(5, 7, 0.033, 0.004, dictionary)
        parameters =  aruco.DetectorParameters_create()
        camera_info = rospy.wait_for_message("/{}/rgb/camera_info".format(self.camera_name), CameraInfo)
        K = np.array(camera_info.K).reshape(3, 3)
        D = np.array(camera_info.D)

        pos_list = []
        quat_list = []
        n_success = 0
        # detect marker from image
        for n in range(n_frame):
            image = rospy.wait_for_message("/{}/rgb/image_raw".format(self.camera_name), Image)
            bridge = cv_bridge.CvBridge()
            frame = bridge.imgmsg_to_cv2(image, desired_encoding='bgr8')
            corners, ids, rejected = aruco.detectMarkers(frame, dictionary, parameters=parameters)
            corners, ids, rejected, recovered = aruco.refineDetectedMarkers(frame, board, corners, ids, rejected,
                                                                                K, D,
                                                                                errorCorrectionRate=-1,
                                                                                parameters=parameters)
            N, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board, K, D, None, None)
            if N:
                cv2.aruco.drawAxis(frame, K, D, rvec, tvec, 0.2)
                cv2.aruco.drawDetectedMarkers(frame, corners, ids)
                self.aruco_img_pub.publish(bridge.cv2_to_imgmsg(frame))
                pos = tvec 
                rot = np.eye(4)
                rot[:3, :3] = np.squeeze(cv2.Rodrigues(rvec)[0])
                quat = t.quaternion_from_matrix(rot)
                pos_list.append(pos)
                quat_list.append(quat)
                n_success += 1
        
        if len(pos_list) == 0:
            rospy.logwarn("Failed to detect the marker board")
            return False
        
        pos, quat = orh.average_pq(pos_list, quat_list)
        source_frame = header_frame_id
        target_frame = "{}_markerboard".format(self.camera_name)
        static_tf_min = orh.pq_to_transform_stamped(pos, quat, source_frame, target_frame)

        # source_frame = "{}_markerboard".format(self.camera_name)
        # target_frame = header_frame_id
        # static_tf_min = orh.pq_to_transform_stamped(pos, quat, source_frame, target_frame)

        self.static_aruco_tfs.append(static_tf_min)
        rospy.loginfo("Publish static tf: {} -> {}_markerboard from ArUco".format(header_frame_id, self.camera_name))   

        # find target marker in world map
        target_marker = None
        for marker in self.world_map["markers"]:
            if marker["id"] == "board":
                target_marker = marker
        if target_marker is None:
            rospy.logwarn("No information in world map for marker board")
            return False

        pos = target_marker["position"]
        pos = [-p for p in pos]
        quat = target_marker["orientation"] # TODO: invert quaternion 
        source_frame = "{}_markerboard".format(self.camera_name)
        target_frame = "world"
        static_tf_world_to_fid = orh.pq_to_transform_stamped(pos, quat, source_frame, target_frame)
        self.static_world_tfs.append(static_tf_world_to_fid)

        if msg.publish_worldmap:
            rospy.loginfo("Publish static tf: {}_markerboard -> world from world map ".format(self.camera_name))   
            self.br.sendTransform(self.static_aruco_tfs + self.static_world_tfs)
        else:
            self.br.sendTransform(self.static_aruco_tfs)
        
        self.save_transfrom_as_json("world", "{}_rgb_camera_link".format(self.camera_name))
        rospy.loginfo("Finished the camera pose calibration")
        return True 
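
orh.average_pq is a project-specific helper; a plausible sketch of what it computes is below: positions averaged component-wise, and quaternions sign-aligned into one hemisphere, averaged, and renormalized (a reasonable approximation only when the rotations are clustered; this is an assumption, not the actual implementation):

import numpy as np

def average_pq_sketch(pos_list, quat_list):
    # Component-wise mean of positions
    pos = np.mean(np.asarray(pos_list, dtype=float).reshape(len(pos_list), 3), axis=0)
    quats = np.asarray(quat_list, dtype=float)
    # q and -q encode the same rotation; align all signs with the first sample
    signs = np.where(quats @ quats[0] < 0.0, -1.0, 1.0)
    q = np.mean(quats * signs[:, None], axis=0)
    return pos, q / np.linalg.norm(q)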
Example #18
    def image_callback(self, data):

        # --- SETUP STUFF --- #

        # Update the board we're looking for
        self.update_aruco_target()

        # Define variables for messages to publish
        pose_msg = Pose()
        bool_msg = Bool()
        avg_corners = 0

        # --- IMAGE PROCESSING --- #

        # Convert from ROS message to OpenCV image
        cv_image = self.bridge.imgmsg_to_cv2(data, desired_encoding="passthrough")

        # Find corners and IDs of aruco markers
        corners, ids, rejectedImgPoints = aruco.detectMarkers(cv_image, ARUCO_DICT, parameters=ARUCO_PARAM)

        # Refine for improved accuracy
        corners, ids, rejectedImgPoints, _ = aruco.refineDetectedMarkers(cv_image, SOUTH_2x1_BOARD, corners, ids, rejectedImgPoints, self.cmatx, self.dist)  # keep the refined detections

        # If markers found, estimate pose
        if ids is not None:

            aruco.drawDetectedMarkers(cv_image, corners, ids)

            if self.prev_rvec is None:
                _, self.prev_rvec, self.prev_tvec = aruco.estimatePoseBoard(corners, ids, SOUTH_2x1_BOARD, self.cmatx, self.dist)

            # Compute pose estimate based on board corner positions
            retval, rvec, tvec = aruco.estimatePoseBoard(corners, ids, SOUTH_2x1_BOARD, self.cmatx, self.dist, rvec=self.prev_rvec, tvec=self.prev_tvec, useExtrinsicGuess=True)

            # If successful, convert rvec from rpy to quaternion, fill pose message
            if retval != 0:

                # Set param to true
                self.first_marker_detected = True

                self.prev_tvec = tvec
                self.prev_rvec = rvec


                # Compute pose of Camera relative to world
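                # Invert the board-in-camera pose: with R = Rodrigues(rvec),
                # the camera pose in the board/world frame is R^T and -R^T @ tvec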

                dst, _ = cv2.Rodrigues(rvec)
                R = dst.T
                tvec = np.dot(-R, tvec)
                rvec, _ = cv2.Rodrigues(R)
                rospy.loginfo(rvec.shape)

                # Convert the 'rvec' from rpy to a quaternion
                quat = tf.transformations.quaternion_from_euler(rvec[1, 0], rvec[0, 0], -rvec[2, 0])

                # Store pose and quaternion information of Aruco Board
                pose_msg.position.x = tvec[2, 0]
                pose_msg.position.y = tvec[0, 0]
                pose_msg.position.z = tvec[1, 0]
                pose_msg.orientation.x = quat[0]
                pose_msg.orientation.y = quat[1]
                pose_msg.orientation.z = quat[2]
                pose_msg.orientation.w = quat[3]

                # Calculate the average x distance between the corners
                x_min = corners[0][0, 0, 0]
                x_max = corners[0][0, 0, 0]

                for corner in corners:
                    if np.min(corner[0, :, 0]) < x_min:
                        x_min = np.min(corner[0, :, 0])
                    if np.max(corner[0, :, 0]) > x_max:
                        x_max = np.max(corner[0, :, 0])

                # Publish pose estimate; the corner average goes out at the end
                self.pose_pub.publish(pose_msg)
                avg_corners = (x_min + x_max) / 2.0

            # Update boolean message (retval is the number of markers used in the estimate)
            bool_msg.data = retval != 0

        else:
            bool_msg.data = False

        cv2.imshow("image", cv_image)
        cv2.waitKey(1)

        # Always publish whether markers were found or not
        self.avg_of_corners.publish(avg_corners)
        self.bool_pub.publish(bool_msg)
Example #19
    ret, frame = cap.read()

    # frame = frame[300:640,100:560]

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # cv2.imshow('original', frame)

    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    #cv2.imshow('grayed', gray)

    # ret3,gray = cv2.threshold(gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    # cv2.imshow("thresholded", gray)

    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        gray, dictionary, parameters=arucoParams)  # Detect aruco
    corners, ids, rejectedImgPoints, _ = aruco.refineDetectedMarkers(gray, board, corners, ids, rejectedImgPoints)  # keep the refined detections
    if ids is not None:  # if the aruco marker detected

        # rvec, tvec, objpoints = aruco.estimatePoseSingleMarkers(corners, markerLength, camera_Matrix, dist_Coff) # For a single marker
        # board = aruco.Board_create(objpoints,dictionary,ids)
        imgWithAruco = aruco.drawDetectedMarkers(frame, corners, ids,
                                                 (0, 255, 0))
        retval, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board,
                                                     camera_Matrix, dist_Coff)
        # imgWithAruco = aruco.drawAxis(frame, camera_Matrix, dist_Coff, rvec, tvec, 0.1)  # axis length can be adjusted to your requirement

        x_dist = round(tvec[0][0] * 100, 1)
        y_dist = round(tvec[1][0] * 100, 1)
        z_dist = round(tvec[2][0] * 100, 1)
        x_rot = round(rvec[0][0], 2)
Example #20
    def calibrate_camera_aruco(self,
                               samples_dir="frames",
                               outputfile="camera_matrix_aruco.yaml"):
        self.outputfile = outputfile
        board = aruco.GridBoard_create(self.MarkerX, self.MarkerY, 0.05, 0.01,
                                       self.aruco_dict)
        cam_matrix, dist_coeff, rvecs, tvecs = self.read_cam_matrix(outputfile)
        print("dist_coeff original")
        print(dist_coeff)
        print("cam_matrix original")
        print(cam_matrix)
        im = Imutils_Master()
        all_img = []
        all_obj = []
        h, w = 0, 0
        file_count = 0
        total_markers_to_look = self.MarkerX * self.MarkerY
        for fname in os.listdir(samples_dir):
            file_count += 1
            img = cv2.imread(samples_dir + "/" + fname)
            h, w = img.shape[:2]
            gray = im.gray(img)
            corners, ids, rejectedImgPoints = aruco.detectMarkers(
                gray, self.aruco_dict, parameters=self.parameters)
            corners1, ids1, rejectedImgPoints1, rec_index = aruco.refineDetectedMarkers(
                gray, board, corners, ids, rejectedImgPoints)
            getcount = self.get_marker_count(ids1)

            if ids1 is not None and getcount == total_markers_to_look:
                ret, rv, tv = aruco.estimatePoseBoard(corners1, ids1, board,
                                                      cam_matrix, dist_coeff)
                if ret > 0:
                    aruco.drawDetectedMarkers(img, corners, ids, (0, 0, 255))
                    obj_points, img_points = aruco.getBoardObjectAndImagePoints(
                        board, corners1, ids1)
                    all_obj.append(obj_points)
                    all_img.append(img_points)
                else:
                    print("not able to estimate board")
            cv2.imshow("frame", img)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cv2.destroyAllWindows()
        print("calibrating starts... may take while")

        if all_obj and len(all_obj) == len(all_img):
            rms, cam_matrix, dist_coeff, rvecs, tvecs = cv2.calibrateCamera(
                all_obj, all_img, (w, h), None, None)
            if rms:
                data = {
                    'camera_matrix': np.asarray(cam_matrix).tolist(),
                    'dist_coeff': np.asarray(dist_coeff).tolist(),
                    'rvecs': np.asarray(rvecs).tolist(),
                    'tvecs': np.asarray(tvecs).tolist()
                }
                flag = self.write_cam_matrix(outputfile, data)
                if flag:
                    print("camera details is written to file")
                    cam_matrix, dist_coeff, rvecs, tvecs = self.read_cam_matrix(
                        outputfile)
                    print("new cam_matrix")
                    print(cam_matrix)
                    print("new dist_coeff")
                    print(dist_coeff)
                else:
                    print("error writing camera details to file")

        else:
            print(
                "No samples collected, or number of object points does not match number of image points"
            )
            print(len(all_obj))
            print(len(all_img))
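
read_cam_matrix and write_cam_matrix are class helpers not shown in this snippet; a minimal sketch of what they plausibly do with the YAML file, assuming the data layout built above (an assumption, not the original code):

import numpy as np
import yaml

def write_cam_matrix_sketch(outputfile, data):
    # Persist the calibration dict (plain lists) as YAML
    with open(outputfile, "w") as f:
        yaml.dump(data, f)
    return True

def read_cam_matrix_sketch(outputfile):
    # Load the calibration back and restore numpy arrays
    with open(outputfile) as f:
        data = yaml.safe_load(f)
    return (np.array(data['camera_matrix']), np.array(data['dist_coeff']),
            data['rvecs'], data['tvecs'])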
Example #21
cap = cv2.VideoCapture(videoFile)

while True:
    ret, frame = cap.read()  # Capture frame-by-frame
    if ret == True:
        frame_remapped = cv2.remap(
            frame, map1, map2, cv2.INTER_LINEAR,
            cv2.BORDER_CONSTANT)  # for fisheye remapping
        frame_remapped_gray = cv2.cvtColor(
            frame_remapped,
            cv2.COLOR_BGR2GRAY)  # aruco.detectMarkers() requires gray image

        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            frame_remapped_gray, aruco_dict,
            parameters=arucoParams)  # First, detect markers
        corners, ids, rejectedImgPoints, _ = aruco.refineDetectedMarkers(
            frame_remapped_gray, board, corners, ids, rejectedImgPoints)  # keep the refined detections

        if ids is not None:  # if there is at least one marker detected
            charucoretval, charucoCorners, charucoIds = aruco.interpolateCornersCharuco(
                corners, ids, frame_remapped_gray, board)
            im_with_charuco_board = aruco.drawDetectedCornersCharuco(
                frame_remapped, charucoCorners, charucoIds, (0, 255, 0))
            retval, rvec, tvec = aruco.estimatePoseCharucoBoard(
                charucoCorners, charucoIds, board, camera_matrix,
                dist_coeffs)  # posture estimation from a charuco board
            if retval:
                im_with_charuco_board = aruco.drawAxis(
                    im_with_charuco_board, camera_matrix, dist_coeffs, rvec,
                    tvec, 100
                )  # axis length 100 can be changed according to your requirement
        else:
Example #22
def show_webcam(mirror=False):
    global last_x, last_y, last_z, last_x_unfiltered, last_pitch_unfiltered, last_time, last_timestamps

    vs = VideoStream(src=0).start()

    slow_z = 0

    while True:
        img, cappos, capfps = vs.read()
        utcnowms = cappos  #int((datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds() * 1000)
        fps = 0
        if last_time is not None:
            if last_time == utcnowms:
                continue
            fps = 1000 / (utcnowms - last_time)

        last_time = utcnowms

        #(h, w) = img.shape[:2]
        #print(h,w)
        #center = (w/2,h/2)
        #M = cv2.getRotationMatrix2D(center, 180, 1)
        #img = cv2.warpAffine(img, M, (w, h))

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=arucoParams)

        if ids is not None:  # if aruco marker detected
            #rvec, tvec, _ = aruco.estimatePoseSingleMarkers(corners, markerLength, camera_matrix, dist_coeffs) # For a single marker
            img = aruco.drawDetectedMarkers(img, corners, ids, (0, 255, 0))

            v, rvec, tvec = aruco.estimatePoseBoard(corners,
                                                    ids,
                                                    board_small,
                                                    camera_matrix,
                                                    dist_coeffs,
                                                    None,
                                                    None,
                                                    useExtrinsicGuess=False)
            #print("board small", v, "rvec", rvec, "tvec", tvec)

            if v > 0:
                corners2, ids2, rejected2, recovered2 = aruco.refineDetectedMarkers(
                    gray,
                    board_small,
                    corners,
                    ids,
                    rejectedImgPoints,
                    camera_matrix,
                    dist_coeffs,
                    parameters=arucoParams)
                v, rvec, tvec = aruco.estimatePoseBoard(
                    corners2,
                    ids2,
                    board_small,
                    camera_matrix,
                    dist_coeffs,
                    None,
                    None,
                    useExtrinsicGuess=False)
                #img = aruco.drawAxis(img, camera_matrix, dist_coeffs, rvec, tvec, 3.21 / 2)

                # move small_board origin to slightly forward to robot body origin
                small_board_center = (smbm_len + smbm_sep / 2)  # cm, see above
                tag_to_body_center_height = 0  #2 + 0.5 # cm
                rot, _ = cv2.Rodrigues(rvec)
                centering_vec = np.dot(
                    rot,
                    np.array([[
                        small_board_center + 2, small_board_center,
                        -tag_to_body_center_height
                    ]]).T)
                tvec[0] += centering_vec[0]
                tvec[1] += centering_vec[1]
                tvec[2] += centering_vec[2]
                img = aruco.drawAxis(img, camera_matrix, dist_coeffs, rvec,
                                     tvec, 3.21 / 2)

            v2, rvec2, tvec2 = aruco.estimatePoseBoard(corners,
                                                       ids,
                                                       board_big,
                                                       camera_matrix,
                                                       dist_coeffs,
                                                       None,
                                                       None,
                                                       useExtrinsicGuess=False)
            #print("board big", v2, rvec2, tvec2)
            if v2 > 0:
                corners2, ids2, rejected2, recovered2 = aruco.refineDetectedMarkers(
                    gray,
                    board_big,
                    corners,
                    ids,
                    rejectedImgPoints,
                    camera_matrix,
                    dist_coeffs,
                    parameters=arucoParams)
                v2, rvec2, tvec2 = aruco.estimatePoseBoard(
                    corners2,
                    ids2,
                    board_big,
                    camera_matrix,
                    dist_coeffs,
                    None,
                    None,
                    useExtrinsicGuess=False)
                img = aruco.drawAxis(img, camera_matrix, dist_coeffs, rvec2,
                                     tvec2, 3.21 / 2)

            t = None  # 16
            m = None  # 684, 305

            if False:
                for i, e in enumerate(ids):
                    #print(e)
                    if e == 684:
                        img = aruco.drawAxis(img, camera_matrix, dist_coeffs,
                                             rvec[i], tvec[i], 4.8 / 2)
                        t = [rvec[i], tvec[i]]

                    if e == 305:
                        m = [rvec[i], tvec[i]]

            if v > 0 and v2 > 0:
                # both the floor board (v2) and agent board (v) have been detected
                rv, tv = get_relative_position(rvec, tvec, rvec2, tvec2)
                #print("tv", tv)
                #print("rv", rv)
                #print("dist", np.linalg.norm(tv))
                dist = np.linalg.norm(tv)

                if True:
                    linedist = 40
                    cv2.putText(img,
                                "Ant distance from reference to ant: %2.1fcm" %
                                dist, (20, 40),
                                fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                                fontScale=linedist / 30,
                                color=(0, 0, 255),
                                thickness=2)
                    cv2.putText(
                        img,
                        "Ant position: x: %2.1fcm y: %2.1fcm z: %2.1fcm" %
                        (tv[0], tv[1], tv[2]), (20, 40 + linedist),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=linedist / 30,
                        color=(0, 0, 255),
                        thickness=2)
                    cv2.putText(
                        img,
                        "Ant orientation: roll: %2.1fdeg pitch: %2.1fdeg yaw: %2.1fdeg"
                        % (rv[0] / np.pi * 180, rv[1] / np.pi * 180,
                           rv[2] / np.pi * 180), (20, 40 + linedist * 2),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=linedist / 30,
                        color=(0, 0, 255),
                        thickness=2)

                a = 0.3
                slow_z = a * tv[2] + (1 - a) * slow_z

                d = OrderedDict()
                d["id"] = "external_tag_tracking_camera"
                d["server_epoch_ms"] = utcnowms
                d["dist"] = float(dist)
                d["x"] = float(tv[0]) / 100.  # in m
                d["y"] = float(tv[1]) / 100.  # in m
                d["z"] = float(slow_z) / 100.  # in m
                d["roll"] = float(rv[0] / np.pi * 180)
                d["pitch"] = float(rv[1] / np.pi * 180)
                d["yaw"] = float(rv[2] / np.pi * 180)
                d["sent"] = False

                # filtering

                last_x_unfiltered.append(d["x"])
                last_x_unfiltered = last_x_unfiltered[-3:]

                if len(last_x_unfiltered) > 2:
                    xabsmaxdiff = np.abs(np.amax(np.diff(last_x_unfiltered)))
                else:
                    xabsmaxdiff = 0

                last_pitch_unfiltered.append(d["pitch"])
                last_pitch_unfiltered = last_pitch_unfiltered[-3:]

                if len(last_pitch_unfiltered) > 2:
                    pitchabsmaxdiff = np.abs(
                        np.amax(np.diff(last_pitch_unfiltered)))
                else:
                    pitchabsmaxdiff = 0

                if d["z"] < -0.05:
                    print("negative z, less than -0.05m, ignoring")
                elif xabsmaxdiff > 0.15:
                    print("more than 15cm jump in x, ignoring")
                elif pitchabsmaxdiff > 20:
                    print("more than 20deg jump in pitch, ignoring")
                else:
                    N = 5
                    last_x.append(d["x"])
                    last_x = last_x[-N:]
                    last_y.append(d["y"])
                    last_y = last_y[-N:]
                    last_z.append(d["z"])
                    last_z = last_z[-N:]

                    last_timestamps.append(d["server_epoch_ms"] * 1e-3)  # in s
                    last_timestamps = last_timestamps[-N:]

                    if len(last_x) >= N:
                        xvel = (last_x[-1] - last_x[-2]) / (
                            last_timestamps[-1] - last_timestamps[-2])
                        xvel_hd5 = holoborodko_diff(
                            np.array(last_x),
                            float(np.mean(np.diff(np.array(last_timestamps)))))

                        d["xvel_raw"] = xvel
                        d["xvel_hd5"] = xvel_hd5
                        d["xvel"] = d["xvel_hd5"]

                        yvel = (last_y[-1] - last_y[-2]) / (
                            last_timestamps[-1] - last_timestamps[-2])
                        yvel_hd5 = holoborodko_diff(
                            np.array(last_y),
                            float(np.mean(np.diff(np.array(last_timestamps)))))

                        d["yvel_raw"] = yvel
                        d["yvel_hd5"] = yvel_hd5
                        d["yvel"] = d["yvel_hd5"]

                        zvel = (last_z[-1] - last_z[-2]) / (
                            last_timestamps[-1] - last_timestamps[-2])
                        zvel_hd5 = holoborodko_diff(
                            np.array(last_z),
                            float(np.mean(np.diff(np.array(last_timestamps)))))

                        d["zvel_raw"] = zvel
                        d["zvel_hd5"] = zvel_hd5
                        d["zvel"] = d["zvel_hd5"]

                        sock.send_json(d)
                        d["sent"] = True

                print("fps %5.1f " % fps, end='')
                print(
                    "- %d dist %2.1f x %1.3fm y %1.3fm z %1.3fm roll %3.0f pitch %3.0f yaw %3.0f"
                    % tuple([
                        d[x] for x in [
                            "server_epoch_ms", "dist", "x", "y", "z", "roll",
                            "pitch", "yaw"
                        ]
                    ]),
                    end='')
                if "xvel" in d:
                    print(" xvel%6.3f %6.3f y%6.3f z%6.3f" %
                          (d["xvel"], d["xvel_raw"], d["yvel"], d["zvel"]))
                else:
                    print()

                logfile.write(json.dumps(d) + '\n')

        if mirror:
            img = cv2.flip(img, 1)

        if True:
            cv2.imshow('ant', img)
            if cv2.waitKey(1) == 27:
                break  # esc to quit
    cv2.destroyAllWindows()
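
Two helpers used above are defined elsewhere in this program. get_relative_position presumably expresses the agent board's pose in the floor board's frame, and holoborodko_diff presumably applies Holoborodko's smooth noise-robust differentiator to the last few samples. Plausible sketches under those assumptions (not the original implementations):

import cv2
import numpy as np

def get_relative_position_sketch(rvec_a, tvec_a, rvec_ref, tvec_ref):
    # Express board A's pose in the reference board's frame,
    # given both poses in the camera frame
    R_a, _ = cv2.Rodrigues(rvec_a)
    R_ref, _ = cv2.Rodrigues(rvec_ref)
    tv = R_ref.T @ (np.reshape(tvec_a, (3, 1)) - np.reshape(tvec_ref, (3, 1)))
    rv, _ = cv2.Rodrigues(R_ref.T @ R_a)
    return rv.ravel(), tv.ravel()

def holoborodko_diff_sketch(f, h):
    # Centered 5-point smooth noise-robust differentiator:
    # f' ~ (2*(f[3] - f[1]) + (f[4] - f[0])) / (8*h), h = sample spacing
    f = np.asarray(f, dtype=float)
    return (2.0 * (f[3] - f[1]) + (f[4] - f[0])) / (8.0 * h)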