Example #1
    def __init__(self):
        camera_id = str(rospy.get_param('~camera_id'))
        color_width = rospy.get_param('~color_width')
        color_high = rospy.get_param('~color_high')
        charuco_row = rospy.get_param('~charuco_row')
        charuco_col = rospy.get_param('~charuco_col')
        square_length = rospy.get_param('~square_length')
        marker_length = rospy.get_param('~marker_length')
        self.cnd = 0
        self.frameId = 0
        self.pipeline = rs.pipeline()
        rs_config = rs.config()
        rs_config.enable_stream(rs.stream.color, color_width, color_high,
                                rs.format.bgr8, 30)
        self.pipeline.start(rs_config)

        # Constant parameters used in Aruco methods
        self.aruco_param = aruco.DetectorParameters_create()
        self.aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
        # Create grid board object we're using in our stream
        self.charuco_board = aruco.CharucoBoard_create(
            squaresX=charuco_col,
            squaresY=charuco_row,
            squareLength=square_length,
            markerLength=marker_length,
            dictionary=self.aruco_dict)

        # Check for camera calibration data

        config = ConfigParser.ConfigParser()
        config.optionxform = str
        rospack = rospkg.RosPack()
        self.curr_path = rospack.get_path('hand_eye')
        config.read(self.curr_path + '/config/camera_' + str(camera_id) +
                    '_internal.ini')
        internal_name = 'Internal_' + str(color_width) + '_' + str(color_high)
        b00 = float(config.get(internal_name, "Key_1_1"))
        b01 = float(config.get(internal_name, "Key_1_2"))
        b02 = float(config.get(internal_name, "Key_1_3"))
        b10 = float(config.get(internal_name, "Key_2_1"))
        b11 = float(config.get(internal_name, "Key_2_2"))
        b12 = float(config.get(internal_name, "Key_2_3"))
        b20 = float(config.get(internal_name, "Key_3_1"))
        b21 = float(config.get(internal_name, "Key_3_2"))
        b22 = float(config.get(internal_name, "Key_3_3"))

        self.cameraMatrix = np.mat([[b00, b01, b02], [b10, b11, b12],
                                    [b20, b21, b22]])
        # c_x = 643.47548083
        # c_y = 363.67742746
        # f_x = 906.60886808
        # f_y = 909.34831447
        # k_1 = 0.16962942
        # k_2 = -0.5560001
        # p_1 = 0.00116353
        # p_2 = -0.00122694
        # k_3 = 0.52491878

        # c_x = 649.007507324219
        # c_y = 356.122222900391
        # f_x = 922.76806640625
        # f_y = 923.262023925781

        # self.cameraMatrix = np.array([[f_x, 0, c_x],
        #                        [0, f_y, c_y],
        #                        [0, 0, 1]])
        # self.distCoeffs = np.array([k_1, k_2, p_1, p_2, k_3])
        self.distCoeffs = np.array([0.0, 0, 0, 0, 0])

        # Create vectors we'll be using for rotations and translations for postures
        self.rvecs = None
        self.tvecs = None
        self.rvecs_arr = np.zeros((3, NUMBER))
        self.tvecs_arr = np.zeros((3, NUMBER))

        self.cam = None
        self.QueryImg = None
        self.init_server()
        frames = self.pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        self.QueryImg = np.asanyarray(color_frame.get_data())
Example #2
    if cameraMatrix is None or distCoeffs is None:
        print(
            "Calibration issue. Remove ./calibration/ProCamCalibration.pckl and recalibrate your camera with CalibrateCamera.py."
        )
        exit()

# Constant parameters used in Aruco methods
ARUCO_PARAMETERS = aruco.DetectorParameters_create()
ARUCO_DICT = aruco.Dictionary_get(aruco.DICT_5X5_50)
CHARUCOBOARD_ROWCOUNT = 8
CHARUCOBOARD_COLCOUNT = 6

# Create grid board object we're using in our stream
CHARUCO_BOARD = aruco.CharucoBoard_create(squaresX=CHARUCOBOARD_COLCOUNT,
                                          squaresY=CHARUCOBOARD_ROWCOUNT,
                                          squareLength=0.04,
                                          markerLength=0.02,
                                          dictionary=ARUCO_DICT)

# Create vectors we'll be using for rotations and translations for postures
rvecs, tvecs = None, None

# cam = cv2.VideoCapture('charucoboard-test-at-kyles-desk.mp4')
cam = cv2.VideoCapture('ChAruco_Circles.webm')

while (cam.isOpened()):
    # Capturing each frame of our video stream
    ret, QueryImg = cam.read()
    if ret == True:
        # grayscale image
        gray = cv2.cvtColor(QueryImg, cv2.COLOR_BGR2GRAY)
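        # --- Hypothetical continuation (not part of the original snippet) ---
        # Based on the other examples on this page, the loop would typically
        # detect the ArUco markers, interpolate the ChArUco corners and draw
        # them before showing the frame:
        corners, ids, _ = aruco.detectMarkers(gray, ARUCO_DICT,
                                              parameters=ARUCO_PARAMETERS)
        if ids is not None:
            num_corners, ch_corners, ch_ids = aruco.interpolateCornersCharuco(
                corners, ids, gray, CHARUCO_BOARD)
            if num_corners > 3:
                QueryImg = aruco.drawDetectedCornersCharuco(
                    QueryImg, ch_corners, ch_ids)
        cv2.imshow('QueryImg', QueryImg)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

cam.release()
cv2.destroyAllWindows()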
Example #3
def camera_calibration(dataDir):
    # This file can be used to generate camera calibration parameters
    # to improve the default values

    fileNameK = "{}intrinsic.txt".format(dataDir)
    fileNameD = "{}distCoeffs.txt".format(dataDir)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_250)
    board = aruco.CharucoBoard_create(6, 4, 0.056, 0.042, aruco_dict)

    allCorners = []
    allIds = []
    decimator = 0

    images = np.array(
        [dataDir + f for f in os.listdir(dataDir) if f.endswith(".png")])
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100,
                0.00001)

    for im in images:
        print("=> Processing image {0}".format(im))
        frame = cv2.imread(im)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
            gray, aruco_dict)

        if len(corners) > 0:
            # SUB PIXEL DETECTION
            for corner in corners:
                cv2.cornerSubPix(gray,
                                 corner,
                                 winSize=(3, 3),
                                 zeroZone=(-1, -1),
                                 criteria=criteria)
            res2 = cv2.aruco.interpolateCornersCharuco(corners, ids, gray,
                                                       board)
            if res2[1] is not None and res2[2] is not None and len(
                    res2[1]) > 3 and decimator % 1 == 0:
                allCorners.append(res2[1])
                allIds.append(res2[2])

        print("Image: {}/{}".format(decimator + 1, len(images)))
        print("Corners found: {}".format(len(corners)))
        decimator += 1

    imsize = gray.shape
    print("\n")
    print("Checkerboard detected in: {}/{} images".format(
        len(allCorners), decimator))

    cameraMatrixInit = np.array([[1000., 0., imsize[0] / 2.],
                                 [0., 1000., imsize[1] / 2.], [0., 0., 1.]])

    distCoeffsInit = np.zeros((5, 1))
    flags = (cv2.CALIB_USE_INTRINSIC_GUESS + cv2.CALIB_RATIONAL_MODEL +
             cv2.CALIB_FIX_ASPECT_RATIO)
    (ret, camera_matrix, distortion_coefficients0, rotation_vectors,
     translation_vectors, _, _, _) = cv2.aruco.calibrateCameraCharucoExtended(
         charucoCorners=allCorners,
         charucoIds=allIds,
         board=board,
         imageSize=imsize,
         cameraMatrix=cameraMatrixInit,
         distCoeffs=distCoeffsInit,
         flags=flags,
         criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 10000,
                   1e-9))

    np.savetxt(fileNameK, camera_matrix, delimiter=',')
    np.savetxt(fileNameD, distortion_coefficients0, delimiter=',')

    i = 5  # select image id
    plt.figure()
    frame = cv2.imread(images[i])
    img_undist = cv2.undistort(frame, camera_matrix, distortion_coefficients0,
                               None)
    plt.subplot(1, 2, 1)
    plt.imshow(frame)
    plt.title("Raw image")
    plt.axis("off")
    plt.subplot(1, 2, 2)
    plt.imshow(img_undist)
    plt.title("Corrected image")
    plt.axis("off")
    plt.show()
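A hedged usage sketch (not part of the original example): the function builds file paths by plain string concatenation, so dataDir should end with a slash and contain the .png calibration images.

# Hypothetical call; the directory name and its contents are assumptions
camera_calibration("./calib_images/")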
Example #4
def startTracking(marker_type, MLRSTr, n_sq_y, n_sq_x, squareLen, markerLen,
                  aruco_dict, socket_connect, c):
    RSTr = None
    # if the marker type is a Charuco board, execute the following block
    if marker_type == 1:
        rvec = np.zeros((1, 3))
        tvec = np.zeros((1, 3))
        objectPoints = np.zeros((1, 3))
    #configure Realsense camera stream
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    profile = pipeline.start(config)
    #get realsense stream intrinsics
    intr = profile.get_stream(rs.stream.color).as_video_stream_profile(
    ).get_intrinsics()  # profile.as_video_stream_profile().get_intrinsics()
    mtx = np.array([[intr.fx, 0, intr.ppx], [0, intr.fy, intr.ppy], [0, 0, 1]])
    dist = np.array(intr.coeffs)

    while (True):
        #start streaming
        frames = pipeline.wait_for_frames()
        #get color frame
        color_frame = frames.get_color_frame()
        input_img = np.asanyarray(color_frame.get_data())
        # operations on the frame - convert to grayscale
        gray = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)

        # detector parameters can be set here.
        parameters = aruco.DetectorParameters_create()
        parameters.adaptiveThreshConstant = 10
        # lists of ids and the corners belonging to each id
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)
        # font for displaying text (below)
        font = cv2.FONT_HERSHEY_SIMPLEX

        if marker_type == 1:
            # check if the ids list is not empty
            if (ids is not None):
                # create Charuco board
                charuco_board = aruco.CharucoBoard_create(
                    n_sq_x, n_sq_y, squareLen, markerLen, aruco_dict)
                ret, ch_corners, ch_ids = aruco.interpolateCornersCharuco(
                    corners, ids, gray, charuco_board)
                # if there are enough corners to get a reasonable result
                if (ret > 3):
                    aruco.drawDetectedCornersCharuco(input_img, ch_corners,
                                                     ch_ids, (0, 0, 255))
                    #estimate Charuco board pose
                    retval, rvec, tvec = cv2.aruco.estimatePoseCharucoBoard(
                        ch_corners, ch_ids, charuco_board, mtx, dist, None,
                        None)  # , useExtrinsicGuess=False)

                    print("pose", retval, rvec, tvec)
                    # if a pose could be estimated
                    if (retval):
                        # draw axis showing pose of board
                        rvec = np.reshape(rvec, (1, 3))
                        tvec = np.reshape(tvec, (3, 1))
                        aruco.drawAxis(input_img, mtx, dist, rvec, tvec, 0.1)
                        #convert to rotation matrix from rotation vector
                        R_rvec = R.from_rotvec(rvec)
                        R_rotmat = R_rvec.as_matrix()
                        # get transformation matrix combining tvec and rotation matrix
                        RSTr = np.hstack([R_rotmat.reshape((3, 3)), tvec])
                        RSTr = np.vstack([RSTr, [0, 0, 0, 1]])
                        print("RSTr", RSTr)

            else:
                # show 'No Ids' when no markers are found
                cv2.putText(input_img, "No Ids", (0, 64), font, 1, (0, 255, 0),
                            2, cv2.LINE_AA)

        # following part is executed if single Aruco marker is selected
        else:

            if ids is not None:
                # estimate pose of each marker and return the values
                rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
                    corners, markerLen, mtx, dist)
                # (rvec-tvec).any() # get rid of that nasty numpy value array error
                for i in range(0, ids.size):
                    # draw axis for the aruco markers
                    aruco.drawAxis(input_img, mtx, dist, rvec[i], tvec[i], 0.1)
                    # draw a square around the markers
                aruco.drawDetectedMarkers(input_img, corners)
                #select only the first marker in the list (assumes single Aruco marker is in the scene)
                #convert to rotation matrix from rotation vector
                R_rvec = R.from_rotvec(rvec[0])
                R_rotmat = R_rvec.as_matrix()
                # get transformation matrix combining tvec and rotation matrix
                RSTr = np.hstack([R_rotmat[0], tvec[0].transpose()])
                RSTr = np.vstack([RSTr, [0, 0, 0, 1]])

        if socket_connect == 1 and RSTr is not None:
            # transform the pose from OpenCV's right-handed rule to Unity's left-handed rule
            RSTr_LH = np.array([
                RSTr[0][0], RSTr[0][2], RSTr[0][1], RSTr[0][3], RSTr[2][0],
                RSTr[2][2], RSTr[2][1], RSTr[2][3], RSTr[1][0], RSTr[1][2],
                RSTr[1][1], RSTr[1][3], RSTr[3][0], RSTr[3][1], RSTr[3][2],
                RSTr[3][3]
            ])  # converting to left handed coordinate system
            RSTr_LH = RSTr_LH.reshape(4, 4)
            # compute transformed pose to send to MagicLeap
            PoseTr = np.matmul(MLRSTr, RSTr_LH)
            # Head Pose matrix in the form of array to be sent
            ArrToSend = np.array([
                PoseTr[0][0], PoseTr[0][1], PoseTr[0][2], PoseTr[0][3],
                PoseTr[1][0], PoseTr[1][1], PoseTr[1][2], PoseTr[1][3],
                PoseTr[2][0], PoseTr[2][1], PoseTr[2][2], PoseTr[2][3],
                PoseTr[3][0], PoseTr[3][1], PoseTr[3][2], PoseTr[3][3]
            ])

            # pack the array to be sent before sending
            dataTosend = struct.pack('f' * len(ArrToSend), *ArrToSend)

            if socket_connect == 1:  # and img_idx % skip_frame_send == 0:  #
                # img_sent = img_idx
                c.send(dataTosend)

        # display the resulting frame
        cv2.imshow('frame', input_img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cv2.destroyAllWindows()
Example #5
import time
import cv2.aruco as A
import cv2
import numpy as np

dictionary = A.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
board = A.CharucoBoard_create(5, 7, 1, 0.75, dictionary)
#img = board.draw((200*3,200*3))

#Dump the calibration board to a file
#cv2.imwrite('charuco.png',img)

#Start capturing images for calibration
cap = cv2.VideoCapture(4)

allCorners = []
allIds = []
decimator = 0
for i in range(300):

    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    res = cv2.aruco.detectMarkers(gray, dictionary)

    if len(res[0]) > 0:
        res2 = cv2.aruco.interpolateCornersCharuco(res[0], res[1], gray, board)
        if res2[1] is not None and res2[2] is not None and len(
                res2[1]) > 3 and decimator % 3 == 0:
            allCorners.append(res2[1])
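            # --- Hypothetical continuation (not part of the original snippet) ---
            allIds.append(res2[2])

    decimator += 1

# A typical finishing step, mirroring the calibration calls used in the other
# examples on this page (an assumption, not the original code):
imsize = gray.shape[::-1]  # (width, height)
ret = cv2.aruco.calibrateCameraCharuco(allCorners, allIds, board, imsize,
                                       None, None)
retval, cameraMatrix, distCoeffs, rvecs, tvecs = ret
print("Reprojection error:", retval)
print("Camera matrix:\n", cameraMatrix)
print("Distortion coefficients:", distCoeffs.ravel())

cap.release()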
Example #6
def createCharucoBoard():
    aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_50)
    board = aruco.CharucoBoard_create(6, 8, 0.025, 0.02, aruco_dict)
    img = board.draw((200 * 3, 200 * 3))
    cv2.imwrite("CharucoBoard.jpg", img)
Example #7
def generateCharucoBoard():
    board = aruco.CharucoBoard_create(7, 5, 0.04, 0.02, aruco_dict)
    imboard = board.draw((2000, 2000))
    cv.imwrite('charuco_board.tiff', imboard)
    return board
Example #8
# Amaan Vally
# Written using Python 3.8 and OpenCV 4.4.0
# Tested on Windows 10
import cv2
import cv2.aruco as aruco

# Create ChArUco board
# Adjust squaresX, squaresY, squareLength and markerLength as desired
gridboard = aruco.CharucoBoard_create(squaresX=5,
                                      squaresY=7,
                                      squareLength=0.045,
                                      markerLength=0.0275,
                                      dictionary=aruco.Dictionary_get(
                                          aruco.DICT_5X5_1000))

# Create an image from the gridboard and store to "ChArUco Board.jpg"
# 96 DPI jpg, so image size max 1123x1587 for A3
img = gridboard.draw(outSize=(1123, 1587))
cv2.imwrite("ChArUco Board.jpg", img)

# Show the ChArUco board on screen
cv2.imshow('ChArUco Board', img)

# Exit when a key is pressed
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #9
    def calibrateCamera(self, rows=7, columns=5, lengthSquare=0.0354, lengthMarker=0.0177):
        # Create charuco board with actual measured dimensions from print out
        board = aruco.CharucoBoard_create(
                    squaresX=columns,
                    squaresY=rows,
                    squareLength=lengthSquare,
                    markerLength=lengthMarker,
                    dictionary=self.arucoDict)

        # Sub pixel corner detection criteria 
        subPixCriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.00001)

        # Storage variables
        cornerList = []
        idList = []

        # Get image paths from calibration folder
        paths = glob.glob(self.calibrationDir + '*' + self.imgExtension)

        # Empty imageSize variable to be determined at runtime
        imageSize = None

        # Loop through all images
        for filePath in paths:
            # Read image and convert to gray
            img = cv2.imread(filePath)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            # Find aruco markers in the query image
            corners, ids, _ = aruco.detectMarkers(image=gray, dictionary=self.arucoDict)

            # Sub pixel detection
            for corner in corners:
                cv2.cornerSubPix(
                    image=gray,
                    corners=corner,
                    winSize=(3, 3),
                    zeroZone=(-1, -1),
                    criteria=subPixCriteria)

            # Get charuco corners and ids from detected aruco markers
            response, charucoCorners, charucoIDs = aruco.interpolateCornersCharuco(
                    markerCorners=corners,
                    markerIds=ids,
                    image=gray,
                    board=board)

            # If at least 20 corners were found
            if response > 20:
                # Add the corners and ids to calibration list
                cornerList.append(charucoCorners)
                idList.append(charucoIDs)

                # If image size is still None, set it to the image size
                if not imageSize:
                    imageSize = gray.shape[::-1]
                    
            else:
                # Error message
                print('Error in: ' + str(filePath))

        # Make sure at least one image was found
        if len(paths) < 1:
            print('No images of charucoboards were found.')
            return

        # Make sure at least one charucoboard was found
        if not imageSize:
            print('Images supplied were not recognized by calibration')
            return

        # Start a timer
        startTime = time.time()

        # Run calibration
        _, self.mtx, self.dist, _, _ = aruco.calibrateCameraCharuco(
                charucoCorners=cornerList,
                charucoIds=idList,
                board=board,
                imageSize=imageSize,
                cameraMatrix=None,
                distCoeffs=None)

        # Print how long it took
        print("Calibration took: ", round(time.time()-startTime),' s')

        # Display matrix and distortion coefficients
        print('Image size: ', imageSize)
        print(self.mtx)
        print(self.dist)

        # Pickle the results
        f = open('resources/calibration.pckl', 'wb')
        pickle.dump((self.mtx, self.dist), f)
        f.close()
Example #10
def main():

    # First we set up the cameras to start streaming
    # Define some constants
    resolution_width = 1280  # pixels
    resolution_height = 720  # pixels
    frame_rate = 30  # fps
    dispose_frames_for_stablisation = 30  # frames

    # Enable the streams from all the intel realsense devices
    rs_config = rs.config()
    rs_config.enable_stream(rs.stream.depth, resolution_width,
                            resolution_height, rs.format.z16, frame_rate)
    rs_config.enable_stream(rs.stream.infrared, 1, resolution_width,
                            resolution_height, rs.format.y8, frame_rate)
    rs_config.enable_stream(rs.stream.color, resolution_width,
                            resolution_height, rs.format.bgr8, frame_rate)

    # Use the device manager class to enable the devices and get the frames
    device_manager = DeviceManager(rs.context(), rs_config)
    device_manager.enable_all_devices()
    device_manager._available_devices.sort()
    device_list = device_manager._available_devices

    # Allow some frames for the auto-exposure controller to stabilise
    for frame in range(dispose_frames_for_stablisation):
        frames = device_manager.poll_frames_keep()

    assert (len(device_manager._available_devices) > 0)

    #Then we calibrate the images

    # Get the intrinsics of the realsense device
    intrinsics_devices = device_manager.get_device_intrinsics(frames)

    # Set the charuco board parameters for calibration
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    charuco_width = 8
    charuco_height = 5
    square_length = 0.03425
    marker_length = .026

    coordinate_dimentions = 3
    charuco_board = aruco.CharucoBoard_create(charuco_width, charuco_height,
                                              square_length, marker_length,
                                              aruco_dict)
    # Set the charuco board parameters for robot
    aruco_dict_r = aruco.Dictionary_get(aruco.DICT_4X4_250)
    charuco_width_r = 3
    charuco_height_r = 3
    square_length_r = 0.0311
    marker_length_r = .0247

    charuco_board_robot = aruco.CharucoBoard_create(charuco_width_r,
                                                    charuco_height_r,
                                                    square_length_r,
                                                    marker_length_r,
                                                    aruco_dict_r)

    # Set the charuco board parameters for robot
    aruco_dict_ro = aruco.Dictionary_get(aruco.DICT_5X5_100)
    charuco_width_ro = 3
    charuco_height_ro = 3
    square_length_ro = 0.0311
    marker_length_ro = .0247

    charuco_board_robot_2 = aruco.CharucoBoard_create(charuco_width_ro,
                                                      charuco_height_ro,
                                                      square_length_ro,
                                                      marker_length_ro,
                                                      aruco_dict_ro)

    # Decide if you want to use previous calibration or make a new one
    calibration_decision = input(
        'To use previous calibration, press "y". For new calibration press any key \n'
    )
    if calibration_decision != 'y':
        # Choose amount of frames to average
        correct_input = False
        while not correct_input:
            the_input = input(
                "Write amount of frames you want to use to calibrate \n")
            try:
                amount_frames = int(the_input)
                if amount_frames > 0:
                    correct_input = True
                else:
                    print("Frame count has to be positive")
            except ValueError:
                print('Input has to be int')

        # Make a dict to store all images for calibration
        frame_dict = {}
        transform_dict = {}
        rms_dict = {}
        for from_device in device_list:
            transform_dict[from_device] = {}
            rms_dict[from_device] = {}
            for to_device in device_list:
                transform_dict[from_device][to_device] = {}
                rms_dict[from_device][to_device] = np.inf
        print("Starting to take images in 5 seconds")
        time.sleep(5)
        devices_stitched = False

        while not devices_stitched:
            print("taking new set of  images")
            for frame_count in range(amount_frames):
                print("taking image")
                print(amount_frames - frame_count, "images left")
                frames = device_manager.poll_frames_keep()
                time.sleep(0.5)
                frame_dict[frame_count] = {}
                for device in device_list:
                    ir_frame = np.asanyarray(
                        frames[device][(rs.stream.infrared, 1)].get_data())
                    depth_frame = np.asanyarray(
                        post_process_depth_frame(
                            frames[device][rs.stream.depth]).get_data())
                    frame_dict[frame_count][device] = {
                        'ir_frame': ir_frame,
                        'depth_frame': depth_frame
                    }
                del frames

            # Make transfer matrices between all possible cameras
            for idx, from_device in enumerate(device_list[:-1]):
                for to_device in device_list[idx + 1:]:
                    if to_device != from_device:
                        temp_transform, temp_rms = get_transformation_matrix_wout_rsobject(
                            frame_dict, [from_device, to_device],
                            intrinsics_devices, charuco_board)
                        if temp_rms < rms_dict[from_device][to_device]:
                            rms_dict[from_device][to_device] = temp_rms
                            rms_dict[to_device][from_device] = temp_rms
                            transform_dict[from_device][
                                to_device] = temp_transform
                            transform_dict[to_device][
                                from_device] = temp_transform.inverse()

            # Use Dijkstra's to find the shortest path and check if all cameras are connected
            transformation_matrices = least_error_transfroms(
                transform_dict, rms_dict)
            if transformation_matrices != 0:
                devices_stitched = True
            # Prints rms error between camera transforms
            test = matrix_viewer(rms_dict)
            print(test)

        # Turns transformation matrices into Transformation objects
        transformation_devices = {}
        for matrix in transformation_matrices:
            the_matirx = transformation_matrices[matrix]
            transformation_devices[matrix] = Transformation(
                the_matirx[:3, :3], the_matirx[:3, 3])

        # Saves calibration transforms if the user wants to
        save_calibration = input(
            'Press "y" if you want to save calibration \n')
        if save_calibration == "y":
            calibration_name = input
            saved = False
            while not saved:
                name = input(
                    "Type in name of file to save. remember to end name with '.npy' \n"
                )
                try:
                    np.save(name, transformation_matrices)
                    saved = True
                except:
                    print(
                        "could not save, try another name and remember '.npy' in the end"
                    )

        frame_dict.clear()
    else:
        correct_filename = False
        while not correct_filename:
            file_name = input('Type in calibration file name \n')
            try:
                transfer_matirices_save = np.load(file_name, allow_pickle=True)
                transfer_matrices = transfer_matirices_save[()]
                correct_filename = True
            except:
                print('No such file in directory: "', file_name, '"')
        transformation_devices = {}
        for matrix in transfer_matrices:
            the_matrix = transfer_matrices[matrix]
            transformation_devices[matrix] = Transformation(
                the_matrix[:3, :3], the_matrix[:3, 3])

    print("Calibration completed...")

    # Enable the emitter of the devices and extract serial numbers to identify cameras
    device_manager.enable_emitter(True)
    key_list = device_manager.poll_frames().keys()

    while True:
        visualisation = input(
            'Presss "1" for RMS error, "2" for chessboard visualisation and "3" for 3d pointcloud and "4" for robot to camea calibration\n'
        )

        if visualisation == '1':

            calculate_avg_rms_error(device_list, device_manager,
                                    transformation_devices, charuco_board,
                                    intrinsics_devices)

        elif visualisation == '2':

            visualise_chessboard(device_manager, device_list,
                                 intrinsics_devices, charuco_board,
                                 transformation_devices)

        elif visualisation == '3':

            visualise_point_cloud(key_list, resolution_height,
                                  resolution_width, device_manager,
                                  coordinate_dimentions,
                                  transformation_devices)
        elif visualisation == '4':
            robot_calibration = CameraRobotCalibration(transformation_devices,
                                                       device_manager,
                                                       charuco_board_robot_2,
                                                       charuco_board_robot)
            input("Set target in position for calibration\n")
            test = robot_calibration.test_functions()
            print(test)

        else:
            print("Input not recognised")
Example #11
    def createCheckerBoard(self):
        board = aruco.CharucoBoard_create(3, 3, 1, 0.8, self.aruco_dict)
        imboard = board.draw((4000, 4000))
        return board
Example #12
import numpy as np
import cv2, os
from cv2 import aruco
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd

arucoParams = aruco.DetectorParameters_create()
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
markerLength = 1
markerSeparation = 0.5
targetwidth = 4
targetheight = 4

board = aruco.CharucoBoard_create(targetwidth, targetheight, markerLength,
                                  markerSeparation, aruco_dict)
imboard = board.draw((1000, 1000))
handle = plt.imshow(imboard, cmap='gray')
plt.axis('off')
plt.savefig(basefolder + "/charuco_markers.png")

camera = cv2.VideoCapture(0)

while True:
    status, img_charuco = camera.read()
    im_gray = cv2.cvtColor(img_charuco, cv2.COLOR_RGB2GRAY)
    h, w = im_gray.shape[:2]
    dst = cv2.undistort(im_gray, camera_matrix, distortion_coff, None,
                        newcameramtx)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        dst, aruco_dict, parameters=arucoParams)
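    # --- Hypothetical continuation (not part of the original snippet) ---
    # camera_matrix, distortion_coff and newcameramtx are assumed to come from
    # the calibration code that the excerpt omits, as in the other examples here.
    if ids is not None:
        retval, ch_corners, ch_ids = aruco.interpolateCornersCharuco(
            corners, ids, dst, board)
        if retval > 3:
            valid, rvec, tvec = aruco.estimatePoseCharucoBoard(
                ch_corners, ch_ids, board, newcameramtx, distortion_coff,
                None, None)
            if valid:
                aruco.drawAxis(dst, newcameramtx, distortion_coff, rvec, tvec,
                               markerLength)
    cv2.imshow('charuco pose', dst)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

camera.release()
cv2.destroyAllWindows()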
Example #13
import csv

cwd = os.getcwd()
fk_ur5 = UR5Kin()

################### Initialisation ################################
camera = 2  # number of cameras
rootDir = "/home/zheyu/ur5_calib_toolkit/ZED/"
robotYAML = "{}robotCalibration.yaml".format(rootDir)
cameraYAML = "cameraCalibration.yaml"
extrinsicDataDir = "{}calibDataset/".format(rootDir)
tableDataDir = "{}tableCalibDataset/".format(rootDir)
fileName = "jointAngles.csv"
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_1000)  # 4X4 = 4x4 bit markers
charuco_board = aruco.CharucoBoard_create(
    4, 6, 0.063, 0.048,
    aruco_dict)  # width, height (full board), square length, marker length
##################################################################


class chArucoCalib:
    def __init__(self):

        images = np.array([
            extrinsicDataDir + f for f in os.listdir(extrinsicDataDir)
            if f.endswith(".png")
        ])
        order = np.argsort(
            [int(p.split(".")[-2].split("_")[-1]) for p in images])
        images = images[order]
        q = []
Example #14
import cv2
from cv2 import aruco

dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)

nx = int(input('Define board size, # of squares in x [int]: '))
ny = int(input('Define board size, # of squares in y [int]: '))
wc = float(
    input('Define marker size, chess square size in m [float] (e.g. 0.025): '))
wa = float(
    input(
        'Define marker size, aruco square size in m [float] (e.g. 0.0175): '))
board = aruco.CharucoBoard_create(nx, ny, wc, wa, dictionary)

img = board.draw((9 * 500, 6 * 500))
cv2.imwrite('charuco_board.png', img)
Example #15
import numpy as np
import cv2, PIL, os
from cv2 import aruco
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
import imutils
#matplotlib nbagg

aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
imagesFolder = "images/"

########## create marker board for calibration ##########
# settings
board = aruco.CharucoBoard_create(4, 4, 0.04, 0.03, aruco_dict)
imboard = board.draw((4000, 4000))
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
plt.imshow(imboard, cmap=mpl.cm.gray, interpolation="nearest")
ax.axis("off")
cv2.imwrite(imagesFolder + "chessboard.tiff", imboard)
#plt.savefig(imagesFolder + "camCal.pdf")
print("->  print it out for calibration!")
plt.grid()
plt.show()

# ######### convert video to images ##########
# # Settings
# videoFile = "videos/camCal.mp4"
# aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
Example #16
parser.add_argument('--dataset',
                    type=str,
                    help='directory for saving the data')
#
parser.add_argument('--aruco_bit',
                    type=int,
                    default=4,
                    help='format of aruco dictionary')
parser.add_argument('--board_dim',
                    type=int,
                    nargs="+",
                    default=[4, 6],
                    help='width, height of checkerboard (unit: squares)')
parser.add_argument('--square_len',
                    type=float,
                    default=0.062,
                    help='measured in metre')
parser.add_argument('--marker_len',
                    type=float,
                    default=0.046,
                    help='measured in metre')
parser.add_argument('--camera_topic',
                    type=str,
                    default='/camera/color/image_raw')
args = parser.parse_args()

aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_1000)  # 4X4 = 4x4 bit markers
charuco_board = aruco.CharucoBoard_create(args.board_dim[0], args.board_dim[1],
                                          args.square_len, args.marker_len,
                                          aruco_dict)
Example #17
import os
import numpy as np
import cv2
from cv2 import aruco

aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)

board = aruco.CharucoBoard_create(8, 6, 1, 0.8, aruco_dict)
imboard = board.draw((8000, 6000))
imboard_show = cv2.resize(imboard, (800, 600))

cv2.imshow("Charuco", imboard_show)
cv2.waitKey(0)
chessboard_file = os.path.join('data', 'charuco_board.png')
cv2.imwrite(chessboard_file, imboard)
Example #18
def makechessandcharucoboard(nrowchess=3,
                             ncolumnchess=5,
                             squaresize=25,
                             nrowcharuco=3,
                             ncolumncharuco=5,
                             markerdict=aruco.DICT_6X6_250,
                             squaresizearuco=25,
                             savepath='./',
                             paperwidth=210,
                             paperheight=297,
                             dpi=600,
                             framesize=None):
    """
    create half-chess and half-charuco board
    the paper is in portrait orientation, nrow means the number of markers in the vertical direction

    :param nrow:
    :param ncolumn:
    :param squaresize: mm
    :param markerdict:
    :param savepath:
    :param paperwidth: mm
    :param paperheight: mm
    :param dpi:
    :param framesize: (width, height) the 1pt frame for easy cut, nothing is drawn by default
    :return:

    author: weiwei
    date: 20190420
    """

    aruco_dict = aruco.Dictionary_get(markerdict)
    # 1 mm = 0.0393701 inch
    a4npxrow = int(paperheight * 0.0393701 * dpi)
    a4npxcolumn = int(paperwidth * 0.0393701 * dpi)
    bgimg = np.ones((a4npxrow, a4npxcolumn), dtype='uint8') * 255

    if framesize is not None:
        framesize[0] = int(framesize[0] * 0.0393701 * dpi)
        framesize[1] = int(framesize[1] * 0.0393701 * dpi)
        if a4npxcolumn < framesize[0] + 2:
            print("Frame width must be smaller than the #pt in each row.")
        if a4npxrow < framesize[1] + 2:
            print("Frame height must be smaller than the #pt in each column.")
        framelft = int((a4npxcolumn - framesize[0]) / 2 - 1)
        framergt = int(framelft + 1 + framesize[0])
        frametop = int((a4npxrow - framesize[1]) / 2 - 1)
        framedown = int(frametop + 1 + framesize[1])
        bgimg[frametop:framedown + 1, framelft:framelft + 1] = 0
        bgimg[frametop:framedown + 1, framergt:framergt + 1] = 0
        bgimg[frametop:frametop + 1, framelft:framergt + 1] = 0
        bgimg[framedown:framedown + 1, framelft:framergt + 1] = 0

    # upper half, charuco
    squaresizepx = int(squaresizearuco * 0.0393701 * dpi)
    squareareanpxrow = squaresizepx * nrowchess
    uppermargin = int((a4npxrow / 2 - squareareanpxrow) / 2)
    squareareanpxcolumn = squaresizepx * ncolumnchess
    leftmargin = int((a4npxcolumn - squareareanpxcolumn) / 2)

    if (uppermargin <= 10) or (leftmargin <= 10):
        print("Too many markers! Reduce nrow and ncolumn.")
        return

    board = aruco.CharucoBoard_create(ncolumnchess, nrowchess, squaresizearuco,
                                      .57 * squaresizearuco, aruco_dict)
    imboard = board.draw((squareareanpxcolumn, squareareanpxrow))
    print(imboard.shape)
    startrow = uppermargin
    endrow = uppermargin + squareareanpxrow
    startcolumn = leftmargin
    endcolumn = leftmargin + squareareanpxcolumn
    bgimg[startrow:endrow, startcolumn:endcolumn] = imboard

    # lower half, chess
    squaresizepx = int(squaresize * 0.0393701 * dpi)

    squareareanpxrow = squaresizepx * nrowcharuco
    uppermargin = int((a4npxrow / 2 - squareareanpxrow) / 2)
    squareareanpxcolumn = squaresizepx * ncolumncharuco
    leftmargin = int((a4npxcolumn - squareareanpxcolumn) / 2)

    if (uppermargin <= 10) or (leftmargin <= 10):
        print("Too many markers! Reduce nrow and ncolumn.")
        return

    for idnr in range(nrowcharuco):
        for idnc in range(ncolumncharuco):
            startrow = int(a4npxrow / 2) + uppermargin + idnr * squaresizepx
            endrow = startrow + squaresizepx
            startcolumn = leftmargin + idnc * squaresizepx
            endcolumn = squaresizepx + startcolumn
            if idnr % 2 != 0 and idnc % 2 != 0:
                bgimg[startrow:endrow, startcolumn:endcolumn] = 0
            if idnr % 2 == 0 and idnc % 2 == 0:
                bgimg[startrow:endrow, startcolumn:endcolumn] = 0

    im = Image.fromarray(bgimg).convert("L")
    im.save(savepath + "test.pdf", "PDF", resolution=dpi)
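A hedged usage sketch (argument values are illustrative, not from the original):

# Hypothetical call: print an A4 page with a 3x5 chessboard half and a 3x5
# ChArUco half, 25 mm squares, plus a 180 x 260 mm cutting frame.
makechessandcharucoboard(nrowchess=3, ncolumnchess=5, squaresize=25,
                         nrowcharuco=3, ncolumncharuco=5, squaresizearuco=25,
                         savepath='./', framesize=[180, 260])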
Example #19
import numpy as np
import cv2
import cv2.aruco as aruco
ID = 1  # marker ID
'''
    drawMarker(...)
        drawMarker(dictionary, id, sidePixels[, img[, borderBits]]) -> img
'''
files = ""
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
squareLength = 40  # Here, our measurement unit is centimetre.
markerLength = 30  # Here, our measurement unit is centimetre.
board = aruco.CharucoBoard_create(5, 7, squareLength, markerLength, aruco_dict)
for i in range(12):
    # second parameter is id number
    # last parameter is total image size
    img = aruco.drawMarker(aruco_dict, i, 125)
    # BBB = board.draw((200*3,200*3))
    files = "ID" + str(i) + ".jpg"
    cv2.imwrite(files, img)
# cv2.imwrite("test.jpg",board)

cv2.imshow('frame', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #20
def charuco_detect(path, video_resolution):

    aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_250)
    board = aruco.CharucoBoard_create(7, 5, 1, .8, aruco_dict)
    images = glob.glob(path)

    def read_chessboards(images):
        """
        Charuco base pose estimation.
        """
        print("POSE ESTIMATION STARTS:")
        allCorners = []
        allIds = []
        decimator = 0
        # SUB PIXEL CORNER DETECTION CRITERION
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100,
                    0.00001)

        for im in images:
            print("=> Processing image {0}".format(im))
            frame = cv2.imread(im)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
                gray, aruco_dict)

            if len(corners) > 0:
                # SUB PIXEL DETECTION
                for corner in corners:
                    cv2.cornerSubPix(gray,
                                     corner,
                                     winSize=(5, 5),
                                     zeroZone=(-1, -1),
                                     criteria=criteria)
                res2 = cv2.aruco.interpolateCornersCharuco(
                    corners, ids, gray, board)
                if res2[1] is not None and res2[2] is not None and len(
                        res2[1]) > 3 and decimator % 1 == 0:
                    allCorners.append(res2[1])
                    allIds.append(res2[2])

            decimator += 1

        imsize = gray.shape
        return allCorners, allIds, imsize

    def calibrate_camera(allCorners, allIds, imsize):
        """
        Calibrates the camera using the detected corners.
        """
        print("CAMERA CALIBRATION")

        cameraMatrixInit = np.array([[1000., 0., imsize[0] / 2.],
                                     [0., 1000., imsize[1] / 2.], [0., 0.,
                                                                   1.]])

        distCoeffsInit = np.zeros((5, 1))
        flags = (cv2.CALIB_USE_INTRINSIC_GUESS + cv2.CALIB_RATIONAL_MODEL +
                 cv2.CALIB_FIX_ASPECT_RATIO)
        #flags = (cv2.CALIB_RATIONAL_MODEL)
        (ret, camera_matrix, distortion_coefficients0, rotation_vectors,
         translation_vectors, stdDeviationsIntrinsics, stdDeviationsExtrinsics,
         perViewErrors) = cv2.aruco.calibrateCameraCharucoExtended(
             charucoCorners=allCorners,
             charucoIds=allIds,
             board=board,
             imageSize=imsize,
             cameraMatrix=cameraMatrixInit,
             distCoeffs=distCoeffsInit,
             flags=flags,
             criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 10000,
                       1e-9))

        return ret, camera_matrix, distortion_coefficients0, rotation_vectors, translation_vectors

    allCorners, allIds, imsize = read_chessboards(images)
    ret, mtx, dist, rvecs, tvecs = calibrate_camera(allCorners, allIds, imsize)
    tvecs = np.array(tvecs)
    mtx = np.array(mtx)
    rvecs = np.array(rvecs)

    return mtx, dist, rvecs[0], tvecs[0], allCorners[0], allIds[0]
Example #21
}
# For validating results, show aruco board to camera.
board_params["aruco_dict"] = aruco.getPredefinedDictionary(board_params["dict_type"])
"""

# create arUco board
if board_params["board_type"] == "aruco":
    board = aruco.GridBoard_create(board_params["markersX"],
                                   board_params["markersY"],
                                   board_params["markerLength"],
                                   board_params["markerSeparation"],
                                   board_params["aruco_dict"])
elif board_params["board_type"] == "charuco":
    board = aruco.CharucoBoard_create(board_params["squaresX"],
                                      board_params["squaresY"],
                                      board_params["squareLength"],
                                      board_params["markerLength"],
                                      board_params["aruco_dict"])
else:
    raise ValueError("Couldn't find correct board type (aruco or charuco)")
"""uncomment following block to draw and show the board"""
# img = board.draw((864, 1080))
# cv2.imshow("aruco", img)
# cv2.imwrite("aruco.png", img)

arucoParams = aruco.DetectorParameters_create()

# Set second camera outside laptop
# cv2.CAP_DSHOW removes delay on logitech C920 webcam
camera = cv2.VideoCapture(1, cv2.CAP_DSHOW)
camera.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
Example #22
def run(window_placement_x, window_placement_y, max_window_size_x,
        max_window_size_y):
    # board properties

    # aruco example
    board_params = {
        "board_type": "aruco",
        "dict_type": aruco.DICT_6X6_1000,
        "markersX": 4,
        "markersY": 5,
        "markerLength": 3.75,  # Provide length of the marker's side [cm]
        "markerSeparation": 0.5,  # Provide separation between markers [cm]
    }
    # For validating results, show aruco board to camera.
    board_params["aruco_dict"] = aruco.getPredefinedDictionary(
        board_params["dict_type"])
    """# charuco example
    board_params = {
        "board_type": "charuco",
        "dict_type": aruco.DICT_4X4_1000,
        "squaresX": 4,
        "squaresY": 4,
        "squareLength": 3,  # Provide length of the square's side [cm]
        "markerLength": 3 * 7 / 9,  # Provide length of the marker's side [cm]
    }
    # For validating results, show aruco board to camera.
    board_params["aruco_dict"] = aruco.getPredefinedDictionary(board_params["dict_type"])
    """

    # create arUco board
    if board_params["board_type"] == "aruco":
        board = aruco.GridBoard_create(board_params["markersX"],
                                       board_params["markersY"],
                                       board_params["markerLength"],
                                       board_params["markerSeparation"],
                                       board_params["aruco_dict"])
    elif board_params["board_type"] == "charuco":
        board = aruco.CharucoBoard_create(board_params["squaresX"],
                                          board_params["squaresY"],
                                          board_params["squareLength"],
                                          board_params["markerLength"],
                                          board_params["aruco_dict"])
    else:
        raise ValueError("Couldn't find correct board type (aruco or charuco)")
    """uncomment following block to draw and show the board"""
    # img = board.draw((864, 1080))
    # cv2.imshow("aruco", img)
    # cv2.imwrite("aruco.png", img)

    arucoParams = aruco.DetectorParameters_create()

    # Set second camera outside laptop
    # cv2.CAP_DSHOW removes delay on logitech C920 webcam
    camera = cv2.VideoCapture(1, cv2.CAP_DSHOW)
    camera.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
    camera.set(cv2.CAP_PROP_FRAME_WIDTH, 960)
    camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 540)
    # Turn the autofocus off
    camera.set(cv2.CAP_PROP_AUTOFOCUS, 0)

    # Open calibration file created with data_generation.py
    with open('calibration.yaml') as f:
        loadeddict = yaml.safe_load(f)
    mtx = loadeddict.get('camera_matrix')
    dist = loadeddict.get('dist_coeff')
    mtx = np.array(mtx)
    dist = np.array(dist)

    ret, img = camera.read()
    img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    h, w = img_gray.shape[:2]
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1,
                                                      (w, h))

    pose_r, pose_t = [], []

    while True:
        ret, img = camera.read()
        img_aruco = img
        im_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        h, w = im_gray.shape[:2]
        dst = cv2.undistort(im_gray, mtx, dist, None, newcameramtx)
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            dst, board_params["aruco_dict"], parameters=arucoParams)
        # cv2.imshow("original", img_gray)
        if ids is None:
            raise Exception("No aruco corners found!")
        else:
            rvec = None
            tvec = None
            ret, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board,
                                                      newcameramtx, dist, rvec,
                                                      tvec)  # For a board

            print("Rotation ", rvec, "\nTranslation", tvec, "\n=======")
            if ret != 0:
                img_aruco = aruco.drawDetectedMarkers(img, corners, ids,
                                                      (0, 255, 0))
                img_aruco = aruco.drawAxis(
                    img_aruco, newcameramtx, dist, rvec, tvec, 10
                )  # axis length (here 10) can be changed according to your requirement
            cv2.imshow("World co-ordinate frame axes", img_aruco)
            if cv2.waitKey(0) & 0xFF == ord('q'):
                break
    cv2.destroyAllWindows()

    # output from aruco
    theta = Symbol('theta')

    # aruco marker values [cm]
    aruco_rvec = [rvec[0][0], rvec[1][0], rvec[2][0]]
    aruco_tvec = [tvec[0][0], tvec[1][0], tvec[2][0]]

    # x-value is off by 84 mm
    # y-value is off by 4 mm
    # z-value is off by 25 mm
    aruco_x = aruco_tvec[0] / 100
    aruco_y = aruco_tvec[
        2] / 100  # y and z-axis are switched in the robot coordinates!!!
    aruco_z = aruco_tvec[
        1] / -100  # y and z-axis are switched in the robot coordinates!!! the minus is to compensate for axis orientation
    aruco_angle = 0

    theta = aruco_angle
    x, y, z = 0.485, -0.295, 0.456  # coordinates of camera in respect to robot coord [m]
    a, b, c = aruco_x, aruco_y, aruco_z  # coordinates of marker in respect to the camera [m]

    # translate 3d coordinate systems
    Robot = CoordSys3D('Robot')  # base coordinate system
    Cam = Robot.locate_new('Cam', x * Robot.i + y * Robot.j + z * Robot.k)
    Cam2 = Cam.orient_new_axis('Cam2', theta, Cam.i)
    Marker = Cam2.locate_new('Marker', a * Cam2.i + b * Cam2.j + c * Cam2.k)

    # Calculate marker position with respect to the robots base
    mat = Marker.position_wrt(Robot).to_matrix(Robot)
    mat = dense.matrix2numpy(mat, float)

    fig = pyplot.figure()
    ax = Axes3D(fig)

    ax.scatter(0, 0, 0, label='Robot position')
    ax.scatter(x, y, z, label='Camera position')

    ax.scatter(mat[0][0], mat[1][0], mat[2][0], label='Label position')

    ax.legend()
    ax.set_xlabel("x-axis")
    ax.set_ylabel("y-axis")
    ax.set_zlabel("z-axis")

    # Plot positions of robot_base, camera and position of the marker in 3D
    # todo turn on plot for info (turned off for fast testing)
    # pyplot.show()

    # Robot movements
    # todo: Moving x, y and z-plane robot
    # todo: Line up with aruco Marker (0.2m y-offset from board)

    rob = van_robot.Robot("192.168.0.1", True)  # Set IP address
    orig_tcp = (0, 0, 0.150, 0, 0, 0)
    rob.set_tcp(orig_tcp)  # Set Tool Center Point
    rob.set_payload(0.5, (0, 0, 0))  # Set payload for joints
    time.sleep(0.2)  # leave some time for the robot to process the setup commands
    a, v = (0.05, 0.5)  # acceleration and movement speed of the robot

    # Start from the pose the robot is in
    pos0 = rob.getl()

    # Tool orientation [rx, ry, rz]
    tool_orient = [0, -2.221441469, -2.221441469]

    # Put the X, Y, Z of start pose in (last 3 coordinates are tool head positioning)
    pos_start = [
        pos0[0], pos0[1], pos0[2], tool_orient[0], tool_orient[1],
        tool_orient[2]
    ]

    # Keep a safe Y-distance of 0.12 m out of the van
    safe_dist = mat[1] - 0.12

    # Move to the aruco marker point
    # todo turn on move command
    rob.movel((mat[0], safe_dist, mat[2], tool_orient[0], tool_orient[1],
               tool_orient[2]), a, v)

    # Calibration Y-plane and X,Z-Axis

    # Declare the port and baudrate for the Arduino and connect to the Arduino
    # todo CHECK COM!
    arduino = arduino_serial.Arduino("COM3")

    # start position of the robot (from aruco marker)
    # assumed is a robot start position which is close to the centre of the measured plane
    pos_start = (mat[0], safe_dist, mat[2], tool_orient[0], tool_orient[1],
                 tool_orient[2])

    # Take safe offset from Y-plan after global aruco Y-value
    measurement_distance = -0.03
    board_distance = -0.1
    rob.centre_for_triangle(arduino, measurement_distance, board_distance)

    # Define Y-plane
    rob.measure_triangle(arduino, 0.160, a, v)

    # Define X,Z-axis
    points1, points2 = rob.repetitive_def_axis(arduino, -0.02)

    # Create origin

    # coordinates in [X,Z]

    # SIDE Coordinates [z-formula]
    coor1 = [points1[0][2][0], points1[0][2][2]]  # [X,Z]
    coor2 = [points2[0][2][0], points2[0][2][2]]

    # TOP Coordinates [x-formula]
    coor3 = [points1[1][2][0], points1[1][2][2]]
    coor4 = [points2[1][2][0], points2[1][2][2]]

    # create_origin.formula_line('x',coor1,coor2)
    # create_origin.formula_line('z',coor3,coor4)
    line_x = create_origin.formula_line('x', coor1, coor2)
    line_z = create_origin.formula_line('z', coor3, coor4)
    origin = create_origin.intersect_lines(line_x, line_z)
    print(origin)  # [X,Z]

    # Calculate translation to origin
    cur_pos = rob.getl()
    cur_pos[0] = origin[0]
    cur_pos[2] = origin[1]
    # Move to origin (first moves out, then in x,z-plane)
    print("X,Z-Axis origin has been set.")
    print("Moving to origin.")
    rob.move_y_plane(-0.15, cur_pos)

    #test stuff for rectangle
    delta_x = window_placement_x  # distance in meters
    delta_z = window_placement_y  # distance in meters
    radius = 0.06
    x_axis_offset = max_window_size_x  # [m]
    z_axis_offset = -max_window_size_y  # [m]

    rob.draw_square(delta_x, delta_z, radius, -0.012, x_axis_offset,
                    z_axis_offset)
Example #23
import cv2
from cv2 import aruco

# Create aruco chessboard then we load it into Unreal
aruco_dict = aruco.Dictionary_get(aruco.DICT_ARUCO_ORIGINAL)
board = aruco.CharucoBoard_create(3, 3, .025, .0125, aruco_dict)
image_board = board.draw((200 * 3, 200 * 3))
cv2.imwrite(r'.\src\calibration\charuco.png', image_board)
Example #24
def calibcharucoboard(nrow,
                      ncolumn,
                      arucomarkerdict=aruco.DICT_6X6_250,
                      squaremarkersize=25,
                      imgspath='./',
                      savename='mycam_charuco_data.yaml'):
    """

    :param nrow:
    :param ncolumn:
    :param markerdict:
    :param imgspath:
    :param savename:
    :return:

    author: weiwei
    date: 20190420
    """

    # read images and detect corners
    aruco_dict = aruco.Dictionary_get(arucomarkerdict)
    allCorners = []
    allIds = []
    # SUB PIXEL CORNER DETECTION CRITERION
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 27, 0.0001)

    board = aruco.CharucoBoard_create(ncolumn, nrow, squaremarkersize,
                                      .57 * squaremarkersize, aruco_dict)

    images = glob.glob(imgspath + '*.png')
    candfiles = []
    for fname in images:
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
            gray, aruco_dict)
        if len(corners) > 0:
            # SUB PIXEL DETECTION
            for corner in corners:
                cv2.cornerSubPix(gray,
                                 corner,
                                 winSize=(2, 2),
                                 zeroZone=(-1, -1),
                                 criteria=criteria)
            res2 = cv2.aruco.interpolateCornersCharuco(corners, ids, gray,
                                                       board)
            # require len(res2[1]) > (nrow-1)*(ncolumn-1)/2, i.e. at least half of the chessboard corners are detected
            if res2[1] is not None and res2[2] is not None and len(
                    res2[1]) > (nrow - 1) * (ncolumn - 1) / 2:
                allCorners.append(res2[1])
                allIds.append(res2[2])
                candfiles.append(fname.split("/")[-1])
            imaxis = aruco.drawDetectedMarkers(img, corners, ids)
            imaxis = aruco.drawDetectedCornersCharuco(imaxis, res2[1], res2[2],
                                                      (255, 255, 0))
            cv2.imshow('img', imaxis)
            cv2.waitKey(100)

    # The calibrateCameraCharucoExtended function additionally estimates calibration errors.
    # Thus, stdDeviationsIntrinsics, stdDeviationsExtrinsics and perViewErrors are returned.
    # We don't use them here, though.
    # see https://docs.opencv.org/3.4.6/d9/d6a/group__aruco.html for details
    (ret, mtx, dist, rvecs, tvecs,
     stdDeviationsIntrinsics, stdDeviationsExtrinsics, perViewErrors) = \
        cv2.aruco.calibrateCameraCharucoExtended(charucoCorners=allCorners, charucoIds=allIds, board=board,
                                                 imageSize=gray.shape[::-1], cameraMatrix=None, distCoeffs=None,
                                                 flags=cv2.CALIB_RATIONAL_MODEL,
                                                 criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 10000, 1e-9))
    print(ret, mtx, dist, rvecs, tvecs, candfiles)
    if ret:
        with open(savename, "w") as f:
            yaml.dump([mtx, dist, rvecs, tvecs, candfiles], f)
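A minimal usage sketch for calibcharucoboard; the image folder and board geometry below are illustrative assumptions, not values from the original source.

# Calibrate from photos of a 7x5-square DICT_6X6_250 board saved as ./calib_imgs/*.png
calibcharucoboard(nrow=5,
                  ncolumn=7,
                  arucomarkerdict=aruco.DICT_6X6_250,
                  squaremarkersize=25,
                  imgspath='./calib_imgs/',
                  savename='mycam_charuco_data.yaml')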
Example #25
0
import numpy as np
from cv2 import aruco
from PIL import Image

_MM_TO_INCH = 1 / 25.4  # assumed module-level constant: 1 mm = 1/25.4 inch


def make_charuco_board(nrow,
                       ncolumn,
                       marker_dict=aruco.DICT_4X4_250,
                       square_size=25,
                       save_path='./',
                       name='test',
                       frame_size=None,
                       paper_width=210,
                       paper_height=297,
                       dpi=600):
    """
    create charuco board
    the paper is in portrait orientation, nrow means the number of markers in the vertical direction
    :param nrow:
    :param ncolumn:
    :param marker_dict:
    :param save_path:
    :param name
    :param frame_size: (width, height) the 1pt frame for easy cut, nothing is drawn by default
    :param paper_width: mm
    :param paper_height: mm
    :param dpi:
    :return:
    author: weiwei
    date: 20190420
    """
    aruco_dict = aruco.Dictionary_get(marker_dict)
    # 1 mm = _MM_TO_INCH inch
    a4npxrow = int(paper_height * _MM_TO_INCH * dpi)
    a4npxcolumn = int(paper_width * _MM_TO_INCH * dpi)
    bgimg = np.ones((a4npxrow, a4npxcolumn), dtype='uint8') * 255
    if frame_size is not None:
        frame_size[0] = int(frame_size[0] * _MM_TO_INCH * dpi)
        frame_size[1] = int(frame_size[1] * _MM_TO_INCH * dpi)
        if a4npxcolumn < frame_size[0] + 2:
            print("Frame width must be smaller than the number of pixels in each row.")
        if a4npxrow < frame_size[1] + 2:
            print("Frame height must be smaller than the number of pixels in each column.")
        framelft = int((a4npxcolumn - frame_size[0]) / 2 - 1)
        framergt = int(framelft + 1 + frame_size[0])
        frametop = int((a4npxrow - frame_size[1]) / 2 - 1)
        framedown = int(frametop + 1 + frame_size[1])
        bgimg[frametop:framedown + 1, framelft:framelft + 1] = 0
        bgimg[frametop:framedown + 1, framergt:framergt + 1] = 0
        bgimg[frametop:frametop + 1, framelft:framergt + 1] = 0
        bgimg[framedown:framedown + 1, framelft:framergt + 1] = 0
    squaresizepx = int(square_size * _MM_TO_INCH * dpi)
    squareareanpxrow = squaresizepx * nrow
    uppermargin = int((a4npxrow - squareareanpxrow) / 2)
    squareareanpxcolumn = squaresizepx * ncolumn
    leftmargin = int((a4npxcolumn - squareareanpxcolumn) / 2)
    if (uppermargin <= 10) or (leftmargin <= 10):
        print("Too many markers! Reduce nrow and ncolumn.")
        return
    board = aruco.CharucoBoard_create(ncolumn, nrow, square_size,
                                      .57 * square_size, aruco_dict)
    imboard = board.draw((squareareanpxcolumn, squareareanpxrow))
    print(imboard.shape)
    startrow = uppermargin
    endrow = uppermargin + squareareanpxrow
    startcolumn = leftmargin
    endcolumn = leftmargin + squareareanpxcolumn
    bgimg[startrow:endrow, startcolumn:endcolumn] = imboard
    im = Image.fromarray(bgimg).convert("L")
    im.save(save_path + name + ".pdf", "PDF", resolution=dpi)
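A minimal usage sketch for make_charuco_board; the values below are illustrative assumptions.

# Print a 7x5-square board with 25 mm squares on A4 paper,
# adding a 190 mm x 277 mm cutting frame around the page.
make_charuco_board(nrow=7,
                   ncolumn=5,
                   marker_dict=aruco.DICT_4X4_250,
                   square_size=25,
                   save_path='./',
                   name='charuco_a4',
                   frame_size=[190, 277],
                   dpi=600)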
Example #26
0
    def calibrate_camera_charucoBoard(self):
        """
        Method to obtain the camera intrinsic parameters to perform the aruco pose estimation
        Args:
        Returns:
            mtx: cameraMatrix Output 3x3 floating-point camera matrix
            dist: Output vector of distortion coefficient
            rvecs: Output vector of rotation vectors (see Rodrigues ) estimated for each board view
            tvecs: Output vector of translation vectors estimated for each pattern view.
        """

        aruco_dict = aruco.Dictionary_get(self.aruco_dict)
        board = aruco.CharucoBoard_create(7, 5, 1, .8, aruco_dict)
        images = []
        logger.info('Start moving the ChArUco board randomly')
        n = 400  # number of frames
        for i in range(n):
            frame = self.kinect.get_color()
            images.append(frame)
        logger.info("Stop moving the board")
        img_frame = numpy.array(images)[0::5]

        logger.info("Calculating Aruco location of %s images" %
                    str(img_frame.shape[0]))
        allCorners = []
        allIds = []
        decimator = 0

        for im in img_frame:
            # print("=> Processing image {0}".format(im))
            # frame = cv2.imread(im)
            gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
            res = cv2.aruco.detectMarkers(gray, aruco_dict)

            if len(res[0]) > 0:
                res2 = cv2.aruco.interpolateCornersCharuco(
                    res[0], res[1], gray, board)
                if res2[1] is not None and res2[2] is not None and len(
                        res2[1]) > 3 and decimator % 1 == 0:
                    allCorners.append(res2[1])
                    allIds.append(res2[2])

            decimator += 1
        imsize = gray.shape
        logger.info("Finish")

        logger.info("Calculating camera parameters")
        # Initial guess: principal point at the image centre (imsize is gray.shape, i.e. (height, width))
        cameraMatrixInit = numpy.array([[2000., 0., imsize[1] / 2.],
                                        [0., 2000., imsize[0] / 2.],
                                        [0., 0., 1.]])

        distCoeffsInit = numpy.zeros((5, 1))
        flags = (cv2.CALIB_USE_INTRINSIC_GUESS + cv2.CALIB_RATIONAL_MODEL)
        ret, mtx, dist, rvecs, tvecs, stdDeviationsIntrinsics, stdDeviationsExtrinsics, perViewErrors = \
            cv2.aruco.calibrateCameraCharucoExtended(
                charucoCorners=allCorners,
                charucoIds=allIds,
                board=board,
                imageSize=imsize,
                cameraMatrix=cameraMatrixInit,
                distCoeffs=distCoeffsInit,
                flags=flags,
                criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 10000, 1e-9)
            )

        logger.info("Finish")

        self.calib.camera_mtx = mtx.tolist()
        self.calib.camera_dist = dist.tolist()

        return mtx, dist, rvecs, tvecs
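A hedged follow-up sketch, not part of the original class: once mtx and dist are known, the pose of the same ChArUco board can be estimated from a new colour frame. Here frame, aruco_dict and board are assumed to match the calibration setup.

# Estimate the board pose in one new frame using the calibrated intrinsics
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
corners, ids, _ = cv2.aruco.detectMarkers(gray, aruco_dict)
if ids is not None and len(ids) > 0:
    n_corners, ch_corners, ch_ids = cv2.aruco.interpolateCornersCharuco(
        corners, ids, gray, board)
    if n_corners > 3:
        ok, rvec, tvec = cv2.aruco.estimatePoseCharucoBoard(
            ch_corners, ch_ids, board, mtx, dist, None, None)
        if ok:
            print('board rotation (Rodrigues):', rvec.ravel())
            print('board translation:', tvec.ravel())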
import cv2
import cv2.aruco as aruco

# Define the type of ArUco markers to make the board from
# aruco_dict = aruco.Dictionary_get(aruco.DICT_ARUCO_ORIGINAL)
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)

# Define the board's setup and create it
NUM_SQUARES_X = 7
NUM_SQUARES_Y = 5

# We're using inches to match opencv dpi saving
SQUARE_LENGTH = 1.5                       # absolute length of a square (inch) 
MARKER_LENGTH = 0.8 * SQUARE_LENGTH     # absolute length of a marker (inch)

# CharucoBoard_create(squaresX, squaresY, squareLength, markerLength, dictionary)
board = aruco.CharucoBoard_create(NUM_SQUARES_X, 
                                  NUM_SQUARES_Y, 
                                  SQUARE_LENGTH, 
                                  MARKER_LENGTH, 
                                  aruco_dict)

# Then draw it, specifying the output dimensions in pixels. We're using 72
# pixels per inch here, to match OpenCV's saving of png images. This will create
# markers whose physical size, when printed at 100%, matches what we
# specified above.
imboard = board.draw((int(NUM_SQUARES_X * SQUARE_LENGTH * 72), 
                      int(NUM_SQUARES_Y * SQUARE_LENGTH * 72)))

# Now, save the board. Be sure to change the filename to reflect the aruco_dict
# that you used to generate it. We also include the square length and marker 
# length since those parameters are important for both calibration and pose
# estimation. We'll first save it as a .png
cv2.imwrite(f'ChArUco_orig_sq{SQUARE_LENGTH}_mark{MARKER_LENGTH:.2f}.png',
            imboard)
Example #28
0
from __future__ import print_function
import cv2
import cv2.aruco as aruco
import numpy as np
from PIL import Image
import sys
import rosbag
from cv_bridge import CvBridge, CvBridgeError

#arucoType = aruco.DICT_6X6_100
#arucoType = aruco.DICT_6X6_1000 # The single marker
#arucoType = aruco.DICT_5X5_250

arucoType = aruco.DICT_4X4_250
arucoBoard = aruco.CharucoBoard_create(5, 5, 0.7, 0.5,
                                       aruco.Dictionary_get(arucoType))
dictionary = cv2.aruco.getPredefinedDictionary(arucoType)
board = arucoBoard.draw((7014, 4962))

# The following is a calibration result for the ZED camera:
# (reprojection error, camera matrix, distortion coefficients, per-view rotation vectors, ...)

cal = (2.114370012156985,
       np.array([[1.20642153e+04, 0.00000000e+00, 3.87673066e+02],
                 [0.00000000e+00, 2.40412556e+04, 6.09900945e+02],
                 [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
       np.array([[
           2.97019230e+01, -1.81529858e+02, 4.60817372e-01, -4.19902342e-01,
           -1.34497073e+04
       ]]), [
           np.array([[1.70704508], [-1.40506077], [0.78495047]]),
           np.array([[1.70206233], [-1.40638324], [0.78495157]]),
Example #29
0
import numpy as np

import cv2, PIL, os
from cv2 import aruco
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib as mpl

aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
board = aruco.CharucoBoard_create(7, 5, 1, .8, aruco_dict)
imboard = board.draw((2000, 2000))
cv2.imwrite("./chessboard.tiff", imboard)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
plt.imshow(imboard, cmap=mpl.cm.gray, interpolation="nearest")
ax.axis("off")
#plt.show()
plt.savefig("generated_charuco_board.png")
plt.show()
Example #30
0
import numpy as np
import cv2, os
from cv2 import aruco
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib as mpl



workdir = "./workdir/"
aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_100)
board = aruco.CharucoBoard_create(8, 5, 5, 4, aruco_dict)
imboard = board.draw((1750, 1250))
cv2.imwrite("chessboard.png", imboard)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
plt.imshow(imboard, cmap=mpl.cm.gray, interpolation="nearest")
ax.axis("off")
plt.show()