def get_corners_aruco(fname, board, skip=20):
    cap = cv2.VideoCapture(fname)

    length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    allCorners = []
    allIds = []

    go = int(skip / 2)

    board_type = get_board_type(board)
    board_size = get_board_size(board)

    max_size = get_expected_corners(board)

    for framenum in trange(length, ncols=70):
        ret, frame = cap.read()
        if not ret:
            break

        # sample every `skip`-th frame, plus a short burst of consecutive frames
        # (tracked by the `go` counter) right after a successful detection
        if framenum % skip != 0 and go <= 0:
            continue

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        params = aruco.DetectorParameters_create()
        params.cornerRefinementMethod = aruco.CORNER_REFINE_CONTOUR
        params.adaptiveThreshWinSizeMin = 100
        params.adaptiveThreshWinSizeMax = 700
        params.adaptiveThreshWinSizeStep = 50
        params.adaptiveThreshConstant = 5

        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, board.dictionary, parameters=params
        )

        if corners is None or len(corners) <= 2:
            go = max(0, go - 1)
            continue

        detectedCorners, detectedIds, rejectedCorners, recoveredIdxs = aruco.refineDetectedMarkers(
            gray, board, corners, ids, rejectedImgPoints, parameters=params
        )

        if board_type == "charuco" and len(detectedCorners) > 0:
            ret, detectedCorners, detectedIds = aruco.interpolateCornersCharuco(
                detectedCorners, detectedIds, gray, board
            )

        if (
            detectedCorners is not None
            and len(detectedCorners) >= 2
            and len(detectedCorners) <= max_size
        ):
            allCorners.append(detectedCorners)
            allIds.append(detectedIds)
            go = int(skip / 2)

        go = max(0, go - 1)

    cap.release()

    return allCorners, allIds
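
# A minimal usage sketch (file name, board layout and image size below are
# assumptions, not part of the original snippet): collect Charuco corners from
# a calibration video and feed them to the standard aruco calibration call.
if __name__ == "__main__":
    import cv2
    from cv2 import aruco

    dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
    charuco_board = aruco.CharucoBoard_create(7, 5, 0.04, 0.02, dictionary)
    all_corners, all_ids = get_corners_aruco("calib_video.avi", charuco_board, skip=20)
    # with enough distinct views, the collected corners can calibrate the camera
    err, mtx, dist, rvecs, tvecs = aruco.calibrateCameraCharuco(
        all_corners, all_ids, charuco_board, (1920, 1080), None, None)
    print("reprojection error:", err)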
Example #2
    def run(self):
        cap, resolution = init_cv()
        t = threading.current_thread()
        cf, URI = init_cf()

        if cf is None:
            print('Not running cf code.')
            return

        aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
        parameters = aruco.DetectorParameters_create()
        font = cv2.FONT_HERSHEY_PLAIN
        id2find = [0, 1]
        marker_size = [0.112, 0.0215]  #0.1323
        ids_seen = [0, 0]
        id2follow = 0
        zvel = 0.0

        camera_matrix, camera_distortion, _ = loadCameraParams('runcam_nano3')

        with SyncCrazyflie(URI, cf=Crazyflie(rw_cache='./cache')) as scf:
            cf = scf.cf
            activate_high_level_commander(cf)

            # We take off when the commander is created
            with PositionHlCommander(scf) as pc:

                #pc.go_to(0.0, 0.0, 0.5)

                while not self._stopevent.isSet():

                    ret, frame = cap.read()

                    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                    corners, ids, rejected = aruco.detectMarkers(
                        image=gray,
                        dictionary=aruco_dict,
                        parameters=parameters,
                        cameraMatrix=camera_matrix,
                        distCoeff=camera_distortion)

                    if ids is not None:

                        #-- Draw the detected marker and put a reference frame over it
                        aruco.drawDetectedMarkers(frame, corners)

                        #-- Work out which marker has been seen most recently
                        if 0 in ids:
                            ids_seen[0] += 1
                        else:
                            ids_seen[0] = 0

                        if 1 in ids:
                            ids_seen[1] += 1
                        else:
                            ids_seen[1] = 0

                        id2follow = 0  #np.argmax(ids_seen)
                        idx_r, idx_c = np.where(ids == id2follow)

                        #-- Extract the id to follow
                        corners = np.asarray(corners)
                        corners = corners[idx_r, :]

                        #-- Estimate poses of the marker to follow
                        rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
                            corners, marker_size[id2follow], camera_matrix,
                            camera_distortion)

                        #-- Unpack the output, get only the first
                        rvec, tvec = rvecs[0, 0, :], tvecs[0, 0, :]

                        # Draw the detected markers axis
                        for i in range(len(rvecs)):
                            aruco.drawAxis(frame, camera_matrix,
                                           camera_distortion, rvecs[i, 0, :],
                                           tvecs[i, 0, :],
                                           marker_size[id2follow] / 2)

                        #-- Obtain the rotation matrix tag->camera
                        R_ct = np.matrix(cv2.Rodrigues(rvec)[0])
                        R_tc = R_ct.T

                        #-- Now get position and attitude of the camera with respect to the marker
                        if id2follow == 0:
                            pos_camera = -R_tc * (np.matrix(tvec).T - T_slide)
                        else:
                            pos_camera = -R_tc * (np.matrix(tvec).T)

                        str_position = "Position error: x=%4.4f  y=%4.4f  z=%4.4f" % (
                            pos_camera[0], pos_camera[1], pos_camera[2])
                        cv2.putText(frame, str_position, (0, 20), font, 1,
                                    ugly_const.BLACK, 2, cv2.LINE_AA)

                        #-- Get the attitude of the camera with respect to the marker frame
                        roll_camera, pitch_camera, yaw_camera = rotationMatrixToEulerAngles(
                            R_flip * R_tc)
                        att_camera = [
                            math.degrees(roll_camera),
                            math.degrees(pitch_camera),
                            math.degrees(yaw_camera)
                        ]

                        str_attitude = "Anglular error: roll=%4.4f  pitch=%4.4f  yaw (z)=%4.4f" % (
                            att_camera[0], att_camera[1], att_camera[2])
                        cv2.putText(frame, str_attitude, (0, 40), font, 1,
                                    ugly_const.BLACK, 2, cv2.LINE_AA)

                        drawHUD(frame, resolution, yaw_camera)

                        pos_flip = np.array([[-pos_camera.item(1)],
                                             [pos_camera.item(0)]])
                        cmd_flip = np.array(
                            [[np.cos(yaw_camera), -np.sin(yaw_camera)],
                             [np.sin(yaw_camera),
                              np.cos(yaw_camera)]])
                        pos_cmd = cmd_flip.dot(pos_flip)

                        # pos_cmd only holds x and y; use the camera height for z
                        cf.extpos.send_extpos(pos_cmd[0], pos_cmd[1],
                                              pos_camera.item(2))

                        print('Following tag ', id2follow, ' with position ',
                              pos_cmd[0], pos_cmd[1])

                        #if np.sqrt(pos_cmd[0]*pos_cmd[0]+pos_cmd[1]*pos_cmd[1]) > 0.05:
                        #mc._set_vel_setpoint(pos_cmd[0]*ugly_const.Kx, pos_cmd[1]*ugly_const.Ky, zvel, -att_camera[2]*ugly_const.Kyaw)
                        # elif pos_camera.item(2) > 0.1:
                        #     mc._set_vel_setpoint(pos_cmd[0]*ugly_const.Kx, pos_cmd[1]*ugly_const.Ky, -0.05, -att_camera[2]*ugly_const.Kyaw)
                        # else:
                        #     mc._set_vel_setpoint(pos_cmd[0]*ugly_const.Kx, pos_cmd[1]*ugly_const.Ky, 0.05, -att_camera[2]*ugly_const.Kyaw)

                        pc.go_to(0.0, 0.0)

                    cv2.imshow('frame', frame)

                    key = cv2.waitKey(1) & 0xFF
                    if key == ord('q'):
                        cap.release()
                        cv2.destroyAllWindows()
                        break
                    elif key == ord('w'):
                        zvel += 0.1
                    elif key == ord('s'):
                        zvel -= 0.1

                # We land when the commander goes out of scope

                print('Landing...')

        print('Stopping cf_thread')
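
# The run() method above relies on several module-level names the snippet does
# not show (R_flip, T_slide, ugly_const, init_cv, init_cf, loadCameraParams,
# drawHUD, ...). One of them, rotationMatrixToEulerAngles, is a standard helper;
# a common implementation looks like this (an assumption, not the author's code):
import math

def rotationMatrixToEulerAngles(R):
    # classic ZYX Euler-angle extraction from a 3x3 rotation matrix
    sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
    if sy > 1e-6:
        roll = math.atan2(R[2, 1], R[2, 2])
        pitch = math.atan2(-R[2, 0], sy)
        yaw = math.atan2(R[1, 0], R[0, 0])
    else:  # gimbal-lock fallback
        roll = math.atan2(-R[1, 2], R[1, 1])
        pitch = math.atan2(-R[2, 0], sy)
        yaw = 0.0
    return roll, pitch, yaw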
def multi_marker_pose():
    '''
    Function to perform pose estimation on ArUco markers
    Return  : rvec_list : list of estimated rotation vectors
              tvec_list : list of estimated translation vectors
    '''
    cap1 = cv2.VideoCapture(0)
    print('Calibrating....')
    mtx, dist, _, _ = calib()
    print('Calibrated')
    dir_name = os.getcwd()
    # load the twenty reference models, models/1.obj .. models/20.obj
    objs = [OBJ(os.path.join(dir_name, 'models/%d.obj' % i), swapyz=True)
            for i in range(1, 21)]

    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    params = aruco.DetectorParameters_create()
    # detect the marker in each reference image, markers/m1.jpg .. markers/m20.jpg
    refs, corners_src, ids_src = [], [], []
    for i in range(1, 21):
        ref = cv2.imread(os.path.join(dir_name, 'markers/m%d.jpg' % i), 0)
        c_src, i_src, _ = aruco.detectMarkers(ref, aruco_dict, parameters=params)
        refs.append(ref)
        corners_src.append(c_src)
        ids_src.append(i_src)
    font = cv2.FONT_HERSHEY_SIMPLEX  # used for the id labels drawn on the frame
    count = 0
    while(1):

        _, frame = cap1.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        corners, ids, rejected = aruco.detectMarkers(gray, aruco_dict,
                                                    parameters = params)
        if np.all(ids != None):
            rvec_list = []
            tvec_list = []
            aruco.drawDetectedMarkers(frame, corners)
            for i in range(len(ids)):
                rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(corners[i], 0.05, mtx, dist)
                #aruco.drawAxis(frame, mtx, dist, rvecs[0], tvecs[0], 0.05)
                cv2.putText(frame, str(ids[i][0]),
                                    tuple(corners[i][0][2]),
                                    font, 0.5, (0, 0, 255), 1, 4)
                rvec_list.append(rvecs)
                #print(tvecs[0][0])
                tvec_list.append(tvecs)
            # match the detected ids against each reference marker and render its model
            for k in range(20):
                if not np.array_equal(ids, ids_src[k]):
                    continue
                homography, mask = cv2.findHomography(corners_src[k][0], corners[0],
                                                      cv2.RANSAC, 5.0)
                if homography is not None:
                    try:
                        # obtain 3D projection matrix from homography matrix and camera parameters
                        projection = projection_matrix(mtx, homography)
                        # project cube or model
                        frame = render(frame, objs[k], projection, refs[k], False)
                    except Exception:
                        pass
                break

        img = cv2.imread('clk4.png', 1)
        img1 = cv2.imread('calendar3.png', 1)
        img2 = cv2.imread('tv.png', 1)
        rows2, cols2, _ = img2.shape
        rows, cols, xyz = img.shape
        row, col, xsd = img1.shape
        date = time.ctime()
        if (count % 400==0):
            info,c_t_abs,c_t,c_scores = showmatch()
        img = cv2.putText(img, str(str(date).split(" ")[3]), (15,40), cv2.FONT_HERSHEY_SCRIPT_SIMPLEX , 0.7, (0,0,0),1)
        img2 = cv2.putText(img2, "LIVE" , (20,22), cv2.FONT_HERSHEY_DUPLEX, 0.5, (0,0,255), 1)
        img2 = cv2.putText(img2, c_t[0].text+' VS '+c_t[1].text, (70,22), cv2.FONT_HERSHEY_DUPLEX, 0.5, (0,0,0), 1)
        img2 = cv2.putText(img2, c_t_abs[0].text+'      '+c_scores[0].text, (20,44), cv2.FONT_HERSHEY_DUPLEX, 0.45, (0,0,0), 1)
        img2 = cv2.putText(img2, c_t_abs[1].text+'      '+c_scores[1].text, (20,60), cv2.FONT_HERSHEY_DUPLEX, 0.45, (0,0,0), 1)
        # img1 = cv2.putText(img1, str(str(date).split(" ")[4]), (30,85), cv2.FONT_HERSHEY_SCRIPT_SIMPLEX , 0.7, (255,0,0),1)
        # img1 = cv2.putText(img1, str(str(date).split(" ")[1]), (15,60), cv2.FONT_HERSHEY_SCRIPT_SIMPLEX , 0.7, (255,0,0),1)
        # img1 = cv2.putText(img1, str(str(date).split(" ")[2]), (70,60), cv2.FONT_HERSHEY_SCRIPT_SIMPLEX , 0.7, (255,0,0),1)
        frame = cv2.resize(frame,(950,980),interpolation = cv2.INTER_AREA)
        #frame = cv2.putText(frame,voice,(600,400), cv2.FONT_HERSHEY_SCRIPT_SIMPLEX , 0.7, (255,0,0),1)
        modification2 = cv2.addWeighted(img2, 0.8, frame[600:600+rows2, 40:40+cols2], 0.7, 2)
        modification = cv2.addWeighted(img, 0.3, frame[60:60+rows, 40:40+cols], 0.7, 2)
        frame[60:60+rows, 40:40+cols] = modification
        frame[600:600+rows2, 40:40+cols2] = modification2
        count = count+1

        # modification1 = cv2.addWeighted(img1, 0.6, frame[55:55+row,800:800+col], 0.4, 2)
        # frame[40:40+row, 800:800+col] = modification1
        cv2.imshow('Pose Estimation', frame)
        if cv2.waitKey(1) == ord('q'):
            break

    cap1.release()
    cv2.destroyAllWindows()
    return rvec_list, tvec_list
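
# multi_marker_pose() calls two helpers that are not included in the source:
# projection_matrix() and render(). The projection step is the usual
# homography-to-3D decomposition used in marker-based AR; a sketch of what such
# a helper typically does (an assumption, not the original code):
import math
import numpy as np

def projection_matrix(camera_parameters, homography):
    # recover the 3D pose (R|t) encoded by the planar homography
    rot_and_transl = np.dot(np.linalg.inv(camera_parameters), -homography)
    col_1, col_2, col_3 = rot_and_transl[:, 0], rot_and_transl[:, 1], rot_and_transl[:, 2]
    l = math.sqrt(np.linalg.norm(col_1) * np.linalg.norm(col_2))
    rot_1, rot_2, translation = col_1 / l, col_2 / l, col_3 / l
    # re-orthogonalise the first two rotation columns
    c = rot_1 + rot_2
    p = np.cross(rot_1, rot_2)
    d = np.cross(c, p)
    rot_1 = (c / np.linalg.norm(c) + d / np.linalg.norm(d)) / math.sqrt(2)
    rot_2 = (c / np.linalg.norm(c) - d / np.linalg.norm(d)) / math.sqrt(2)
    rot_3 = np.cross(rot_1, rot_2)
    # stack into a 3x4 [R|t] matrix and project with the camera intrinsics
    return np.dot(camera_parameters, np.stack((rot_1, rot_2, rot_3, translation)).T)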
    def deleteframes(self, team_id, file_name, contours, flag=True):
        count = 0
        #name = "team_id_" + str(team_id[0]) + ".png"
        name = "team_id_" + str(self.num) + ".png"
        #print(name)
        cap = cv2.VideoCapture(file_name)

        flag_pm = 0
        li_pm = []
        count_pm = 0
        pm_list = []
        start = 0

        while (cap.isOpened()):

            #print("I am in deleteframes")
            ret, image = cap.read()
            if ret == False:
                break

            tl = 0
            tr = 0
            bl = 0
            br = 0
            tlx = 0
            tly = 0
            blx = 0
            bly = 0
            trix = 0
            triy = 0
            brx = 0
            bry = 0
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
            gray = clahe.apply(gray)
            gray = cv2.GaussianBlur(gray, (5, 5), 0)
            aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
            parameters = aruco.DetectorParameters_create()
            corners, ids, rejectedImgPoints = aruco.detectMarkers(
                gray, aruco_dict, parameters=parameters)
            gray = aruco.drawDetectedMarkers(gray, corners, ids)
            # cv2.imshow('frame', gray)

            for a in corners:
                tlx = a[0][0][0]
                tly = a[0][0][1]
                trix = a[0][1][0]
                triy = a[0][1][1]
                blx = a[0][3][0]
                bly = a[0][3][1]
                brx = a[0][2][0]
                bry = a[0][2][1]

            if ret == True and (tlx, tly, trix, triy, blx, bly, brx,
                                bry) != (0, 0, 0, 0, 0, 0, 0, 0):
                flag = True
                #print("printing frame" + str(count))
                #count += 1

                #flag_pm,li_pm,count_pm,pm_list,start = self.physical_marker(image,flag_pm=flag_pm,li_pm=li_pm,count_pm=count_pm,pm_list=pm_list,start=start)

                warped_frame = self.warping(image, contours)

                self.filter_top_of_robot(warped_frame)

            elif (tlx, tly, trix, triy, blx, bly, brx, bry) == (0, 0, 0, 0, 0,
                                                                0, 0, 0):
                if flag == True:
                    count += 1
                    print("count = " + str(count))
                    flag = False

            #cv2.imshow("Original", image)
            #cv2.waitKey(1)

        cap.release()
        cv2.destroyAllWindows()
        #print("i am out of delete_frames")
        cv2.imwrite(name, self.img)
        #cv2.imwrite("circleplot.png", img2)

        return (name, count, self.img, img_with_circles)  #,count_pm,pm_list)
Example #5
from imutils.video import VideoStream
import imutils
import time
import sys
import os
import cv2
import cv2.aruco as auo  # this snippet refers to the aruco module as `auo`
from calibration import calibrate

print("[INFO] Use Ctrl+C to exit.")
print("[INFO] calibrating camera...")
ret, camera_matrix, dist_coeffs = calibrate()
if ret:
    print("[INFO] attained camera calibration values.")
else:
    print("[ERROR] failed to get camera calibration values...")

arucoDict = auo.Dictionary_get(auo.DICT_6X6_1000)
arucoParams = auo.DetectorParameters_create()

print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)

while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 1000 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=1000)
    # detect ArUco markers in the input frame
    (corners, ids, rejected) = auo.detectMarkers(frame,
                                                 arucoDict,
                                                 parameters=arucoParams)
    if len(corners) > 0:
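        # the source snippet is cut off at this point; an assumed minimal body
        # simply draws whatever was detected so the loop remains valid Python
        auo.drawDetectedMarkers(frame, corners, ids)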
import std_msgs, std_srvs
import numpy as np
import cv2
import cv2.aruco as aruco
import os
import pickle
from aruco_hand_eye.srv import aruco_info, aruco_infoResponse
import time
import pyrealsense2 as rs
NUMBER = 5
# # Constant parameters used in Aruco methods
# ARUCO_PARAMETERS = aruco.DetectorParameters_create()
# ARUCO_DICT = aruco.Dictionary_get(aruco.DICT_4X4_100)

# Constant parameters used in Aruco methods
ARUCO_PARAMETERS = aruco.DetectorParameters_create()
ARUCO_DICT = aruco.Dictionary_get(aruco.DICT_4X4_50)
CHARUCOBOARD_ROWCOUNT = 7
CHARUCOBOARD_COLCOUNT = 5

# Create grid board object we're using in our stream
CHARUCO_BOARD = aruco.CharucoBoard_create(
    squaresX=CHARUCOBOARD_COLCOUNT,
    squaresY=CHARUCOBOARD_ROWCOUNT,
    squareLength=0.0322,
    markerLength=0.0216,
    # squareLength=0.04,
    # markerLength=0.02,
    dictionary=ARUCO_DICT)
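
# A sketch of how such a board object is typically used: detect the markers,
# then interpolate the Charuco (chessboard) corner positions on the board.
# Illustrative only; the original service code is not shown.
def find_charuco_corners(gray_image):
    corners, ids, _ = aruco.detectMarkers(gray_image, ARUCO_DICT,
                                          parameters=ARUCO_PARAMETERS)
    if ids is None:
        return None, None
    # refine marker detections into Charuco corner positions
    n, ch_corners, ch_ids = aruco.interpolateCornersCharuco(
        corners, ids, gray_image, CHARUCO_BOARD)
    if n < 4:
        return None, None
    return ch_corners, ch_ids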

Example #7
def detect_motion(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock

    # times = []

    # loop over frames from the video stream
    while True:
        # Start time
        # start = time.time()

        ret, frame = vs.read()

        # operations on the frame
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # set dictionary size depending on the aruco marker selected
        aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)

        # detector parameters can be set here (List of detection parameters[3])
        parameters = aruco.DetectorParameters_create()
        parameters.adaptiveThreshConstant = 10

        # lists of ids and the corners belonging to each id
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)

        # font for displaying text (below)
        font = cv2.FONT_HERSHEY_SIMPLEX

        # check if the ids list is not empty
        # if no check is added the code will crash
        if np.all(ids != None):

            # estimate pose of each marker and return the values
            # rvet and tvec-different from camera coefficients
            rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
                corners, 0.05, mtx, dist)
            # (rvec-tvec).any() # get rid of that nasty numpy value array error

            for i in range(0, ids.size):
                # draw axis for the aruco markers
                aruco.drawAxis(frame, mtx, dist, rvec[i], tvec[i], 0.1)

            # draw a square around the markers
            aruco.drawDetectedMarkers(frame, corners)

            # code to show ids of the marker found
            strg = ''
            for i in range(0, ids.size):
                strg += str(ids[i][0]) + ', '

            cv2.putText(frame, "Id: " + strg, (0, 64), font, 1, (0, 255, 0), 2,
                        cv2.LINE_AA)

        else:
            # code to show 'No Ids' when no markers are found
            cv2.putText(frame, "No Ids", (0, 64), font, 1, (0, 255, 0), 2,
                        cv2.LINE_AA)

        # end = time.time()
        #
        # times.append(end - start)
        #
        # if len(times) > 10:
        # 	times = times[:9]
        #
        # cv2.putText(frame, f"FPS: {round(len(times) / sum(times))}", (0, 100), font, 1, (0, 255, 0), 2, cv2.LINE_AA)

        # acquire the lock, set the output frame, and release the
        # lock
        with lock:
            outputFrame = frame.copy()
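
# detect_motion() above reads several module-level globals that the snippet does
# not define (vs, outputFrame, lock, mtx, dist). A minimal, assumed setup --
# the calibration file name and camera index are placeholders -- might be:
import threading
import numpy as np
import cv2
import cv2.aruco as aruco

lock = threading.Lock()
outputFrame = None
vs = cv2.VideoCapture(0)                  # the snippet calls vs.read() like a VideoCapture
with np.load("calibration.npz") as data:  # hypothetical calibration file
    mtx, dist = data["mtx"], data["dist"]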
def main(marker_length, marker_separation):

    # opencv/aruco settings
    cam = cv2.VideoCapture(0)
    ardict = aruco.Dictionary_get(aruco.DICT_4X4_1000)
    board = aruco.GridBoard_create(4, 5, marker_length, marker_separation,
                                   ardict)

    parameters = aruco.DetectorParameters_create()
    parameters.minMarkerPerimeterRate = 0.03

    # load camera parameters
    with open("calib.json", 'r') as f:
        data = json.load(f)

    dr = DrawClass(data.get('camera_matrix'), data.get('dist_coeff'))
    # and make some variables for aruco
    mtx = dr.mtx
    dist = dr.dist

    # print(cv2.calibrationMatrixValues(mtx, (640, 480), 3.6, 2.7))

    # global variables
    cs = CoordinatesSystem()

    while cam.isOpened():
        k = cv2.waitKey(16)

        ret, frame = cam.read()
        info_screen = np.ones((500, 1000, 3), np.uint8)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        corners, ids, rejected_img_points = aruco.detectMarkers(
            gray, ardict, parameters=parameters)

        if len(corners) > 0:
            frame = dr.draw_markers(frame, corners, ids)

            r_vecs, t_vecs, _ = aruco.estimatePoseSingleMarkers(
                corners, marker_length, mtx, dist)

            markers = [
                Marker(ids[i][0], r_vecs[i], t_vecs[i])
                for i in range(len(corners))
            ]
            frame = dr.draw_axis_of_markers(frame, markers, 1)
            info_screen = dr.print_text(info_screen, markers)

            if len(markers) == 2:
                # rv, tv = relative_position(markers[0].r_vec, markers[0].t_vec, markers[1].r_vec, markers[1].t_vec)
                tv = relative_position2(markers[0].r_vec, markers[0].t_vec,
                                        markers[1].t_vec)
                info_screen = dr.draw_debug_lines(info_screen, tv)
                print(tv)

            if len(markers) == 1 and k == ord('p'):
                cs.reset(markers[0].r_vec, markers[0].t_vec)
                print(cs.rot_matrix)

            if len(markers) == 1 and cs.fcs_r_vec is not None:
                tv3 = cs.get_coordinates_in_cs(markers[0].t_vec)
                frame = dr.draw_axis_points(frame, cs.fcs_r_vec, cs.fcs_t_vec)
                info_screen = dr.draw_debug_lines(info_screen, tv3)
                print(tv3)

        cv2.line(frame, (320, 240), (320, 240), (0, 0, 0), 4)

        cv2.imshow("frame", frame)
        cv2.imshow("info", info_screen)

        if k == ord('q'):
            cam.release()
            break
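
# The helpers used above (DrawClass, CoordinatesSystem, Marker, relative_position2)
# are not part of the source. As an illustration of the idea only,
# relative_position2 presumably expresses marker 1's position in marker 0's frame:
def relative_position2(r_vec0, t_vec0, t_vec1):
    R0, _ = cv2.Rodrigues(r_vec0)                 # marker 0 rotation in the camera frame
    offset = np.asarray(t_vec1).reshape(3, 1) - np.asarray(t_vec0).reshape(3, 1)
    return R0.T @ offset                          # rotate the offset into marker 0's axes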
def main():

    ser = serial.Serial('COM3', 115200, timeout=0)
    print(ser.name)

    # targetState = (20,80,0)
    robotNode = 1
    robotTargetLoc1 = (80, 20)
    robotMap = []
    # robotLocation2 = (60,70)
    globalMap = initializeGraph()
    mapping.plotGraph(robotMap, [robotTargetLoc1])
    performRobotWriting = True

    #Holds the values of the four corners of the robotic environment. Goes clockwise starting with the top left corner
    #The ids for the aruco tags for each of the four corners goes from 0 to 3 clockwise, starting with the top left corner
    fourCorners = [[], [], [], []]

    vc = cv2.VideoCapture(1)

    # np.load("sample_images.npz")
    with np.load("sample_images.npz") as data:
        mtx = data['mtx']
        dist = data['dist']
    # ret,frame = vc.read()
    # image = frame
    # time.sleep(1)
    # ret,frame = vc.read()
    # image = frame

    aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
    parameters = aruco.DetectorParameters_create()
    iter = 0
    node = 1
    x = ''
    y = ''
    a = ''
    initialized = False
    initialxya = []
    iter = 0
    while (vc.isOpened()):
        # if iter % 90 == 0:
        #     plt.pause(0.001)
        iter = (iter + 1) % 25

        ret, frame = vc.read()
        image = frame
        # image = cv2.imread("52814747.png")
        # image = cv2.imread("arucoTestImage.png")

        # aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            image, aruco_dict, parameters=parameters)
        rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
            corners, 0.1, mtx, dist)
        rotations = []
        if ids is not None:
            for i in range(len(ids)):
                if ids[i] == 5:
                    if rvec[i][0][0] > 0:
                        # rvec[i][0][0] = -1 * rvec[i][0][0]
                        image = aruco.drawAxis(image, mtx, dist, rvec[i],
                                               tvec[i], 0.1)
                        a = rvec[i][0][1]
                        # print(rvec[i])
                        a = objectLocalization.convertAtoRadians(a)
                # if ids[i] < 5:
                #     print("Drawing ID: " + str(ids[i]) )
                #     print("before")
                #     print(rvec[i])
                #     image = aruco.drawAxis(image,mtx,dist,rvec[i],tvec[i],0.1)
                #     print("after")

        if ids is not None:
            for i in range(len(ids)):
                if ids[i] == 0:
                    fourCorners[0] = objectLocalization.getRectMid(
                        corners[i][0])
                elif ids[i] == 1:
                    fourCorners[1] = objectLocalization.getRectMid(
                        corners[i][0])
                elif ids[i] == 2:
                    fourCorners[2] = objectLocalization.getRectMid(
                        corners[i][0])
                elif ids[i] == 3:
                    fourCorners[3] = objectLocalization.getRectMid(
                        corners[i][0])
                elif ids[i] == 5:
                    if (len(fourCorners[0]) > 0 and len(fourCorners[1]) > 0
                            and len(fourCorners[2]) > 0
                            and len(fourCorners[3]) > 0):
                        [x, y] = objectLocalization.scalePoint(
                            fourCorners,
                            objectLocalization.getRectMid(corners[i][0]))
                        [x,
                         y] = objectLocalization.convertToRobotLocation(x, y)

        print(fourCorners)
        # print(corners)
        # print(ids)

        # print(corners, ids, rejectedImgPoints)

        #NOTE: Two lines below used to draw markers originally
        # aruco.drawDetectedMarkers(image, corners, ids)
        # aruco.drawDetectedMarkers(image, rejectedImgPoints, borderColor=(100, 0, 240))

        cv2.imshow('Video', image)

        key = cv2.waitKey(20)

        if key == 27:
            # exit on ESC
            break

        # iter = 0
        # while (iter < 5):
        temp = ser.read(1000)
        # temp = ser.readline()
        if temp != b'':
            if temp == b'q' or temp == b'Q':
                ser.close()
                break
            # print(temp)
            # if temp[0] == '$':
            if len(temp) > 0:
                RFIDinfo = parseRFIDInfo(temp)
                print("RFID Info: ")
                print(RFIDinfo)
                # print("RFIDInfo: " + str(RFIDinfo))
                if RFIDinfo != None:
                    if mapping.euclideanDistance(
                        (x, y), (RFIDinfo[0][0], RFIDinfo[0][1])
                    ) < 10 and mapping.euclideanDistance(
                        (x, y), robotTargetLoc1
                    ) < 10:  #Make sure we're in the range of the tag we think we are
                        (newTargetLoc, info, preprocessedMap
                         ) = mapping.updateMapandDetermineNextStep(
                             robotMap, globalMap, robotTargetLoc1, RFIDinfo)
                        print("------------------" + str(newTargetLoc) +
                              "---------------------")
                        if performRobotWriting:
                            message = prepRFIDInfo(robotTargetLoc1, info,
                                                   preprocessedMap)
                            message = message + "\r\n"
                            ser.write(str.encode(message))
                            print("***********Writing Message*************")
                            print(message)
                            ser.flush()

                        mapping.plotGraph(preprocessedMap, [robotTargetLoc1])
                        robotTargetLoc1 = newTargetLoc
                        # mapping.plotGraph(robotMap,[robotTargetLoc1])

                # if len(code) >= len(shortestSample):
                # # if not xVals and not yVals:
                #     tempState = (int(xVals),int(yVals),0)
                #     if tempState != (0,0,0):
                #         targetState = tempState
                #         print("------------- NEW TARGET STATE ----------")
                #         print(targetState)
        first = True
        job = multiprocessing.Process(target=plotMap, args=(robotMap, (x, y)))

        # time.sleep(0.1)
        # print(x == '')
        # print(y == '')
        # print(a)
        # time.sleep(0.05)
        if (x != '' and y != '' and a != ''):
            # print("x: " + str(x))
            # print("y: " + str(y))
            #
            #     plt.pause(0.001)
            # job.start()
            # job.join()
            # if iter % 45 == 0:
            # job.start()
            # mapping.plotGraph(robotMap,[(x,y)])
            # plotMap(robotMap,(x,y))
            # if first:
            #     job.start()

            if (
                    len(initialxya) < 10
            ):  #looking to find the average of the first several measurements to get an accurate starting point
                initialxya.append((x, y, a))

            else:
                if not initialized:
                    avex = 0
                    avey = 0
                    avea = 0
                    #NOTE: Possible bug could occur if the robot is aligned along 0 degrees since this could fluctuate
                    #between 0 and 2 pi for values (resulting in an average of pi, the exact oppposite direction)
                    for i in range(10):
                        avex = avex + initialxya[i][0]
                        avey = avey + initialxya[i][1]
                        avea = avea + initialxya[i][2]
                    avex = avex / len(initialxya)
                    avey = avey / len(initialxya)
                    avea = avea / len(initialxya)
                    prevxya = (avex, avey, avea)
                    prevxya2 = (avex, avey, avea)
                    initialized = True
                    # print(initialxya)
                    previouslyMissed = False
                    falsePrev = prevxya
                else:
                    # if withinRange(a,prevxya2[2], math.pi * 1/ 2) or (withinRange(a,falsePrev[2],math.pi * 1 /2) and previouslyMissed): #Make sure the angle doesn't flip an unreasonable amount
                    #     if withinRange(a,prevxya2[2], math.pi * 1/ 2) or (withinRange(a,falsePrev[2],math.pi * 1 /2) and previouslyMissed): #Make sure the angle doesn't flip an unreasonable amount

                    # print("Target State: " + str(targetState))
                    # print("A: " + str(a))
                    if (True):
                        # if (not withinRange(a,prevxya[2] - math.pi,math.pi*4/8)):
                        # if (withinRange(a,prevxya2[2], math.pi * 3/ 4) and withinRange(a,prevxya[2], math.pi * 2/4)) or (previouslyMissed and withinRange(a,falsePrev[2],math.pi * 1 /8)): #3/4 1/2Make sure the angle doesn't flip an unreasonable amount
                        message = robotControl.prepareControlMessage(
                            (x, y, a), robotTargetLoc1)
                        message = "$" + str(robotNode) + str(
                            message) + '\r\n'  #preappend the robot node number
                        ser.write(str.encode(message))
                        # print(message)
                        ser.flush()
                        # if (withinRange(a,falsePrev[2],math.pi * 1 /2) and previouslyMissed):
                        #     prevxya2 = falsePrev
                        # else:
                        # prevxya2 = prevxya
                        prevxya2 = prevxya
                        prevxya = (x, y, a)
                        # print(initialxya)
                        # print((avex,avey,avea))
                        # ser.close()
                        # time.sleep(0.1)
                        # print("Just flushed")
                    else:
                        # print("MISSED IT")
                        # print(prevxya)
                        # print(prevxya2)
                        # print(x)
                        # print(y)
                        # print("A: " + str(a))
                        falsePrev = (x, y, a)
                        previouslyMissed = True

    cv2.waitKey(0)
    cv2.destroyAllWindows()
    message = "$" + str(
        robotNode) + "f 0\r\n"  #preappend the robot node number
    ser.write(str.encode(message))
    ser.close()
Example #10
def main():
    global aruco_and_middle, stop_pls, filtered
    color = (255, 255, 0)

    # Set aruco sizes and then load the dictionaries
    if six_by_six:
        aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_1000)
    else:
        aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_1000)
    parameters = aruco.DetectorParameters_create()

    # Keep running
    for frame in camera.capture_continuous(rawCapture,
                                           format="bgr",
                                           use_video_port=True):
        b = time.time()

        image = frame.array
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)

        marker = aruco.drawDetectedMarkers(gray,
                                           corners,
                                           ids,
                                           borderColor=color)

        erdil = cv2.erode(gray, kernel, iterations=5)
        erdil = cv2.dilate(erdil, kernel, iterations=5)

        mask = cv2.inRange(erdil, lower, upper)
        mask = cv2.bitwise_not(mask)
        ret, thresh = cv2.threshold(mask, 255, 255, 255)
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)
        res = cv2.bitwise_and(image, image, mask=mask)

        middle = -1
        middles = []
        if ids is None:
            for cnt in contours:
                x, y, w, h = cv2.boundingRect(cnt)
                middles.append(x + w / 2.0)

        else:
            for corner in corners[0][0]:
                middles.append(corner[0])

        if (len(middles) > 0):
            middle = sum(middles) / len(middles)
        else:
            middle = -1

        if filtered is None:
            filtered = middle

        # Exponential moving average filter
        else:
            filtered = 0.7 * middle + 0.3 * filtered

        send = round(concat(filtered), 3)
        # Set the global variable
        aruco_and_middle = (ids, send)

        if ids is not None:
            # Used for statistics
            e = time.time()
            avg.append(e - b)

        # The image displays
        cv2.drawContours(res, contours, -1, (0, 255, 0), 3)
        res = cv2.circle(res, (int(filtered), 100), 5, color, 3)

        cv2.imshow("Marker", marker)
        cv2.imshow("Result", res)

        key = cv2.waitKey(1) & 0xFF
        rawCapture.truncate(0)
        if key == ord("q"):
            print("Average aruco detection:", sum(avg) / len(avg))
            break
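
# The loop above depends on module-level names that are not shown in the snippet
# (camera, rawCapture, kernel, lower, upper, six_by_six, filtered, avg, concat,
# aruco_and_middle, stop_pls). A minimal, assumed Raspberry Pi setup:
import numpy as np
from picamera import PiCamera
from picamera.array import PiRGBArray

camera = PiCamera(resolution=(640, 480))
rawCapture = PiRGBArray(camera, size=(640, 480))
kernel = np.ones((3, 3), np.uint8)   # structuring element for erode/dilate
lower, upper = 0, 90                 # grayscale band fed to cv2.inRange (a guess)
six_by_six = True                    # choose the 6x6 ArUco dictionary
filtered, avg = None, []
aruco_and_middle, stop_pls = (None, -1), False

def concat(value):
    # placeholder for whatever scaling the original code applies before sending
    return value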
Example #11
    def get_transform(self):
        
        #aruco width and height
        
        ret, self.aruco_frame = self.video.read()
 
        gray = cv2.cvtColor(self.aruco_frame, cv2.COLOR_BGR2GRAY)
        
        T = cv2.getTrackbarPos('T', 'trackbars')
        retval, thresh = cv2.threshold(gray, T, 255, cv2.THRESH_BINARY)

        cv2.imshow("Thresh", thresh)
        aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_250)
        parameters = aruco.DetectorParameters_create()

        #lists of ids and the corners belonging to each id
        corners, ids, rejectedImgPoints = aruco.detectMarkers(thresh, aruco_dict, parameters=parameters)

        self.aruco_frame = aruco.drawDetectedMarkers(self.aruco_frame, corners, ids, (255, 255, 0))
        platform_center_x = 0
        platform_center_y = 0
        angle = 0
        
        
        #If an aruco marker is found
        if np.all(ids != None):
            
            #For all aruco markers found
            for i in range(0, int(ids.size)):

                if ids[i][0] == 34:
                    
                    aruco_x_coor,aruco_y_coor,angle,conversion_factor = self.__get_aruco_coor_angle(corners,i)
                
                    #Distance from the center of the aruco marker to the centre of the object in mm
                    mach_distance_x = 1#200
                    mach_distance_y = 800#480
                    
                    #distance from the center of the aruco marker to the bottom left corner of the platform
                    distance_offset = 0
                    
                    station_center_x,station_center_y = self.__get_object_center_from_aruco(aruco_x_coor,aruco_y_coor,angle,mach_distance_x,mach_distance_y,distance_offset,conversion_factor)
                
                    station_height = 200*conversion_factor
                    station_width = 200*conversion_factor
                    self.__detect_blocks_in_roi(station_center_x,station_center_y,angle,conversion_factor,station_height,station_width)
                
                elif ids[i][0] == 43:
                    
                    plat_distance_x = 200
                    plat_distance_y = 480
                    distance_offset = 50
                    
                    aruco_x_coor,aruco_y_coor,angle,conversion_factor = self.__get_aruco_coor_angle(corners,i)
                    platform_center_x,platform_center_y = self.__get_object_center_from_aruco(aruco_x_coor,aruco_y_coor,angle,plat_distance_x,plat_distance_y,distance_offset,conversion_factor)
                    platform_height = 500*conversion_factor
                    platform_width = 550*conversion_factor
                    self.__mask_object_with_aruco_coor(platform_center_x,platform_center_y,angle,conversion_factor,platform_height,platform_width)
                    
                else:
                    print("Unrecongnised ArUco marker")
                    
                    
                found = 1
                transform_dict = {
                        "state" : found,
                        "x" : platform_center_x,
                        "y" : platform_center_y,
                        "angle" : angle
                        }
                
                return (transform_dict)
        else:
            found = 0
            transform_dict = {
                        "state" : found,
                        "x" : 0,
                        "y" : 0,
                        "angle" : 0
                        }
            return (transform_dict)
def track_truck(rp):
    cap = cv2.VideoCapture(1)
    w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    #	print w
    #	print h
    d_pos = 1  #distance difference between cb and truck
    while True:
        ret, frame = cap.read()
        if ret == False:
            continue
        framebw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_250)
        parameters = aruco.DetectorParameters_create()
        corners, ids, _ = aruco.detectMarkers(framebw,
                                              aruco_dict,
                                              parameters=parameters)
        #		print ids
        i = ids.tolist().index([0])  #index of the marker whose id is 0
        #		print i
        l = 0
        l1 = 0
        z = int(ids[i][0])
        #		print z
        cv2.circle(frame, (corners[i][0][0][0], corners[i][0][0][1]), 5,
                   (125, 125, 125), -1)
        cv2.circle(frame, (corners[i][0][1][0], corners[i][0][1][1]), 5,
                   (0, 255, 0), -1)
        cv2.circle(frame, (corners[i][0][2][0], corners[i][0][2][1]), 5,
                   (180, 105, 255), -1)
        cv2.circle(frame, (corners[i][0][3][0], corners[i][0][3][1]), 5,
                   (255, 255, 255), -1)
        x = ((corners[i][0][0][0] + corners[i][0][2][0]) / 2)
        y = ((corners[i][0][0][1] + corners[i][0][2][1]) / 2)
        cv2.circle(frame, (int(x), int(y)), 5, (255, 0, 0), -1)  #center
        x1 = (corners[i][0][0][0] + corners[i][0][1][0]) / 2
        y1 = (corners[i][0][0][1] + corners[i][0][1][1]) / 2
        cv2.circle(frame, (int(x1), int(y1)), 5, (0, 0, 255),
                   -1)  #midpoint of starting edge
        cv2.line(frame, (int(x), int(y)), (int(x1), int(y1)), (255, 0, 0), 3)
        a = y1 - y
        b = x1 - x
        theta = 3.14 - math.atan2(
            a, b
        )  #subtract from pi to reorient the marker heading; atan2 returns angles in [-pi, pi]
        #		print theta
        #		print '........'
        font = cv2.FONT_HERSHEY_SIMPLEX
        q = (x - 320) * 0.003351064  #metres per pixel along x (earlier candidate scales: 0.00303077335822, 0.00353077335822, 0.003515625)
        r = (y - 240) * 0.003351064  #metres per pixel along y (earlier candidate scales: 0.003389831, 0.00302814921311, 0.00352814921311, 0.0034375)
        emptyBuff = bytearray()
        position = [-q, r, +4.1000e-02]
        orientation = [0, 0, theta]
        #		print position
        #		print orientation
        x1 = position[0] - rp[0]
        y1 = position[1] - rp[1]
        d_pos = math.sqrt((x1)**2 + (y1)**2)
        if d_pos < 0.015:
            break

    cap.release()
    cv2.destroyAllWindows()
    return
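# (Added sketch) track_truck above and setobj below both map an image pixel to
# simulator coordinates using the hard-coded scale 0.003351064 m/px and an image
# centre of (320, 240); a small helper capturing that mapping, assuming those
# constants, would be:
def pixel_to_sim_xy(px, py, scale=0.003351064, cx=320, cy=240):
    """Convert a pixel (px, py) to the simulator's (x, y); x is mirrored."""
    return -(px - cx) * scale, (py - cy) * scale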
def setobj(flag):
    cap = cv2.VideoCapture(1)
    w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    #	print w
    #	print h
    time.sleep(0)
    while True:
        ret, frame = cap.read()

        framebw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_250)
        parameters = aruco.DetectorParameters_create()
        corners, ids, _ = aruco.detectMarkers(framebw,
                                              aruco_dict,
                                              parameters=parameters)
        #		print ids
        t = len(ids)  #gives total aruco markers
        #		print t
        l = 0
        l1 = 0
        for i in range(0, t):
            z = int(ids[i][0])
            #			print'>>>>>>>'
            cv2.circle(frame, (corners[i][0][0][0], corners[i][0][0][1]), 5,
                       (125, 125, 125), -1)
            cv2.circle(frame, (corners[i][0][1][0], corners[i][0][1][1]), 5,
                       (0, 255, 0), -1)
            cv2.circle(frame, (corners[i][0][2][0], corners[i][0][2][1]), 5,
                       (180, 105, 255), -1)
            cv2.circle(frame, (corners[i][0][3][0], corners[i][0][3][1]), 5,
                       (255, 255, 255), -1)
            x = ((corners[i][0][0][0] + corners[i][0][2][0]) / 2)
            y = ((corners[i][0][0][1] + corners[i][0][2][1]) / 2)
            cv2.circle(frame, (int(x), int(y)), 5, (255, 0, 0), -1)  #center
            x1 = (corners[i][0][0][0] + corners[i][0][1][0]) / 2
            y1 = (corners[i][0][0][1] + corners[i][0][1][1]) / 2
            cv2.circle(frame, (int(x1), int(y1)), 5, (0, 0, 255),
                       -1)  #midpoint of starting edge
            cv2.line(frame, (int(x), int(y)), (int(x1), int(y1)), (255, 0, 0),
                     3)
            a = y1 - y
            b = x1 - x
            theta = 3.14 - math.atan2(
                a, b
            )  #subtract from pi to reorient the marker heading; atan2 returns angles in [-pi, pi]
            #			print theta
            #			print '........'
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(frame, str(theta), (int(x + 100), int(y - 20)), font,
                        0.8, (0, 255, 0), 2, cv2.LINE_AA)
            q = (x - 320) * 0.003351064  #metres per pixel along x (earlier candidate scales: 0.00303077335822, 0.00353077335822, 0.003515625)
            r = (y - 240) * 0.003351064  #metres per pixel along y (earlier candidate scales: 0.003389831, 0.00302814921311, 0.00352814921311, 0.0034375)
            emptyBuff = bytearray()
            orientation = [0, 0, theta]
            if z == 0:
                position = [-q, r, +4.1000e-02]
                returnCode1, outInts1, path_pos, outStrings1, outBuffer1 = vrep.simxCallScriptFunction(
                    clientID, 'script', vrep.sim_scripttype_childscript,
                    'shift', [], position, [], emptyBuff,
                    vrep.simx_opmode_blocking)
                returnCode1, outInts1, path_pos, outStrings1, outBuffer1 = vrep.simxCallScriptFunction(
                    clientID, 'script', vrep.sim_scripttype_childscript,
                    'rotate', [], orientation, [], emptyBuff,
                    vrep.simx_opmode_blocking)
#				print '0303030303'
            else:
                if flag == 0:
                    if z in freshfruits_id:
                        position = [-q, r, +4.1000e-02]
                        if position[0] > 0:
                            if position[1] > 0:
                                returnCode = vrep.simxSetObjectPosition(
                                    clientID, freshfruits[0], -1, position,
                                    vrep.simx_opmode_oneshot_wait)
                            if position[1] < 0:
                                returnCode = vrep.simxSetObjectPosition(
                                    clientID, freshfruits[1], -1, position,
                                    vrep.simx_opmode_oneshot_wait)
                        if position[0] < 0:
                            returnCode = vrep.simxSetObjectPosition(
                                clientID, freshfruits[2], -1, position,
                                vrep.simx_opmode_oneshot_wait)
#						print returnCode
#						print r
#						print -q
#						print '03030330-------------'
                    if z in damagedfruits_id:
                        position = [-q, r, +4.1000e-02]
                        returnCode = vrep.simxSetObjectPosition(
                            clientID, damagedfruits[l1], -1, position,
                            vrep.simx_opmode_oneshot_wait)
                        #						print returnCode
                        #						print r
                        #						print -q
                        #						print '03030330-------------'
                        l1 = l1 + 1

        flag = flag + 1
        cv2.imshow('frame', frame)
        if i == t - 1:
            cap.release()
            cv2.destroyAllWindows()
            return
Exemple #14
0
def cv_Demo2():
    rawCapture = PiRGBArray(camera)
    camera.iso = 100
    # Wait for the automatic gain control to settle
    time.sleep(2)
    global angle
    global marker_distance_cm
    # Now fix the values
    camera.shutter_speed = camera.exposure_speed
    camera.exposure_mode = 'off'
    g = camera.awb_gains
    camera.awb_mode = 'off'
    camera.awb_gains = g
    # allow the camera to warmup
    time.sleep(0.1)
    camera.framerate = 32
    aruco_dictionary = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters = aruco.DetectorParameters_create()
    timer = 0
    xwidth = 54 * (math.pi / 180)
    ywidth = 41 * (math.pi / 180)
    marker_flag = 1
    # start continuous picture taking
    for frame in camera.capture_continuous(rawCapture,
                                           format="bgr",
                                           use_video_port=True):
        marker_found = False

        # convert to grayscale
        image = frame.array
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # cvtColor returns a new image; it does not convert in place
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dictionary, parameters=parameters)
        if type(ids) == numpy.ndarray:
            print("Marker found")
            marker_found = True
            output_array = numpy.ndarray(len(ids))
            index = 0
            for i in ids:
                for j in i:
                    output_array[index] = j
                    print("Marker Indexes:", end=" ")
                    print(j)
                    #Determine angle of marker from the center of the camera
                    xcenter = int(
                        (corners[0][0][0][0] + corners[0][0][2][0]) / 2)
                    ycenter = int(
                        (corners[0][0][0][1] + corners[0][0][2][1]) / 2)
                    yheight = abs(
                        int(corners[0][0][0][1] - corners[0][0][2][1]))
                    marker_distance_ft = 490 / yheight
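                    # (added note) 490 is an empirical constant, presumably the
                    # focal length in pixels times the marker height in feet,
                    # so the distance follows from similar triangles.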
                    print(yheight)
                    print("Marker Distance in ft: ", end="")
                    print(marker_distance_ft)
                    print("Rounding: ", end="")
                    print(round(marker_distance_ft))

                    print(marker_distance_cm)
                    #                    marker_distance_cm = [int(ele) for ele in str(marker_distance_cm) if ele.isdigit()]
                    #print("Marker Distacne in m: ", end="")
                    #print(marker_distance_m)
                    imageMiddleX = image.shape[1] / 2
                    xdist = (xcenter - image.shape[1] / 2)
                    ydist = (ycenter - image.shape[0] / 2)

                    xangle = (xdist / image.shape[1]) * xwidth
                    yangle = (ydist / image.shape[0]) * ywidth

                    # Calculate the angle from the z-axis to the center point
                    # First calculate distance (in pixels to screen) on z-axis
                    a1 = xdist / math.tan(xangle)
                    a2 = ydist / math.tan(yangle)
                    a = (a1 + a2) / 2
                    distance = math.sqrt(pow(xdist, 2) + pow(ydist, 2))
                    #Calculate final angle for each Aruco
                    angle = math.atan2(distance, a) * (180 / math.pi)
                    print("Correction: ", end="")
                    marker_distance_ft = marker_distance_ft * 0.96 - (
                        -2.5244 * pow(angle * math.pi / 180, 2) + 0.027)
                    print(marker_distance_ft)
                    marker_distance_cm = marker_distance_ft * 30.48  # 1 ft = 30.48 cm
                    marker_distance_cm = int(marker_distance_cm)
                    if (xcenter > imageMiddleX):
                        angle *= -1
                        if (angle < -3.8 and angle > -4.3):
                            angle += 4
                    else:
                        angle += 4
                    if (angle > 0):
                        angle -= 2
                    display_angle = str(angle)
                    print("Angle from camera:", angle, "degrees")
                    lcd.message = ("Beacon Detected \nAngle: %s" %
                                   (display_angle))
                    marker_flag = 1
                    index += 1
                    print()
                    break
        rawCapture.truncate(0)
        if marker_found == False:
            print("No markers found")
            if marker_flag == 1:
                lcd.clear()
                marker_flag = 0
            lcd.message = ("Beacon Not\nDetected")
        else:
            break
Exemple #15
0
def vision():

    # Load the camera calibration values
    Camera = np.load('Calibration_Surface_Book_Rear_Camera.npz')
    CM = Camera['CM']  # camera matrix
    dist_coef = Camera['dist_coef']  # distortion coefficients from the camera

    aruco_dict = aruco.Dictionary_get(
        aruco.DICT_4X4_50)  # Load the aruco dictionary
    pa = aruco.DetectorParameters_create()  # Set the detection parameters

    # Select the correct camera (0) = front camera, (1) = rear camera
    cap = cv2.VideoCapture(1)

    # Set the width and height of the camera to 640x480
    cap.set(3, 640)
    cap.set(4, 480)

    # Create two opencv named windows
    cv2.namedWindow("frame-image", cv2.WINDOW_AUTOSIZE)

    # Position the window
    cv2.moveWindow("frame-image", 0, 0)

    t_end = time.time() + 10

    # Execute this continuously
    while time.time() < t_end:
        # Capture current frame from the camera
        ret, frame = cap.read()

        # Convert the image from the camera to Gray scale
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Run the detection formula
        corners, ids, rP = aruco.detectMarkers(gray, aruco_dict)

        # # Count the number of Arucos visible
        # try:
        #     IDScount = len(ids)
        # except:
        #     IDScount = 0

        # Calculate the pose of the markers
        rvecs, tvecs, _objPoints = aruco.estimatePoseSingleMarkers(
            corners, 53, CM, dist_coef
        )  # <<<< IMPORTANT: number needs changing to width of printed arucos (in mm)
        # Draw the detected markers as an overlay
        out = aruco.drawDetectedMarkers(frame, corners, ids)

        # Create Coordinate Storage Arrays
        X = []  #X Coordinate Locations Array
        Y = []
        Z = []
        ID = []

        # Run loop if ids are detected
        if ids is not None:
            for i, id in enumerate(ids):
                # Overlay the axis on the image
                out = aruco.drawAxis(out, CM, dist_coef, rvecs[i][0][:],
                                     tvecs[i][0][:], 30)
                # Print the tvecs tranformation matrix or Aruco coordinates
                # print("X = {:4.1f} Y = {:4.1f} Z = {:4.1f} ID = {:2d}".format(tvecs[i][0][0], tvecs[i][0][1], tvecs[i][0][2], ids[i][0]))
                X.append(tvecs[i][0][0])
                Y.append(tvecs[i][0][1])
                Z.append(tvecs[i][0][2])
                ID.append(ids[i][0])
                # debugTEST = []

        # Display the original frame in a window and aruco markers
        cv2.imshow('frame-image', frame)

        # If the button q is pressed in one of the windows
        if cv2.waitKey(20) & 0xFF == ord('q'):
            # Exit the While loop
            break

    # When everything done, release the capture
    cap.release()
    # close all windows
    cv2.destroyAllWindows()
    # # exit the kernel
    # exit(0)
    return X, Y, Z, ID, rvecs
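# (Added usage sketch) one way to consume vision()'s output after the 10-second
# capture window closes; units follow the 53 mm marker size passed to
# estimatePoseSingleMarkers above.
if __name__ == '__main__':
    X, Y, Z, ID, rvecs = vision()
    for x, y, z, marker_id in zip(X, Y, Z, ID):
        print("Marker {} at X={:.1f} Y={:.1f} Z={:.1f} mm".format(marker_id, x, y, z))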
Exemple #16
0
def calibrate(cam, align, marker_IDs, num_markers, comm_marker_id, tf_dict,
              num_pts):
    """


    Args:


    Returns:

    """
    tolerance = 4  # degs; set by user

    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters = aruco.DetectorParameters_create()

    k = 0
    A = []
    b = []
    print('Move tool to new pose, and press p to record')
    while (k < num_pts):
        frames = cam.pipeline.wait_for_frames()
        aligned_frames = align.process(frames)
        color_frame = aligned_frames.get_color_frame()
        color_image = np.asanyarray(color_frame.get_data())
        frame = color_image

        cv2.imshow('frame', frame)
        userinput = cv2.waitKey(10)
        if userinput & 0xFF == ord('p'):  # user wants to record point
            font = cv2.FONT_HERSHEY_SIMPLEX
            corners, ids, rvecs, tvecs = cam.detect_markers_realsense(frame)
            if comm_marker_id in ids:
                index = np.argwhere(ids == comm_marker_id)
                ids = np.delete(ids, index)

            shouldRecord = True
            if ids is not None and len(ids) > 1:
                ids.shape = (ids.size)
                ids = ids.tolist()
                ideal_angle = 360 / (num_markers - 1)
                for i in range(len(ids) - 1):
                    print(ids)
                    marker1 = rvecs[i]  # tf_dict[ids[i]]
                    marker1_rot, _ = cv2.Rodrigues(marker1[0])
                    j = i + 1
                    marker2 = rvecs[j]  # tf_dict[ids[j]]
                    marker2_rot, _ = cv2.Rodrigues(marker2[0])
                    marker1_rot_T = marker1_rot.transpose()
                    rot_btw_1_2 = np.matmul(marker1_rot_T, marker2_rot)
                    angles = rotToEuler(rot_btw_1_2)
                    y_angle = np.absolute(angles[1]) * 180 / 3.1415
                    print(y_angle)
                    if (np.absolute(ids[i] - ids[j]) > 1 and \
                            np.absolute(ids[i] - ids[j]) < num_markers-2) or \
                            y_angle < ideal_angle - tolerance or \
                            y_angle > ideal_angle + tolerance:
                        shouldRecord = False
                        print("Bad orientation found")
                        break

                if shouldRecord:
                    if comm_marker_id in ids:  # if comm_marker_id detected, use point
                        comm_index = ids.index(comm_marker_id)
                        R_j, _ = cv2.Rodrigues(rvecs[comm_index])
                        t_j = tvecs[comm_index]
                        t_j.shape = (3)
                    else:
                        # transformation from marker_i frame to common_marker_id frame
                        R_i_comm, _ = cv2.Rodrigues(tf_dict[ids[0]][0])
                        t_i_comm = tf_dict[ids[0]][1]

                        # transformation from comm_marker_id frame to marker_i
                        R_comm_i = R_i_comm.transpose()
                        t_comm_i = np.matmul(-R_comm_i, t_i_comm)
                        t_comm_i.shape = (3, 1)

                        # transformation from marker_i to camera frame
                        R_i, _ = cv2.Rodrigues(rvecs[0])
                        t_i = tvecs[0]
                        t_i.shape = (3, 1)

                        # combine both transformations to get the transformation
                        # from comm_marker_id to the camera frame
                        R_j = np.matmul(R_i, R_comm_i)
                        t_j = np.matmul(R_i, t_comm_i) + t_i
                        t_j.shape = (3)

                    # set up for pivot cal least squares equation
                    # A = [ R_j | -I ]
                    # b = [ -p_j ]
                    item1 = np.append(R_j[0, :], [-1, 0, 0])
                    item1 = item1.tolist()
                    A.append(item1)
                    item2 = np.append(R_j[1, :], [0, -1, 0])
                    item2 = item2.tolist()
                    A.append(item2)
                    item3 = np.append(R_j[2, :], [0, 0, -1])
                    item3 = item3.tolist()
                    A.append(item3)

                    b.extend(-t_j)
                    k = k + 1
                    print('Recorded')
            else:
                print("Not enough markers detected. Try again.")

    # Solve least-squares Ax = b
    A = np.asanyarray(A)
    b = np.asanyarray(b)
    x = np.linalg.lstsq(A, b, rcond=None)[0]  # x[0:3] = p_t, x[3:6] = p_pivot
    print(x[0:3])

    return x[0:3]
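# (Added background sketch) Each recorded pose j satisfies R_j @ p_tip + t_j = p_pivot,
# i.e. R_j @ p_tip - p_pivot = -t_j, which is exactly the stacked system
# A = [ R_j | -I ], b = [ -t_j ] assembled above. A self-contained version of that
# least-squares step (the helper name is illustrative, not from the original code):
import numpy as np

def solve_pivot_calibration(rotations, translations):
    """Solve [R_j | -I] [p_tip; p_pivot] = -t_j for lists of 3x3 R_j and length-3 t_j."""
    A = np.vstack([np.hstack([R_j, -np.eye(3)]) for R_j in rotations])
    b = np.hstack([-np.asarray(t_j).reshape(3) for t_j in translations])
    x = np.linalg.lstsq(A, b, rcond=None)[0]
    return x[0:3], x[3:6]  # p_tip (tool/marker frame), p_pivot (camera frame)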
Exemple #17
0
     [0, 1, 0, 1, 0, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 0],
     [0, 0, 0, 0, 0, 0, 0]],
    dtype=np.uint8)
aruco_dict.bytesList[16] = aruco.Dictionary_getByteListFromBits(mybits17)
#17
mybits18 = np.array(
    [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], [0, 1, 1, 0, 0, 1, 0],
     [0, 1, 0, 1, 0, 1, 0], [0, 0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1, 0],
     [0, 0, 0, 0, 0, 0, 0]],
    dtype=np.uint8)
aruco_dict.bytesList[17] = aruco.Dictionary_getByteListFromBits(mybits18)

#for i in range(len(aruco_dict.bytesList)):
#    cv2.imwrite("custom_aruco_" + str(i) + ".png", aruco.drawMarker(aruco_dict, i, 128))

parameters = aruco.DetectorParameters_create()  # Marker detection parameters

#markerImage = np.zeros((200, 200), dtype=np.uint8)
#markerImage = cv2.aruco.drawMarker(aruco_dict, 8, 200, markerImage, 1);
#cv2.imwrite("marker4.png", markerImage)
#cap = cv2.VideoCapture(1)


def start_node():
    global detectamos, rango, direccion, profundidad, CoorMsg

    rospy.init_node('camera_subscriber_hd2')
    rospy.loginfo('camera_subscriber_hd2 started')
    rospy.Subscriber("/zed2/left_raw/image_raw_color", Image, process_image)
    rospy.Subscriber('zed2/depth_registered', Image, call_back_depth)
def detect():

    if len(sys.argv) < 2:
        with_feed = False
    elif sys.argv[1] == '-withfeed':
        with_feed = True
    else:
        with_feed = False

    RESOLUTION = 90  # Use this to set the resolution for video feed

    DEADZONE_RIGHT = 1.45
    DEADZONE_LEFT = -DEADZONE_RIGHT

    cap = cv2.VideoCapture(0)

    cap.set(cv2.CAP_PROP_FRAME_WIDTH, RESOLUTIONS[RESOLUTION][0])
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, RESOLUTIONS[RESOLUTION][1])

    corners = np.array([[0, 0]] * 4)

    # Length of the AR Tag
    # In meters
    marker_length = 0.06
    marker_size = 0.01

    ar_dict = ar.Dictionary_get(ar.DICT_6X6_250)
    parameters = ar.DetectorParameters_create()

    calibration_file = "calibration.xml"
    calibration_params = cv2.FileStorage(calibration_file,
                                         cv2.FILE_STORAGE_READ)

    camera_matrix = calibration_params.getNode("cameraMatrix").mat()
    dist_coeffs = calibration_params.getNode("distCoeffs").mat()

    ret, frame = cap.read()
    size = frame.shape

    focal_length = size[1]
    center = (size[1] / 2, size[0] / 2)

    camera_matrix = np.array([[focal_length, 0, center[0]],
                              [0, focal_length, center[1]], [0, 0, 1]],
                             dtype='double')

    while True:
        ret, frame = cap.read()

        size = frame.shape

        picture = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        corners, ids, rejected_img_points = ar.detectMarkers(
            picture, ar_dict, parameters=parameters)

        if with_feed:
            picture = ar.drawDetectedMarkers(picture, corners)

        if len(corners) == 0:
            continue

        # A tag is detected
        print('Corners:', corners[0])

        # Get rotation and translation vectors
        rvec, tvec, _ = ar.estimatePoseSingleMarkers(corners[0], marker_length,
                                                     camera_matrix,
                                                     dist_coeffs)

        object_x = tvec[0][0][0]
        object_z = tvec[0][0][2]

        print('X Distance', object_x)
        print('Z Distance', object_z)

        direction = get_direction(object_x, object_z)

        print('Direction: ', direction)

        if DEADZONE_LEFT < direction < 0:
            print('Turning Left')
        elif 0 < direction < DEADZONE_RIGHT:
            print('Turning Right')
        else:
            print('Going Straight')

        if with_feed:
            picture = ar.drawAxis(picture, camera_matrix, dist_coeffs, rvec,
                                  tvec, marker_size)
            cv2.imshow('frame', picture)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
Exemple #19
0
 def __init__(self):
     self.aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
     self.parameters = aruco.DetectorParameters_create()
def computeExtrinsics(imgRoot, calibDataDir, rvec=[], tvec=[], debug=False):

    # Parameters rvec and tvec should give a transformation from the world to an arbitrary reference frame
    if len(rvec) == 0:
        rvec = np.zeros(3).reshape(3, 1)
    if len(tvec) == 0:
        tvec = np.zeros(3).reshape(3, 1)
    tWorld2Ref = {
        "rvec": rvec,
        "tvec": tvec,
        "E": getRigBodyTransfMatrix(rvec, tvec)
    }

    ## Load camera calibration
    fs = cv.FileStorage(calibDataDir, cv.FILE_STORAGE_READ)

    K = fs.getNode("intrinsics").mat()
    distCoeffs = fs.getNode("distortion").mat()
    imgSz = fs.getNode("image_size").mat()
    imgSz = tuple(imgSz.reshape(1, 2)[0].astype(int))

    fs.release()

    ## Define aruco cube coordinates
    # Information given/known:
    # Aruco markers dimensions = 0.69m X 0.69m
    # Edge width = 0.061
    # height of top face of the Aruco cube (to the floor) = 0.88
    d = 0.69 / 2
    e = 0.061
    h = 0.88

    # Transformation that converts a point in the cube reference frame to the world
    tCube2World = {
        "rvec": np.zeros(3).reshape(3, 1),
        "tvec": np.array([[0], [0], [h - d - e]])
    }
    tCube2World["E"] = getRigBodyTransfMatrix(tCube2World["rvec"],
                                              tCube2World["tvec"])

    # Aruco reference frame is in the center of the cube, with:
    # Z pointing upwards
    # X pointing to marker 1
    # Y pointing to marker 4

    # Aruco corner order is clockwise.
    # Aruco marker points in the world's reference frame
    arucoMk0 = np.array([[d, d, h], [d, -d, h], [-d, -d, h], [-d, d, h]])
    arucoMk1 = np.array([[d + e, d, h - 2 * d - e], [d + e, -d, h - 2 * d - e],
                         [d + e, -d, h - e], [d + e, d, h - e]])
    arucoMk2 = np.array([[d, -d - e, h - e], [d, -d - e, h - 2 * d - e],
                         [-d, -d - e, h - 2 * d - e], [-d, -d - e, h - e]])
    arucoMk3 = np.array([[], [], [], []])
    arucoMk4 = np.array([[d, d + e, h - 2 * d - e], [d, d + e, h - e],
                         [-d, d + e, h - e], [-d, d + e, h - 2 * d - e]])
    arucoMarkers = {
        0: arucoMk0,
        1: arucoMk1,
        2: arucoMk2,
        3: arucoMk3,
        4: arucoMk4
    }

    ## Apply given transformation to work the given reference frame
    for key in arucoMarkers.keys():
        if arucoMarkers[key].size > 0:
            arucoMarkers[key] = applyRigBodyTransformation(
                arucoMarkers[key], tWorld2Ref["rvec"], tWorld2Ref["tvec"])
    tCube2Ref = {"E": tWorld2Ref["E"].dot(tCube2World["E"])}
    tCube2Ref["rvec"], tCube2Ref["tvec"] = getRvecTvec(tCube2Ref["E"])

    ## Aruco library config
    aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_1000)
    parameters = aruco.DetectorParameters_create()

    # Plot aruco marker
    if debug:
        sampleNum = 0
        sampleMarker = aruco.drawMarker(aruco_dict, sampleNum, 500, None)
        cv.imshow("Sample aruco marker nr {}".format(sampleNum), sampleMarker)
        cv.imwrite("./img/sample-aruco-marker-{}.png".format(sampleNum),
                   sampleMarker)
        cv.waitKey()

    # Configuration tuning of the aruco library can yield:
    mode = 2
    if mode == 1:
        # Good-ish results
        parameters.adaptiveThreshWinSizeMin = 5
        parameters.adaptiveThreshWinSizeMax = 25
        parameters.adaptiveThreshWinSizeStep = 5

        parameters.minMarkerPerimeterRate = 0.2
        parameters.maxMarkerPerimeterRate = 4.0

        parameters.minMarkerDistanceRate = 0.01
    elif mode == 2:
        # Great results
        parameters.cornerRefinementMethod = aruco.CORNER_REFINE_APRILTAG
    else:
        # compared to standard values
        pass

    ## Compute camera extrinsic calibration

    tRef2Cam = {}

    # For each camera
    for fname in glob.glob(imgRoot + "*"):
        print(fname)
        # camNum indexing starts from 0
        camNum = int(fname[-5]) - 1
        img = cv.imread(fname)
        #cv.imshow("img{}".format(camNum+1), img)

        # Detect aruco markers
        imgGray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            imgGray,
            aruco_dict,
            parameters=parameters,
            cameraMatrix=K,
            distCoeff=distCoeffs)
        print("Detected {} markers.".format(len(ids)))

        # Refine corners. Not good, worse performance
        # auxCorners = np.concatenate( np.concatenate( corners, axis=0 ), axis=0 )
        # criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 50, 0.01)
        # auxCorners = cv.cornerSubPix(imgGray, auxCorners, winSize=(5,5), zeroZone=(-1,-1), criteria=criteria)
        # drawPointsInImage("img{} :  refined markers".format(camNum+1), img, auxCorners)

        if debug:
            # Draw detected markers
            frame_markers = aruco.drawDetectedMarkers(img.copy(), corners, ids)
            cv.imshow("img{} :  detected markers".format(camNum + 1),
                      frame_markers)

            frame_markers_rejected = aruco.drawDetectedMarkers(
                img.copy(), rejectedImgPoints)
            cv.imshow("img{} :  rejected markers".format(camNum + 1),
                      frame_markers_rejected)

        # Build correspondence between image points and object points
        imagePoints = np.empty([len(ids) * 4, 2])
        objectPoints = np.empty([len(ids) * 4, 3])
        for i in range(len(ids)):
            id = ids[i][0]
            imagePoints[i * 4:(i + 1) * 4] = corners[i][0]
            objectPoints[i * 4:(i + 1) * 4] = arucoMarkers[id]

        # Find camera pose
        flags = cv.SOLVEPNP_ITERATIVE
        retval, rvec, tvec = cv.solvePnP(objectPoints,
                                         imagePoints,
                                         cameraMatrix=K,
                                         distCoeffs=distCoeffs,
                                         flags=flags)
        tRef2Cam[camNum] = {
            "rvec": rvec,
            "tvec": tvec,
            "E": getRigBodyTransfMatrix(rvec, tvec)
        }

        # Refine?
        #solvePnPRefineLM()
        #solvePnPRefineVVS()

        if debug:
            # Project 3D points back to the camera with the estimated camera pose
            imagePointsEst, _ = cv.projectPoints(objectPoints,
                                                 tRef2Cam[camNum]["rvec"],
                                                 tRef2Cam[camNum]["tvec"],
                                                 cameraMatrix=K,
                                                 distCoeffs=distCoeffs)
            imagePointsEst = imagePointsEst.round()
            annotatedImg = drawPointsInImage(img.copy(), imagePointsEst)
            # Draw cube reference frame
            rvec, tvec = getRvecTvec(tRef2Cam[camNum]["E"].dot(tCube2Ref["E"]))
            annotatedImg = addReferenceMarker(annotatedImg, rvec, tvec, K,
                                              distCoeffs, d + e)
            cv.imshow(
                "Aruco edge points projected back to cam{} with the estimated pose and cube reference frame"
                .format(camNum + 1), annotatedImg)

            cv.waitKey()

    ## Compute transformation between cameras

    transfI2J = []
    for i in range(len(tRef2Cam)):
        auxt = []
        for j in range(len(tRef2Cam)):
            # Shown 2 alternatives to report the transformations between camera reference frames

            # Option 1: Euclidean rigid body transformation matrix (it's simpler to write and read)
            # Get Euclidean rigid body transformation
            Ei = tRef2Cam[i]["E"]
            Ej = tRef2Cam[j]["E"]
            # Find the transformation that converts a point in camera i reference frame back to the world
            retval, invEi = cv.invert(Ei)
            if not retval == 1:
                print(
                    "Failed to invert camera {} extrinsic calibration".format(
                        i))
                exit(-1)
            # Compose both transformations to get a transformation from reference i to j
            dic = {"E": Ej.dot(invEi)}

            # Option 2: rotation matrix/vector and translation vector
            Ri, _ = cv.Rodrigues(tRef2Cam[i]["rvec"])
            Ti = tRef2Cam[i]["tvec"]
            Rj, _ = cv.Rodrigues(tRef2Cam[j]["rvec"])
            Tj = tRef2Cam[j]["tvec"]
            # Find the transformation that converts a point in camera i's reference frame to camera j's
            retval, invRi = cv.invert(Ri)
            if not retval == 1:
                print("Failed to invert camera {} rotation matrix".format(i))
                exit(-1)
            dic["rvec"] = cv.Rodrigues(Rj.dot(invRi))[0]
            dic["tvec"] = -Rj.dot(invRi).dot(Ti) + Tj

            if debug:
                # Checking if reported transforms are equivalent
                print(
                    np.sum(dic["E"] -
                           getRigBodyTransfMatrix(dic["rvec"], dic["tvec"])))

            auxt.append(dic)

        transfI2J.append(auxt)

    if debug:
        ## Quick tests
        # Sanity check. Transformation from camera i to j is the inverse of the one from camera j to i
        for i in range(len(transfI2J)):
            for j in range(i, len(transfI2J)):
                print(
                    np.all(
                        transfI2J[i][j]["E"].dot(transfI2J[j][i]["E"]) -
                        np.eye(4) < np.finfo(transfI2J[i][j]["E"].dtype).eps *
                        10))

        # Test: Plot camera reference frames. Not working! Needs specific code to deal with points outside camera FOV
        # Standard opencv function does not deal with this well
        # annotatedImg2 = img.copy()
        # for i in range(len(transfI2J)):
        #     rvec, tvec = getRvecTvec( transfI2J[i][camNum] )
        #     annotatedImg = addReferenceMarker(annotatedImg2, rvec, tvec, K, distCoeffs, 4)
        # cv.imshow("annotated img with camera reference frames", annotatedImg2)

        cv.waitKey()

    return tRef2Cam, transfI2J


## Might come in handy

# sift = cv.SIFT_create()
# kp = sift.detect(imgGray,None)
# img=cv.drawKeypoints(imgGray,kp,img)
# cv.imshow("sift", img)
#
# cv.waitKey()
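# (Added sketch) getRigBodyTransfMatrix and getRvecTvec used above are not included
# in this snippet; a typical pair, assuming Rodrigues rotation vectors and the
# cv/np aliases already imported for computeExtrinsics, could look like:
def getRigBodyTransfMatrix_sketch(rvec, tvec):
    """Build a 4x4 homogeneous transform from a Rodrigues rvec and a tvec."""
    R, _ = cv.Rodrigues(np.asarray(rvec, dtype=float).reshape(3, 1))
    E = np.eye(4)
    E[:3, :3] = R
    E[:3, 3] = np.asarray(tvec, dtype=float).reshape(3)
    return E


def getRvecTvec_sketch(E):
    """Recover (rvec, tvec) from a 4x4 homogeneous transform."""
    rvec, _ = cv.Rodrigues(E[:3, :3])
    return rvec, E[:3, 3].reshape(3, 1)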
def aruco_coordinates(file_name):

    ids = None
    flag_contour = 0
    cap = cv2.VideoCapture(file_name)  # Capture video from camera

    while cap.isOpened() and (flag_contour == 0 or ids is None):
        ret, frame = cap.read()

        area = (frame.shape)
        frame_area = area[0] * area[1]
        print("Frame Area")
        #print(frame_area)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        if ids is None:

            clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
            gray = clahe.apply(gray)
            gray_aruco = cv2.GaussianBlur(gray, (5, 5), 0)
            aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
            parameters = aruco.DetectorParameters_create()
            corners, ids, rejectedImgPoints = aruco.detectMarkers(
                gray_aruco, aruco_dict, parameters=parameters)
            print("id of aruco = ")
            print(ids.flatten())

        if flag_contour == 0:
            #contour filtering part
            blurred = cv2.bilateralFilter(gray, 11, 17, 17)
            kernel = np.ones((5, 5), np.uint8)
            blurredopen = cv2.morphologyEx(blurred, cv2.MORPH_OPEN, kernel)
            blurredopen = cv2.morphologyEx(blurredopen, cv2.MORPH_OPEN, kernel)
            blurredclose = cv2.morphologyEx(blurredopen, cv2.MORPH_CLOSE,
                                            kernel)
            edged = cv2.Canny(blurredclose, 30, 200)
            cnts = cv2.findContours(edged.copy(), cv2.RETR_TREE,
                                    cv2.CHAIN_APPROX_SIMPLE)
            # cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:10]
            cnts = imutils.grab_contours(cnts)  # handles the differing findContours return values across OpenCV versions
            cntsSorted = sorted(cnts,
                                key=lambda x: cv2.contourArea(x),
                                reverse=True)[:1]
            for c in cntsSorted:
                # approximate the contour
                peri = cv2.arcLength(c, True)
                approx = cv2.approxPolyDP(c, 0.01 * peri, True)
                # if our approximated contour has four points, then
                # we can assume that we have found our screen
                if len(approx) == 4:
                    contour_area = (cv2.contourArea(c))
                    #print("Area Percent")
                    # print(cv2.contourArea(c))
                    areapercent = (contour_area / frame_area) * 100
                    #print(areapercent)
                    if areapercent > 25:
                        #print("Arena Found")
                        screenCnt = approx
                        contours = screenCnt
                        flag_contour = 1
                        if flag_contour == 1 and ids is not None:
                            break
    print("i am out")
    cap.release()
    return ((ids.flatten()), contours)
def detect_markers_and_draw(mtx, dist, frame, drawing_img):
    marker_size = 2.8/100   # marker length in meters
    axis_length = 0.015

    # get dictionary and parameters
    parameters = aruco.DetectorParameters_create()  # create parameters
    parameters.detectInvertedMarker = False  # do not detect inverted (white-on-black) markers

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert to gray

    # get marker data
    corners, ids, rejected_img_points = aruco.detectMarkers(gray, ARUCO_DICT, parameters=parameters)

    # draw corners on frame
    frame_m = aruco.drawDetectedMarkers(frame, corners, ids)

    if len(corners) == 4:

        # get corners of initial image
        y_max = frame.shape[0]
        x_max = frame.shape[1]
        frame_corners = [[0, 0], [x_max, 0], [0, y_max], [x_max, y_max]]

        # get transform corners
        transform_corners = np.zeros((len(ids), 2))
        for i in range(len(ids)):
            corner_num = ids[i][0]

            # get center x and y (calculating average)
            x, y = 0, 0
            for j in range(4):
                x += corners[i][0][j][0]
                y += corners[i][0][j][1]

            # add center to transform matrix
            transform_corners[corner_num] = [x/4, y/4]

        # transform drawing
        pts1 = np.float32(frame_corners)
        pts2 = np.float32(transform_corners)
        M = cv2.getPerspectiveTransform(pts1, pts2)
        drawing_img = cv2.warpPerspective(drawing_img, M, (drawing_img.shape[1], drawing_img.shape[0]))

        # create mask
        gray_square = cv2.cvtColor(drawing_img, cv2.COLOR_BGR2GRAY)
        ret, mask = cv2.threshold(gray_square, 5, 255, cv2.THRESH_BINARY)
        mask_inv = cv2.bitwise_not(mask)

        # get background from frame
        # print(f'frame_m shape {frame_m.shape}')
        # print(f'mask shape {mask_inv.shape}')
        frame_bg = cv2.bitwise_and(frame_m, frame_m, mask=mask_inv)

        # take only drawing from drawing image
        drawing_img = cv2.bitwise_and(drawing_img, drawing_img, mask=mask)

        frame_m = cv2.add(frame_bg, drawing_img)

        # point_1 = (int(corners[0][0][0][0]), (corners[0][0][0][1]))
        # point_2 = (int(corners[1][0][0][0]), (corners[1][0][0][1]))
        # cv2.line(square_img, point_1, point_2, (255, 0, 0), 3)

    # estimate pose
    rvecs, tvecs, _objPoints = aruco.estimatePoseSingleMarkers(corners, marker_size, mtx, dist)
    if tvecs is not None:
        for i in range(len(tvecs)):
            frame_m = aruco.drawAxis(frame_m, mtx, dist, rvecs[i], tvecs[i], axis_length)

    return frame_m, rvecs, tvecs
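# (Added usage sketch) one way to drive detect_markers_and_draw from a webcam; the
# camera index, window name and 'drawing.png' are placeholders, and mtx/dist are the
# calibrated camera matrix and distortion coefficients loaded elsewhere:
def run_overlay_demo(mtx, dist, drawing_path='drawing.png', camera_index=0):
    cap = cv2.VideoCapture(camera_index)
    drawing = cv2.imread(drawing_path)
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        out, rvecs, tvecs = detect_markers_and_draw(mtx, dist, frame, drawing.copy())
        cv2.imshow('overlay', out)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()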
def show_webcam(mirror=False):
    cam = cv2.VideoCapture(0)  # Capture Webcam
    BLUE = (255, 0, 0)
    KNOWN_WIDTH_INCHES = 4.55  # AR Marker width in inches
    KNOWN_WIDTH_MM = KNOWN_WIDTH_INCHES * 25.4
    WIDTH = 480  # Image width in pixels
    HEIGHT = 640  # image height in pixels
    PIXEL_SIZE = 310  # Size of AR Marker in Pixels
    DISTANCE0 = 12  # 1 foot in inches
    FOCAL_LENGTH = DISTANCE0 * PIXEL_SIZE / KNOWN_WIDTH_INCHES
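    # (added note) pinhole similar triangles: FOCAL_LENGTH = 12 * 310 / 4.55 ≈ 817.6 px,
    # and below distance = FOCAL_LENGTH * KNOWN_WIDTH_INCHES / pixel_size, so a marker
    # appearing 155 px wide is estimated at 817.6 * 4.55 / 155 ≈ 24 inches away.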
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    font = cv2.FONT_HERSHEY_PLAIN
    while True:

        ret_val, frame = cam.read()  # Read each frame from camera
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Gray scale image

        red_img = frame.copy()
        # set blue and green channels to 0
        red_img[:, :, 0] = 0
        red_img[:, :, 1] = 0
        #Look for faces
        faces = face_cascade.detectMultiScale(gray, 1.1, 4)
        ar_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
        param = aruco.DetectorParameters_create()
        corners, ids, rejectedImgPoints = aruco.detectMarkers(frame,
                                                              ar_dict,
                                                              parameters=param)
        ar_gray = aruco.drawDetectedMarkers(frame, corners)
        if (len(corners) > 0):
            Xcord = [corners[0][0][0][0], corners[0][0][2][0]]
            Ycord = [corners[0][0][0][1], corners[0][0][2][1]]
            pixel_size = corners[0][0][0][0] - corners[0][0][1][0]
            distance = FOCAL_LENGTH * KNOWN_WIDTH_INCHES / pixel_size
            print("Distance to Object " + str(distance) + "\n")
            x = midPointCalculation(Xcord)
            y = midPointCalculation(Ycord)

            center = (int(x), int(y))
            #print(str(x) +","+ str(y))
            LEFT_BOUND = 215
            RIGHT_BOUND = 430
            if x < LEFT_BOUND:
                pass
            elif x > RIGHT_BOUND:
                pass
            else:
                pass
            print("\n")

            cv2.circle(gray, center, 5, BLUE, thickness=-1)

        #Look for faces
        if len(faces) != 0:
            cv2.putText(red_img, "FACE DETECTED", (250, 435), font, 2,
                        (255, 255, 255))
        for (x, y, w, h) in faces:
            cv2.rectangle(red_img, (x, y), (x + w, y + h), (255, 0, 0), 2)

        cv2.imshow("Vision", gray)
        if cv2.waitKey(1) == 27:
            break  # esc to quit
    cam.release()  # Release capture
    cv2.destroyAllWindows()  # Close windows
Exemple #24
0
def main():

    face_landmark_path = './shape_predictor_68_face_landmarks.dat'
    #flags/initializations to be set
    skip_frame = 3
    socket_connect = 1  # set to 0 if we are testing the code locally on the computer with only the realsense tracking.
    simplified_calib_flag = 0  # this is set to 1 if we want to do one-time calibration
    arucoposeflag = 1
    # img_idx keeps track of image index (frame #).
    img_idx = 0
    N_samples_calib = 10  # number of samples for computing the calibration matrix using homography

    if socket_connect == 1:
        #  create a socket object
        s = socket.socket()
        print("Socket successfully created")
        # reserve a port on your computer in our case it is 2020 but it can be anything
        port = 2020
        s.bind(('', port))
        print("socket binded to %s" % (port))

        # put the socket into listening mode
        s.listen(5)
        print("socket is listening")
        c, addr = s.accept()
        print('got connection from ', addr)

    if socket_connect == 1 and simplified_calib_flag == 0:
        arucoinstance = arucocalibclass()
        ReturnFlag = 1
        aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_250)
        marker_len = 0.0645
        MLRSTr = arucoinstance.startcamerastreaming(c, ReturnFlag, marker_len,
                                                    aruco_dict,
                                                    N_samples_calib)
        print(MLRSTr)
    elif socket_connect == 1 and simplified_calib_flag == 1:
        T_M2_RS = np.array([
            -1.0001641, 0.00756584, 0.00479072, 0.03984956, -0.00774137,
            -0.99988126, -0.03246199, -0.01359556, 0.00453644, -0.03251681,
            0.99971441, -0.00428408, 0., 0., 0., 1.
        ])
        T_M2_RS = T_M2_RS.reshape(4, 4)
        MLRSTr = simplified_calib(T_M2_RS, c)
    else:
        MLRSTr = np.array((1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1))
        MLRSTr = MLRSTr.reshape(4, 4)
        print(MLRSTr)
    # end socket ######

    # Configure depth and color streams of Realsense camera
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    # Start streaming
    profile = pipeline.start(config)
    align_to = rs.stream.color
    align = rs.align(align_to)

    # Intrinsics & Extrinsics of Realsense
    depth_intrin = profile.get_stream(
        rs.stream.depth).as_video_stream_profile().get_intrinsics()
    intr = profile.get_stream(
        rs.stream.color).as_video_stream_profile().get_intrinsics()
    mtx = np.array([[intr.fx, 0, intr.ppx], [0, intr.fy, intr.ppy], [0, 0, 1]])
    dist = np.array(intr.coeffs)
    #load cascade classifier training file for lbpcascade
    lbp_face_cascade = cv2.CascadeClassifier(
        "./haarcascade_frontalface_alt2.xml")
    facemark = cv2.face.createFacemarkLBF()
    facemark.loadModel('./lbfmodel.yaml')

    print('Start detecting pose ...')
    detected_pre = []

    while True:
        # get video frame
        frames = pipeline.wait_for_frames()
        aligned_frames = align.process(frames)
        color_frame = aligned_frames.get_color_frame()
        aligned_depth_frame = aligned_frames.get_depth_frame()

        if not aligned_depth_frame or not color_frame:
            continue
        # read color frame
        input_img = np.asanyarray(color_frame.get_data())
        #increment count of the image index
        img_idx = img_idx + 1
        img_h, img_w, _ = np.shape(input_img)

        # Process the first frame and every frame after "skip_frame" frames
        if img_idx == 1 or img_idx % skip_frame == 0:
            # convert image to grayscale
            gray_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)
            # detect faces using LBP detector
            faces = lbp_face_cascade.detectMultiScale(gray_img,
                                                      scaleFactor=1.1,
                                                      minNeighbors=5)
            #draw rectangle around detected face
            for (x, y, w, h) in faces:
                cv2.rectangle(input_img, (x, y), (x + w, y + h), (0, 255, 0),
                              2)

            depth_point = [0, 0, 0]
            if len(faces) > 0:
                #detect landmarks
                status, landmarks = facemark.fit(gray_img, faces)
                #draw dots on the detected facial landmarks
                for f in range(len(landmarks)):
                    cv2.face.drawFacemarks(input_img, landmarks[f])
                #get head pose
                reprojectdst, rotation_vec, rotation_mat, translation_vec, euler_angle, image_pts = get_head_pose(
                    landmarks[0][0], mtx, dist)
                # draw circle on image points (nose tip, corner of eye, lip and chin)
                for p in image_pts:
                    cv2.circle(input_img, (int(p[0]), int(p[1])), 3,
                               (0, 0, 255), -1)

                # draw line sticking out of the nose tip and showing the head pose
                (nose_end_point2D,
                 jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]),
                                               rotation_vec, translation_vec,
                                               mtx, dist)
                p1 = (int(image_pts[0][0]), int(image_pts[0][1]))
                p2 = (int(nose_end_point2D[0][0][0]),
                      int(nose_end_point2D[0][0][1]))
                cv2.line(input_img, p1, p2, (255, 0, 0), 2)

                # get 3D co-ord of nose - to get a more accurate estimation of the translaion of the head
                depth = aligned_depth_frame.get_distance(
                    landmarks[0][0][30][0], landmarks[0][0][30][1])
                cv2.circle(input_img,
                           (landmarks[0][0][30][0], landmarks[0][0][30][1]), 3,
                           (0, 255, 0), -1)
                depth_point = rs.rs2_deproject_pixel_to_point(
                    depth_intrin,
                    [landmarks[0][0][30][0], landmarks[0][0][30][1]], depth)
                depth_point = np.array(depth_point)
                depth_point = np.reshape(depth_point, [1, 3])

                #check if the depth estimation is not zero and filters out faces within 0.8 m from the camera
                if (depth_point[0][2] != 0 and depth_point[0][2] < 0.8):  #
                    #Combine rotation matrix and translation vector (given by the depth point) to get the head pose
                    RSTr = np.hstack([rotation_mat, depth_point.transpose()])
                    RSTr = np.vstack([RSTr, [0, 0, 0, 1]])
                    print("head pose", RSTr)

                    # If arucoposeflag = 1, an Aruco marker will get detected and its transformed pose will be streamed to the AR headset, and the pose
                    # of the tracking parent will be updated to reflect the Aruco marker pose. This can be used to verify/test the accuracy of the calibration.
                    if arucoposeflag == 1:
                        print("aruco")
                        gray = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)
                        # set dictionary size depending on the aruco marker selected
                        aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
                        # detector parameters can be set here (List of detection parameters[3])
                        parameters = aruco.DetectorParameters_create()
                        parameters.adaptiveThreshConstant = 10
                        # lists of ids and the corners belonging to each id
                        corners, ids, rejectedImgPoints = aruco.detectMarkers(
                            gray, aruco_dict, parameters=parameters)
                        # font for displaying text (below)
                        font = cv2.FONT_HERSHEY_SIMPLEX
                        # check if the ids list is not empty
                        if np.all(ids != None):
                            # estimate pose of each marker and return the values
                            rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
                                corners, 0.045, mtx, dist
                            )  # 0.0628 (0.061 if using Dell laptop - 95% zoom)
                            for i in range(0, ids.size):
                                # draw axis for the aruco markers
                                aruco.drawAxis(input_img, mtx, dist, rvec[i],
                                               tvec[i], 0.1)

                            # draw a square around the markers
                            aruco.drawDetectedMarkers(input_img, corners)
                            #Combine rotation matrix and translation vector to get Aruco pose
                            R_rvec = R.from_rotvec(rvec[0])
                            R_rotmat = R_rvec.as_matrix()
                            AruRSTr = np.hstack(
                                [R_rotmat[0], tvec[0].transpose()])
                            AruRSTr = np.vstack([AruRSTr, [0, 0, 0, 1]])
                            RSTr = AruRSTr
                            print("Aruco pose", AruRSTr)

                    # Since pose detected in OpenCV will be right handed coordinate system, it needs to be converted to left-handed coordinate system of Unity
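                    # (added note) the remapping below swaps rows 1 and 2 and columns
                    # 1 and 2 of the 4x4 pose, i.e. it exchanges the Y and Z axes to
                    # match Unity's left-handed, Y-up convention.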
                    RSTr_LH = np.array([
                        RSTr[0][0], RSTr[0][2], RSTr[0][1], RSTr[0][3],
                        RSTr[2][0], RSTr[2][2], RSTr[2][1], RSTr[2][3],
                        RSTr[1][0], RSTr[1][2], RSTr[1][1], RSTr[1][3],
                        RSTr[3][0], RSTr[3][1], RSTr[3][2], RSTr[3][3]
                    ])  # converting to left handed coordinate system
                    RSTr_LH = RSTr_LH.reshape(4, 4)
                    #Compute the transformed pose to be streamed to MagicLeap
                    HeadPoseTr = np.matmul(MLRSTr, RSTr_LH)

                    if socket_connect == 1:
                        # Array to be sent
                        ArrToSend = np.array([
                            HeadPoseTr[0][0], HeadPoseTr[0][1],
                            HeadPoseTr[0][2], HeadPoseTr[0][3],
                            HeadPoseTr[1][0], HeadPoseTr[1][1],
                            HeadPoseTr[1][2], HeadPoseTr[1][3],
                            HeadPoseTr[2][0], HeadPoseTr[2][1],
                            HeadPoseTr[2][2], HeadPoseTr[2][3],
                            HeadPoseTr[3][0], HeadPoseTr[3][1],
                            HeadPoseTr[3][2], HeadPoseTr[3][3]
                        ])
                        # Send transformed pose to AR headset
                        dataTosend = struct.pack('f' * len(ArrToSend),
                                                 *ArrToSend)
                        c.send(dataTosend)
            else:
                print("No Face Detected")
            # display detected face with landmarks, pose.
            cv2.imshow('Landmark_Window', input_img)

        key = cv2.waitKey(1)
Exemple #25
0
    def run(self):
        aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
        parameters = aruco.DetectorParameters_create()
        font = cv2.FONT_HERSHEY_PLAIN

        camera_matrix, camera_distortion, _ = loadCameraParams('webcam')

        cap, resolution = init_cv()

        while True:
            ret, frame = cap.read()

            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            corners, ids, rejected = aruco.detectMarkers(
                image=gray,
                dictionary=aruco_dict,
                parameters=parameters,
                cameraMatrix=camera_matrix,
                distCoeff=camera_distortion)

            if ids is not None:
                #-- Estimate poses of detected markers
                rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
                    corners, marker_size, camera_matrix, camera_distortion)

                #-- Unpack the output, get only the first
                rvec, tvec = rvecs[0, 0, :], tvecs[0, 0, :]

                #-- Draw the detected marker and put a reference frame over it
                aruco.drawDetectedMarkers(frame, corners)

                # Draw the detected markers axis
                for i in range(len(rvecs)):
                    aruco.drawAxis(frame, camera_matrix, camera_distortion,
                                   rvecs[i, 0, :], tvecs[i, 0, :], 0.1)

                #-- Obtain the rotation matrix tag->camera
                R_ct = np.matrix(cv2.Rodrigues(rvec)[0])
                R_tc = R_ct.T

                #-- Now get Position and attitude of the camera respect to the marker
                pos_camera = -R_tc * np.matrix(tvec).T
                str_position = "Position error: x=%4.4f  y=%4.4f  z=%4.4f" % (
                    pos_camera[0], pos_camera[1], pos_camera[2])
                cv2.putText(frame, str_position, (0, 20), font, 1,
                            ugly_const.BLACK, 2, cv2.LINE_AA)

                #-- Get the attitude of the camera respect to the frame
                roll_camera, pitch_camera, yaw_camera = rotationMatrixToEulerAngles(
                    R_flip * R_tc)
                str_attitude = "Anglular error: roll=%4.4f  pitch=%4.4f  yaw (z)=%4.4f" % (
                    math.degrees(roll_camera), math.degrees(pitch_camera),
                    math.degrees(yaw_camera))
                cv2.putText(frame, str_attitude, (0, 40), font, 1,
                            ugly_const.BLACK, 2, cv2.LINE_AA)

                drawHUD(frame, resolution, yaw_camera)

                self.ctrl_message.errorx = pos_camera[0]
                self.ctrl_message.errory = pos_camera[1]
                self.ctrl_message.errorz = pos_camera[2]
                self.ctrl_message.erroryaw = math.degrees(yaw_camera)

            cv2.imshow('frame', frame)

            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                cap.release()
                cv2.destroyAllWindows()
                break
 def __init__(self):
     self.aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
     self.params = aruco.DetectorParameters_create()
     self.cap = cv2.VideoCapture(0)
     self.has_screen = bool(os.environ.get('DISPLAY', None))
def Follow():
    global CornerCounter
    cap = cv2.VideoCapture(0)
    print(cap.isOpened())
    time.sleep(1)
    _, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 50, 150, apertureSize=3)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_250)
    parameters = aruco.DetectorParameters_create()
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        gray, aruco_dict, parameters=parameters)

    for i in range(4):
        _, img = cap.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 50, 150, apertureSize=3)
        aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_250)
        parameters = aruco.DetectorParameters_create()
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)

        for x in range(0, len(ids)):

            Tempx = corners[x][0][0][0] + corners[x][0][1][0] + corners[x][0][
                2][0] + corners[x][0][3][0]
            Tempx = Tempx / 4
            Tempy = corners[x][0][0][1] + corners[x][0][1][1] + corners[x][0][
                2][1] + corners[x][0][3][1]
            Tempy = Tempy / 4

            if ids[x] == CVConstants.corner_ids[0]:
                globals.CornerPositions[0] = [Tempx, Tempy]
            elif ids[x] == CVConstants.corner_ids[1]:
                globals.CornerPositions[1] = [Tempx, Tempy]
            elif ids[x] == CVConstants.corner_ids[2]:
                globals.CornerPositions[2] = [Tempx, Tempy]
            elif ids[x] == CVConstants.corner_ids[3]:
                globals.CornerPositions[3] = [Tempx, Tempy]

    #get all vertices:
    lowest_sum = globals.CornerPositions[0][0] + globals.CornerPositions[0][1]
    highest_sum = lowest_sum
    bottom_right = globals.CornerPositions[0]
    bottom_left = globals.CornerPositions[0]
    top_right = globals.CornerPositions[0]
    top_left = globals.CornerPositions[0]

    taken = [0, 0]

    for i in range(4):
        cx, cy = globals.CornerPositions[i]
        corner_sum = cx + cy
        if corner_sum < lowest_sum:
            lowest_sum = corner_sum
            bottom_right = globals.CornerPositions[i]
            taken[0] = i
        elif corner_sum > highest_sum:
            highest_sum = corner_sum
            top_left = globals.CornerPositions[i]
            taken[1] = i
    # Assign the two remaining corners; break after the first match so the
    # second loop still has an untaken index left to pick up.
    for i in range(4):
        if i not in taken:
            bottom_left = globals.CornerPositions[i]
            taken.append(i)
            break
    for i in range(4):
        if i not in taken:
            top_right = globals.CornerPositions[i]
            taken.append(i)
            break
    if bottom_left[0] > top_right[0]:
        bottom_left, top_right = top_right, bottom_left

    N = len(globals.Grid)
    hx = (top_left[0] - bottom_right[0]) / (N - 1)
    hy = (top_left[1] - bottom_right[1]) / (N - 1)

    for i in range(N):
        for j in range(N):
            globals.Grid[i][j][0] = bottom_right[0] + hx * i
            globals.Grid[i][j][1] = bottom_right[1] + hy * j

    while True:
        _, img = cap.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 50, 150, apertureSize=3)

        font = cv2.FONT_HERSHEY_SIMPLEX

        aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_250)
        parameters = aruco.DetectorParameters_create()

        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)

        CornerPositions = globals.CornerPositions

        cv2.circle(img,
                   (int(CornerPositions[0][0]), int(CornerPositions[0][1])),
                   20, (0, 255, 0), -1)
        cv2.circle(img,
                   (int(CornerPositions[1][0]), int(CornerPositions[1][1])),
                   20, (0, 255, 0), -1)
        cv2.circle(img,
                   (int(CornerPositions[2][0]), int(CornerPositions[2][1])),
                   20, (0, 250, 0), -1)
        cv2.circle(img,
                   (int(CornerPositions[3][0]), int(CornerPositions[3][1])),
                   20, (0, 0, 255), -1)

        cv2.line(img, (int(CornerPositions[0][0]), int(CornerPositions[0][1])),
                 (int(CornerPositions[1][0]), int(CornerPositions[1][1])),
                 (255, 0, 0), 2)
        cv2.line(img, (int(CornerPositions[0][0]), int(CornerPositions[0][1])),
                 (int(CornerPositions[3][0]), int(CornerPositions[3][1])),
                 (255, 0, 0), 2)
        cv2.line(img, (int(CornerPositions[3][0]), int(CornerPositions[3][1])),
                 (int(CornerPositions[2][0]), int(CornerPositions[2][1])),
                 (255, 0, 0), 2)
        cv2.line(img, (int(CornerPositions[2][0]), int(CornerPositions[2][1])),
                 (int(CornerPositions[1][0]), int(CornerPositions[1][1])),
                 (255, 0, 0), 2)

        if corners:
            for x in range(0, len(ids)):
                if (len(corners[x][0]) < 4):
                    print("No 4 corners?")
                    print(corners[x][0])
                if ids[x] in CVConstants.bot_ids:
                    idd = ids[x]
                    index = -1
                    for i in range(len(CVConstants.bot_ids)):
                        if idd == CVConstants.bot_ids[i]:
                            index = i
                            break
                    if index == -1:
                        continue

                    globals.RobotState[index][0] = (
                        corners[x][0][0][0] + corners[x][0][1][0] +
                        corners[x][0][2][0] + corners[x][0][3][0]) / 4
                    globals.RobotState[index][1] = (
                        corners[x][0][0][1] + corners[x][0][1][1] +
                        corners[x][0][2][1] + corners[x][0][3][1]) / 4

                    cv2.line(
                        img,
                        (int(corners[x][0][2][0]), int(corners[x][0][2][1])),
                        (int(corners[x][0][1][0]), int(corners[x][0][1][1])),
                        (65, 255, 32), 2)

                    heading = getAngleBetweenPoints(int(corners[x][0][2][0]),
                                                    int(corners[x][0][2][1]),
                                                    int(corners[x][0][1][0]),
                                                    int(corners[x][0][1][1]))
                    heading_degrees = math.degrees(heading)
                    globals.RobotState[index][2] = heading_degrees
                    cv2.putText(img, str(heading_degrees), (100, 200), font,
                                0.5, (0, 255, 0), 2, cv2.LINE_AA)

        img = aruco.drawDetectedMarkers(img, corners, ids, (0, 255, 255))
        cv2.imshow('img', img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
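# A minimal, standalone sketch of the per-marker geometry used in Follow()
# above: the centre is the mean of the four corner points that detectMarkers
# returns per marker, and the heading is the angle of the vector between two
# of those corners. getAngleBetweenPoints is assumed to reduce to atan2 on the
# pixel deltas; the demo corner values are made up.
import math
import numpy as np

def marker_centre_and_heading(marker_corners):
    """marker_corners: array of shape (1, 4, 2), one entry of the detectMarkers output."""
    pts = np.asarray(marker_corners, dtype=float).reshape(4, 2)
    cx, cy = pts.mean(axis=0)                    # centre = average of the 4 corners
    dx = pts[1, 0] - pts[2, 0]                   # vector from corner 2 to corner 1,
    dy = pts[1, 1] - pts[2, 1]                   # matching the heading line drawn above
    heading_degrees = math.degrees(math.atan2(dy, dx))
    return (cx, cy), heading_degrees

demo = np.array([[[100, 100], [200, 100], [200, 200], [100, 200]]])
print(marker_centre_and_heading(demo))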
Exemple #28
def augment_reality(path):
    #Load reference image and replacement image
    replacement_img = cv2.imread(path)
    board_img = cv2.imread("board_aruco.png")

    #Detect markers in reference image and get origin points
    board_img_gray = cv2.cvtColor(board_img, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters = aruco.DetectorParameters_create()
    board_markers, board_ids, _ = aruco.detectMarkers(board_img_gray,
                                                      aruco_dict,
                                                      parameters=parameters)
    orig_points = np.asarray(board_markers).reshape((-1, 2))

    # Using webcam
    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()  #640x480
        frame_copy = frame.copy()

        # Detecting markers in real time
        gray_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
        parameters = aruco.DetectorParameters_create()
        markers, ids, _ = aruco.detectMarkers(gray_image,
                                              aruco_dict,
                                              parameters=parameters)

        if len(markers) == 0:
            print("No markers found.")
        else:
            dest_points = np.asarray(markers).reshape((-1, 2))

            # Find the reference coordinate of the detected markers
            ids = np.asarray(ids)
            detected_orig_points = []
            for marker_id in ids:
                index = np.argwhere(board_ids == marker_id)[0][0]
                for i in range(4):
                    detected_orig_points.append(orig_points[(index * 4) + i])
            detected_orig_points = np.asarray(detected_orig_points).astype(int)

            # Black image with replacement image over markers
            homography = cv2.findHomography(detected_orig_points,
                                            dest_points)[0]
            h, w, _ = frame.shape
            warped_image = cv2.warpPerspective(replacement_img, homography,
                                               (w, h), frame_copy)

            # Optional -- draw detected markers in image
            # aruco.drawDetectedMarkers(frame, markers)

            # Overlapping images
            mask = warped_image > 0
            frame = np.where(mask, warped_image, frame)

        # Show image, press q to exit
        cv2.imshow('clean image', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Clean everything up
    cap.release()
    cv2.destroyAllWindows()
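# A minimal sketch of the overlay step in augment_reality() above: a homography
# maps the reference marker corners onto the corners detected in the live
# frame, the replacement image is warped with it, and any non-black warped
# pixel replaces the corresponding frame pixel. Point values and image sizes
# below are illustrative only.
import cv2
import numpy as np

orig_points = np.array([[0, 0], [300, 0], [300, 300], [0, 300]], dtype=np.float32)
dest_points = np.array([[120, 80], [380, 90], [390, 350], [110, 340]], dtype=np.float32)

H, _ = cv2.findHomography(orig_points, dest_points)

replacement = np.full((300, 300, 3), 255, dtype=np.uint8)   # stand-in replacement image
frame = np.zeros((480, 640, 3), dtype=np.uint8)             # stand-in camera frame

warped = cv2.warpPerspective(replacement, H, (frame.shape[1], frame.shape[0]))
mask = warped.any(axis=2, keepdims=True)                    # True where the warp wrote pixels
composite = np.where(mask, warped, frame)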
Exemple #29
cap = cv2.VideoCapture(0)
# cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640);
# cap.set(cv2.CAP_PROP_FRAME_HEIGHT,480);
camera_matrix = np.array([[1776, 0, 762], [0, 1780, 1025], [0, 0, 1]],
                         dtype=float)
dist_coeffs = np.array([[0, 0, 0, 0]], dtype=float)

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    #print(frame.shape) #480x640
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_50)
    parameters = aruco.DetectorParameters_create()

    #print(parameters)
    '''    detectMarkers(...)
        detectMarkers(image, dictionary[, corners[, ids[, parameters[,
        rejectedImgPoints]]]]) -> corners, ids, rejectedImgPoints
        '''
    # lists of ids and the corners belonging to each id
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        gray, aruco_dict, parameters=parameters)
    print(ids)

    # It's working.
    # My problem was that the cellphone put black all around it. The algorithm
    # depends very much upon finding rectangular black blobs.
    colored = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
    cv2.imshow('frame', colored)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
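# The snippet above defines camera_matrix and dist_coeffs but never uses them.
# A minimal, standalone sketch of the pose step it sets up, assuming the legacy
# cv2.aruco API used throughout these examples and a marker side length
# (marker_length, metres) chosen here purely for illustration.
import cv2
import cv2.aruco as aruco
import numpy as np

camera_matrix = np.array([[1776, 0, 762], [0, 1780, 1025], [0, 0, 1]], dtype=float)
dist_coeffs = np.zeros((1, 4))
marker_length = 0.05

img = np.full((480, 640), 255, dtype=np.uint8)   # stand-in grey image (contains no markers)
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_50)
parameters = aruco.DetectorParameters_create()
corners, ids, _ = aruco.detectMarkers(img, aruco_dict, parameters=parameters)

if ids is not None:
    rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
        corners, marker_length, camera_matrix, dist_coeffs)
    print(rvecs, tvecs)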
Exemple #30
def aruco_fun():
	global pipe, cameraMatrix, distCoeffs, depth_intrinsics
	axis = np.float32([[3,0,0], [0,3,0], [0,0,-3]]).reshape(-1,3)
	if True:
		
		frames = pipe.wait_for_frames()
		align_to = rs.stream.color
		align = rs.align(align_to)
		aligned_frames = align.process(frames)
		depth_frame = aligned_frames.get_depth_frame()
		color_frame = aligned_frames.get_color_frame()
		color_img = np.array(color_frame.get_data())
		color_img_temp = copy.deepcopy(color_img)
		depth_img = np.array(depth_frame.get_data())
		frame = color_img_temp 
		gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
		aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
		parameters =  aruco.DetectorParameters_create()
		corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
		#print(corners,ids)
		#center_p(corners, depth_img,cameraMatrix)
		#if False:
		if ids is not None and len(ids) > 0:
			global wheel_chair_translation_vectors, find_wheel_chair
			#print("len",len(ids[0]))
			num = int(len(ids))
			for id_obj in range(num):

				if ids[id_obj] == 6:
					find_wheel_chair = 1
					wheel_chair_translation_vectors = get_xyz(corners[id_obj], depth_img,cameraMatrix,depth_intrinsics)
				elif ids[id_obj]!=6:
					if find_wheel_chair == 0:
						continue
					obj_translation_vectors = get_xyz(corners[id_obj], depth_img,cameraMatrix,depth_intrinsics)
					p = obj_translation_vectors - wheel_chair_translation_vectors
					print(ids[id_obj], " ", "position vector is", p)
					#process_data(ids[id_obj],p)
		gray = aruco.drawDetectedMarkers(gray, corners)
		cv2.imshow('frame',gray)
		if cv2.waitKey(1) & 0xFF == ord('q'):
			cv2.destroyAllWindows()
			#break
	if False:
	#while(True):
		frames = pipe.wait_for_frames()
		
		color_frame = frames.get_color_frame()
		# Convert images to numpy arrays
		color_image = np.asanyarray(color_frame.get_data())
		gray = cv2.cvtColor(color_image.copy(), cv2.COLOR_RGB2GRAY)
		corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, ARUCO_DICT, parameters=ARUCO_PARAMETERS)
		corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
				image = gray,
				board = board,
				detectedCorners = corners,
				detectedIds = ids,
				rejectedCorners = rejectedImgPoints,
				cameraMatrix = cameraMatrix,
				distCoeffs = distCoeffs)
		ProjectImage = color_image.copy()
		ProjectImage = aruco.drawDetectedMarkers(ProjectImage, corners, borderColor=(0, 0, 255))
		print(ids)
		if ids is not None and len(ids) > 0:
			# Estimate the posture per each Aruco marker
			rotation_vectors, translation_vectors, _objPoints = aruco.estimatePoseSingleMarkers(corners, 1, cameraMatrix, distCoeffs)
			#global wheel_chair_rotation_vectors,  wheel_chair_translation_vectors, find_wheel_chair ,w2c
			print("len",len(ids[0]))
			num = int(len(ids))
			for id_obj in range(num):

				if ids[id_obj] == 6:
					find_wheel_chair = 1
					wheel_chair_rotation_vectors =cv2.Rodrigues( rotation_vectors[id_obj][0])[0]
					wheel_chair_translation_vectors = translation_vectors[id_obj]
					c2w = np.c_[wheel_chair_rotation_vectors,wheel_chair_translation_vectors[0]]
					temp = np.array([0,0,0,1])
					c2w = np.r_[c2w, [temp]]
					#w2c = np.linalg.inv(c2w) 
					w2c = np.c_[wheel_chair_rotation_vectors.T,-wheel_chair_rotation_vectors.T.dot(wheel_chair_translation_vectors[0])]
					w2c = np.r_[w2c, [temp]]
				elif ids[id_obj]!=6:
					if find_wheel_chair == 0:
						continue
					obj_rotation_vectors =cv2.Rodrigues( rotation_vectors[id_obj][0])[0]
					obj_translation_vectors = translation_vectors[id_obj]
					c2o = np.c_[obj_rotation_vectors,obj_translation_vectors[0]]

					temp = np.array([0,0,0,1])
					c2o = np.r_[c2o, [temp]]					
					w2o = w2c.dot(c2o)
					#p = w2o[:,3][:3]
					p = compute_w2o(wheel_chair_rotation_vectors,wheel_chair_translation_vectors,obj_rotation_vectors,obj_translation_vectors)
					
					print(ids[id_obj], " ", "position vector is", p)
					#process_data(ids[id_obj],p)
				
			ids = 1

			for rvec in rotation_vectors:
				rotation_mat = cv2.Rodrigues(rvec[0])[0]
				ids = ids+1
				

			for rvec, tvec in zip(rotation_vectors, translation_vectors):
				if len(sys.argv) == 2 and sys.argv[1] == 'cube':
					try:
						imgpts, jac = cv2.projectPoints(axis, rvec, tvec, cameraMatrix, distCoeffs)
						ProjectImage = drawCube(ProjectImage, corners, imgpts)
					except:
						continue
				else:    
					ProjectImage = aruco.drawAxis(ProjectImage, cameraMatrix, distCoeffs, rvec, tvec, 1)
		cv2.imshow('ProjectImage', ProjectImage)
		#frame_markers = aruco.drawDetectedMarkers(frame.copy(), corners, ids)
		#cv2.imshow('frame',frame_markers)
		#end = datetime.now()
		#exec_time = end - start
		#print("aruco control")
		#print(exec_time,exec_time.total_seconds())
		if cv2.waitKey(1) & 0xFF == ord('q'):
			cv2.destroyAllWindows()
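# A minimal sketch of the frame composition done in the second branch above:
# each marker pose (rvec, tvec) in the camera frame is lifted to a 4x4
# homogeneous transform, the wheelchair transform is inverted, and composing
# the two expresses the object marker in the wheelchair frame. The original's
# compute_w2o helper is assumed to do the equivalent; the poses below are
# made-up values.
import cv2
import numpy as np

def to_homogeneous(rvec, tvec):
    R, _ = cv2.Rodrigues(np.asarray(rvec, dtype=float))
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = np.asarray(tvec, dtype=float).ravel()
    return T

def invert_homogeneous(T):
    R, t = T[:3, :3], T[:3, 3]
    Ti = np.eye(4)
    Ti[:3, :3] = R.T
    Ti[:3, 3] = -R.T @ t
    return Ti

c2w = to_homogeneous([0.0, 0.0, 0.1], [0.2, 0.0, 1.0])    # wheelchair marker (id 6) in camera frame
c2o = to_homogeneous([0.0, 0.1, 0.0], [-0.1, 0.3, 1.2])   # object marker in camera frame

w2o = invert_homogeneous(c2w) @ c2o    # object marker expressed in the wheelchair frame
print("position vector:", w2o[:3, 3])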