def detect():
    cameraMatrix = np.array([[8.2177218160147447e+02, 0., 3.4694289806342221e+02],[0., 8.2177218160147447e+02, 2.4795144956871457e+02],[0., 0., 1.]])
    distCoeffs = np.array([4.4824308523616324e-02, -7.4951985348854000e-01, 3.7539708742088725e-03, 8.8931335565222442e-03, 3.7214188475390984e+00])
    # type declarations
    rvecs = np.array([])
    tvecs = np.array([])
    
    # Capture frame-by-frame
    ret, frame = cap.read()
    #print(frame.shape) #480x640
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters =  aruco.DetectorParameters_create()
    #print(parameters)
 
    '''detectMarkers(...)
    detectMarkers(image, dictionary[, corners[, ids[, parameters[, rejectedImgPoints]]]]) -> corners, ids, rejectedImgPoints
    '''
    # lists of ids and the corners belonging to each id
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
    #print(corners)
 
    # It's working.
    # My problem was that the cellphone put black all around it. The algorithm
    # depends very much upon finding rectangular black blobs.

    if len(corners) > 0:
        rvecs, tvecs = aruco.estimatePoseSingleMarkers(corners, 0.05, cameraMatrix, distCoeffs)  
        frame = aruco.drawAxis(frame, cameraMatrix, distCoeffs, rvecs, tvecs, 0.1)
        frame = aruco.drawDetectedMarkers(frame, corners, ids)
    return frame, rvecs, tvecs
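Note: depending on the OpenCV version, aruco.estimatePoseSingleMarkers returns either two values (rvecs, tvecs) or three (rvecs, tvecs, objPoints), which is why the examples on this page unpack it differently. A minimal version-tolerant sketch (not part of the original example):

import cv2.aruco as aruco

def estimate_pose(corners, marker_len, camera_matrix, dist_coeffs):
    # Works whether this binding returns 2 or 3 values.
    ret = aruco.estimatePoseSingleMarkers(corners, marker_len, camera_matrix, dist_coeffs)
    rvecs, tvecs = ret[0], ret[1]
    return rvecs, tvecs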
Example #2
def detect():
    # camera: c270
    cameraMatrix = np.array([[8.2177218160147447e+02, 0.0                   , 3.4694289806342221e+02],
                             [0.0                   , 8.2177218160147447e+02, 2.4795144956871457e+02],
                             [0.0                   , 0.0                   , 1.0                   ]])
    distCoeffs = np.array([4.4824308523616324e-02, -7.4951985348854000e-01, 3.7539708742088725e-03, 8.8931335565222442e-03, 3.7214188475390984e+00])

    rvecs = np.array([])
    tvecs = np.array([])
    
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters =  aruco.DetectorParameters_create()
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)

    if len(corners) > 0:
        rvecs, tvecs = aruco.estimatePoseSingleMarkers(corners, 0.05, cameraMatrix, distCoeffs)
        """
        frame = aruco.drawAxis(frame, cameraMatrix, distCoeffs, rvecs, tvecs, 0.1)
        frame = aruco.drawDetectedMarkers(frame, corners, ids)
        """
        
    return frame, rvecs, tvecs
Example #3
    time1 = time.time()
    ret, frame = cap.read()
    # operations on the frame
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    # lists of ids and the corners belonging to each id
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        gray, aruco_dict, parameters=parameters)
    # cv2.line(frame, (640, 360), (860, 360), (0, 0, 255), 5)
    # cv2.line(frame, (640, 360), (640, 580), (0, 255, 0), 5)
    frame = draw_axis(frame, cx, cy)
    # index = 0

    if ids is not None:
        # time1 = time.time()
        ret1 = aruco.estimatePoseSingleMarkers(corners=corners,
                                               markerLength=0.1,
                                               cameraMatrix=mtx,
                                               distCoeffs=dist)
        rvec, tvec = ret1[0][0, 0, :], ret1[1][0, 0, :]
        # -- Draw the detected marker and put a reference frame over it
        aruco.drawDetectedMarkers(frame, corners, ids)
        aruco.drawAxis(frame, mtx, dist, rvec, tvec, 0.1)
        (rvec - tvec).any()  # get rid of that nasty numpy value array error
        str_position0 = "Marker Position in Camera frame: x=%f  y=%f  z=%f" % (
            tvec[0], tvec[1], tvec[2])
        cv2.putText(frame, str_position0, (0, 50), font, 1, (0, 255, 0), 2,
                    cv2.LINE_AA)

        # Transformation matrix from marker to camera
        rmat1, j5 = cv2.Rodrigues(rvec)
        pose = rmat1.transpose()
        tvec1 = np.dot(-pose, tvec)
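The last lines above invert the marker pose: with R = Rodrigues(rvec), the camera position expressed in the marker frame is -R^T · tvec. The same step as a standalone sketch (assuming only cv2 and numpy):

import cv2
import numpy as np

def invert_pose(rvec, tvec):
    # Marker pose in the camera frame -> camera pose in the marker frame.
    R, _ = cv2.Rodrigues(rvec)
    R_inv = R.T                                   # the inverse of a rotation matrix is its transpose
    t_inv = -R_inv @ np.asarray(tvec).reshape(3, 1)
    return R_inv, t_inv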
Example #4
    keyb_frame = np.zeros_like(frame)

    if mode == "typing" or mode == "keyboard_calibration":
        gray_frame       = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        corners, ids, _  = aruco.detectMarkers(gray_frame, aruco_dict, parameters=parameters)
        keyb_aruco_pos.x = keyb_aruco_pos.y  = keyb_aruco_pos.theta = -1

        if np.all(ids != None):
            for i in range(0, ids.size):
                M = cv2.moments(corners[i])
                if ids[i,0] == 10:
                    keyb_aruco_pos.x = kx = int(M["m10"] / M["m00"])
                    keyb_aruco_pos.y = ky = int(M["m01"] / M["m00"])
                    keyb_aruco_pos.theta = slope_deg

                    rvec, tvec ,_ = aruco.estimatePoseSingleMarkers(corners[i], 0.05, mtx, dist)
                    aruco.drawAxis(frame, mtx, dist, rvec, tvec, 0.1)
                    rmat, _  = cv2.Rodrigues(rvec)
                    _,_,rz   = np.degrees(rotation_matrix_to_euler_angles(rmat))
                    # keyb_aruco_pos.theta = np.round(rz, 2)

                    cv2.putText(frame, "angle= " + str(rz), (kx, ky+20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 2)

        keyb_aruco_pos_pub.publish(keyb_aruco_pos)
        aruco.drawDetectedMarkers(frame, corners, ids)

    if detections is not None:
        tracked_objects = mot_tracker.update(detections.cpu())
        unique_labels   = detections[:, -1].cpu().unique()

        if  tracked_objects.size != 0:
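rotation_matrix_to_euler_angles used above is not defined in this fragment; a common implementation of the standard decomposition (a sketch, under the assumption that this is what the author used) is:

import math
import numpy as np

def rotation_matrix_to_euler_angles(R):
    # Decompose a 3x3 rotation matrix into x, y, z Euler angles (radians).
    sy = math.sqrt(R[0, 0] ** 2 + R[1, 0] ** 2)
    if sy > 1e-6:
        x = math.atan2(R[2, 1], R[2, 2])
        y = math.atan2(-R[2, 0], sy)
        z = math.atan2(R[1, 0], R[0, 0])
    else:
        # singular case (gimbal lock)
        x = math.atan2(-R[1, 2], R[1, 1])
        y = math.atan2(-R[2, 0], sy)
        z = 0.0
    return np.array([x, y, z])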
Example #5
    def __call__(self):
        #turn multiprocessing on

        print("Trying to detect a target in a new frame.")
        """Detects ARUCO targets."""
        self.detected = False
        gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        status = "No Marker Detected"
        print(gray)
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, self.aruco_dict, parameters=self.parameters)

        print("Got here0.")
        if ids is not None:
            #self.frame = aruco.drawDetectedMarkers(self.frame, corners, ids)
            rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
                corners, .203, self.camera_matrix, self.dist_coeff)
            for i in range(len(ids)):
                #self.frame =aruco.drawAxis(self.frame,self.camera_matrix,self.dist_coeff,rvec[i],tvec[i],.1)
                eyedee = ids[i][0]
                if eyedee in [t.properties["ident"] for t in self.targs]:
                    #for each target that we recognize
                    targ = self.get_target(eyedee)
                    targ.props_are_set = True  #set a flag that we have seen this particular target
                    targ.r_mat = np.matrix(cv2.Rodrigues(rvec[i])[0])
                    targ.t_vec = np.matrix(tvec[i]).T

                    camera_points = cv2.projectPoints(
                        np.array([(targ.properties["pos_rel"]).T]), rvec[i],
                        tvec[i], self.camera_matrix, self.dist_coeff)
                    camera_points = np.array(camera_points[0][0][0])

                    #obtain the position of the (0,0,0) point of the camera in the real world
                    targ.position_camera = -targ.r_mat.T * targ.t_vec
                    targ.d_cam_image = np.linalg.norm(
                        targ.position_camera + (targ.properties["pos_rel"]).T)

                    #store position of camera target
                    targ.camera_frame_loc = camera_points

                    ## draw a lil circle
                    status = "Marker detected, x: %.2f, y: %.2f" % (
                        camera_points[0], camera_points[1])
        #print("Distance to target %.2f"%targ.d_cam_image)
        #cv2.circle(self.frame,(int(camera_points[0]),int(camera_points[1])),4,(255,0,0))
        print("Got here1.")
        ## Display the resulting frame
        #cv2.putText(self.frame, status, (20,30), cv2.FONT_HERSHEY_SIMPLEX, .5, (0,0,255),2)
        #self.out.write(self.frame)

        camera_positions = np.array([0, 0])
        d_cam_image = 0
        count = 0
        print("Got here3.")

        #somehow fuse the measurements from each target (average for now)
        for t in self.targs:
            if t.props_are_set:
                camera_positions += t.camera_frame_loc
                d_cam_image += t.d_cam_image
                count += 1
                t.props_are_set = False  #reset flag
        if count >= 1:
            #if we detected at least 1 target
            camera_positions /= count
            #populate these variables so that they are passed to the copter commands
            self.cX = camera_positions[0]
            self.cY = camera_positions[1]
            self.d_cam_image = d_cam_image / count
            self.detected = True  #indicate that we have detected at least one target
        print("Got here.")
        return [self.detected, self.cX, self.cY, self.d_cam_image]
Example #6
    # blur it and convert it to the HSV
    # color space
    #blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters = aruco.DetectorParameters_create()
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        gray, aruco_dict, parameters=parameters)

    # draw markers
    frame = aruco.drawDetectedMarkers(frame, corners, ids)

    if corners:

        rvecs, tvecs, trash = aruco.estimatePoseSingleMarkers(
            corners, size_of_marker, mtx, dist)

        tAverage = None
        rAverage = None

        # get reference coordinates
        if refID in ids:
            idx = np.where(ids == refID)
            rRefMtx, _jacob = cv2.Rodrigues(rvecs[idx])
            tRefVec = tvecs[idx]
            frame = aruco.drawAxis(frame, mtx, dist, rRefMtx, tRefVec,
                                   length_of_axis)
        else:
            print("ref Marker not found!!!!!!!!!!!!!!!!!!!!!!!")
            break
Example #7
    
    #image read
    ret, image = cap.read()
    
    
    #Aruco Marker detection
    corners, ids, rejectedImgPoints = aruco.detectMarkers(image=image, dictionary=arucoDict, cameraMatrix=cameraMatrix, distCoeff=distCoeffs, parameters = parameters)
   

    # Draw the detected markers on the image
    image = aruco.drawDetectedMarkers(image, corners, ids, borderColor=(0, 255, 0))
   
    # estimate pose
    # the second parameter is the marker's side length (unit is meters);
    # it affects the translation and rotation vectors
    rvecs, tvecs, _objPoints = aruco.estimatePoseSingleMarkers(corners, 0.1, cameraMatrix, distCoeffs) 
 
    
    
    if ids is not None:
        for i in range(0,ids.size):
            # draw each marker's axes on screen
            aruco.drawAxis(image, cameraMatrix, distCoeffs, rvecs[i], tvecs[i], 0.1 )
            """
            # This computes the x, y center coordinates in pixels; it was not used, and the translation vector was used instead.
            # The code is kept here for reference.
            #calculate center coordinates
            #x = (corners[i][0][0][0] + corners[i][0][1][0] + corners[i][0][2][0] + corners[i][0][3][0]) / 4
            #y = (corners[i][0][0][1] + corners[i][0][1][1] + corners[i][0][2][1] + corners[i][0][3][1]) / 4
            
            # pixel scaling: camera-to-marker distance 188 cm
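The commented-out pixel-center computation above can be written compactly with numpy; a small sketch (also not used by the original, shown only for reference):

import numpy as np

def marker_center_px(marker_corners):
    # marker_corners has shape (1, 4, 2): the four corner points of one marker in pixels.
    return marker_corners[0].mean(axis=0)  # (x, y) center of the marker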
Example #8
def vision():

    # Load the camera calibration values
    Camera = np.load('Calibration_Surface_Book_Rear_Camera.npz')
    CM = Camera['CM']  # camera matrix
    dist_coef = Camera['dist_coef']  # distortion coefficients from the camera

    aruco_dict = aruco.Dictionary_get(
        aruco.DICT_4X4_50)  # Load the aruco dictionary
    pa = aruco.DetectorParameters_create()  # Set the detection parameters

    # Select the correct camera (0) = front camera, (1) = rear camera
    cap = cv2.VideoCapture(1)

    # Set the width and height of the camera to 640x480
    cap.set(3, 640)
    cap.set(4, 480)

    # Create two opencv named windows
    cv2.namedWindow("frame-image", cv2.WINDOW_AUTOSIZE)

    # Position the window
    cv2.moveWindow("frame-image", 0, 0)

    t_end = time.time() + 10

    # Execute this continuously
    while time.time() < t_end:
        # Capture current frame from the camera
        ret, frame = cap.read()

        # Convert the image from the camera to Gray scale
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Run the detection formula
        corners, ids, rP = aruco.detectMarkers(gray, aruco_dict)

        # # Count the number of Arucos visible
        # try:
        #     IDScount = len(ids)
        # except:
        #     IDScount = 0

        # Calculate the pose of the markers
        rvecs, tvecs, _objPoints = aruco.estimatePoseSingleMarkers(
            corners, 53, CM, dist_coef
        )  # <<<< IMPORTANT: number needs changing to width of printed arucos (in mm)
        # Draw the detected markers as an overlay
        out = aruco.drawDetectedMarkers(frame, corners, ids)

        # Create Coordinate Storage Arrays
        X = []  #X Coordinate Locations Array
        Y = []
        Z = []
        ID = []

        # Run loop if ids are detected
        if ids is not None:
            for i, id in enumerate(ids):
                # Overlay the axis on the image
                out = aruco.drawAxis(out, CM, dist_coef, rvecs[i][0][:],
                                     tvecs[i][0][:], 30)
                # Print the tvecs transformation matrix, i.e. the Aruco coordinates
                # print("X = {:4.1f} Y = {:4.1f} Z = {:4.1f} ID = {:2d}".format(tvecs[i][0][0], tvecs[i][0][1], tvecs[i][0][2], ids[i][0]))
                X.append(tvecs[i][0][0])
                Y.append(tvecs[i][0][1])
                Z.append(tvecs[i][0][2])
                ID.append(ids[i][0])
                # debugTEST = []

        # Display the original frame in a window and aruco markers
        cv2.imshow('frame-image', frame)

        # If the button q is pressed in one of the windows
        if cv2.waitKey(20) & 0xFF == ord('q'):
            # Exit the While loop
            break

    # When everything done, release the capture
    cap.release()
    # close all windows
    cv2.destroyAllWindows()
    # # exit the kernel
    # exit(0)
    return X, Y, Z, ID, rvecs
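vision() returns parallel lists of marker coordinates and IDs (in mm, because the marker size passed to estimatePoseSingleMarkers is given in mm); a short usage sketch:

X, Y, Z, ID, rvecs = vision()
for marker_id, x, y, z in zip(ID, X, Y, Z):
    print("marker {} at X={:4.1f} Y={:4.1f} Z={:4.1f} (mm)".format(marker_id, x, y, z))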
Example #9
def main(marker_length, marker_separation):

    # opencv/aruco settings
    cam = cv2.VideoCapture(0)
    ardict = aruco.Dictionary_get(aruco.DICT_4X4_1000)
    board = aruco.GridBoard_create(4, 5, marker_length, marker_separation,
                                   ardict)

    parameters = aruco.DetectorParameters_create()
    parameters.minMarkerPerimeterRate = 0.03

    # load camera parameters
    with open("calib.json", 'r') as f:
        data = json.load(f)

    dr = DrawClass(data.get('camera_matrix'), data.get('dist_coeff'))
    # and make some variables for aruco
    mtx = dr.mtx
    dist = dr.dist

    # print(cv2.calibrationMatrixValues(mtx, (640, 480), 3.6, 2.7))

    # global variables
    cs = CoordinatesSystem()

    while cam.isOpened():
        k = cv2.waitKey(16)

        ret, frame = cam.read()
        info_screen = np.ones((500, 1000, 3), np.uint8)
        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)

        corners, ids, rejected_img_points = aruco.detectMarkers(
            gray, ardict, parameters=parameters)

        if ids is not None:
            frame = dr.draw_markers(frame, corners, ids)

            r_vecs, t_vecs, _ = aruco.estimatePoseSingleMarkers(
                corners, marker_length, mtx, dist)

            markers = [
                Marker(ids[i][0], r_vecs[i], t_vecs[i])
                for i in range(len(corners))
            ]
            frame = dr.draw_axis_of_markers(frame, markers, 1)
            info_screen = dr.print_text(info_screen, markers)

            if len(markers) == 2:
                # rv, tv = relative_position(markers[0].r_vec, markers[0].t_vec, markers[1].r_vec, markers[1].t_vec)
                tv = relative_position2(markers[0].r_vec, markers[0].t_vec,
                                        markers[1].t_vec)
                info_screen = dr.draw_debug_lines(info_screen, tv)
                print(tv)

            if len(markers) == 1 and k == ord('p'):
                cs.reset(markers[0].r_vec, markers[0].t_vec)
                print(cs.rot_matrix)

            if len(markers) == 1 and cs.fcs_r_vec is not None:
                tv3 = cs.get_coordinates_in_cs(markers[0].t_vec)
                frame = dr.draw_axis_points(frame, cs.fcs_r_vec, cs.fcs_t_vec)
                info_screen = dr.draw_debug_lines(info_screen, tv3)
                print(tv3)

        cv2.line(frame, (320, 240), (320, 240), (0, 0, 0), 4)

        cv2.imshow("frame", frame)
        cv2.imshow("info", info_screen)

        if k == ord('q'):
            cam.release()
            break
Example #10
        ###########################################################################
        # TODO: Add validation here to reject IDs/corners not part of a gridboard #
        ###########################################################################

        # Outline all of the markers detected in our image
        QueryImg = aruco.drawDetectedMarkers(QueryImg,
                                             corners,
                                             borderColor=(0, 0, 255))

        # Require more than 3 markers before drawing axes
        if ids is not None and len(ids) > 3:
            # Estimate the posture of the gridboard, which is a construction of 3D space based on the 2D video
            #pose, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board, cameraMatrix, distCoeffs)
            #if pose:
            #    # Draw the camera posture calculated from the gridboard
            #    QueryImg = aruco.drawAxis(QueryImg, cameraMatrix, distCoeffs, rvec, tvec, 0.3)
            # Estimate the posture per each Aruco marker
            rvecs, tvecs = aruco.estimatePoseSingleMarkers(
                corners, 1, cameraMatrix, distCoeffs)
            for rvec, tvec in zip(rvecs, tvecs):
                QueryImg = aruco.drawAxis(QueryImg, cameraMatrix, distCoeffs,
                                          rvec, tvec, 1)
        # Display our image
        cv2.imshow('QueryImage', QueryImg)

    # Exit at the end of the video on the 'q' keypress
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()
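The commented-out branch above uses aruco.estimatePoseBoard to get a single pose for the whole gridboard. A hedged sketch of that call, using the names from the example above (in some OpenCV builds the initial rvec/tvec arguments are mandatory):

import numpy as np

rvec = np.zeros((3, 1))
tvec = np.zeros((3, 1))
retval, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board, cameraMatrix, distCoeffs, rvec, tvec)
if retval > 0:  # retval is the number of markers used to estimate the board pose
    QueryImg = aruco.drawAxis(QueryImg, cameraMatrix, distCoeffs, rvec, tvec, 0.3)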
Example #11
def detect_show_marker(img, gray, aruco_dict, parameters, cameraMatrix,
                       distCoeffs):
    detected_1, detected_2 = False, False
    i, j = None, None
    distance_1, distance_2 = None, None
    font = cv2.FONT_HERSHEY_SIMPLEX
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        gray, aruco_dict, parameters=parameters)
    img = aruco.drawDetectedMarkers(img, corners, ids)
    if ids is not None:
        i = 6  # Id of aruco - reference system.
        j = 5  # Id of target aruco.
        for k in range(0, len(ids)):
            rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(
                corners[k], 0.045, cameraMatrix, distCoeffs)
            if ids[k] == i:
                img = aruco.drawAxis(img, cameraMatrix, distCoeffs, rvec, tvec,
                                     0.05)
                c_rvec = rvec
                c_tvec = tvec
                distance_1 = tvec[0][0][2]
                detected_1 = True
            elif ids[k] == j:
                n_rvec = rvec
                n_tvec = tvec
                distance_2 = tvec[0][0][2]
                detected_2 = True
            if (detected_1 == True) and (detected_2 == True):
                frvec, ftvec = relatPos(c_rvec, c_tvec, n_rvec, n_tvec)
                """ frvec - orientation vector of the marker regarding the reference 
                system.
                ftvec -  position of aruco regarding the rs.
                n_tvec - position of aruco regarding camera. """

                # Orientation of the aruco marker relative to the reference system.
                angles0 = rotmtx_to_euler_angles(cv2.Rodrigues(frvec)[0])

                n_rmat = cv2.Rodrigues(n_rvec)[0]

                # Orientation of the aruco marker relative to the camera.
                angles1 = rotmtx_to_euler_angles(n_rmat)

                # Camera position relative to the aruco marker.
                pos_cam_to_aruco = -np.matrix(n_rmat).T * np.matrix(n_tvec).T

                cam_rotmtx = np.matrix(n_rmat).T

                # Inverse orientation: camera relative to the aruco marker.
                angles2 = rotmtx_to_euler_angles(cam_rotmtx)

                rmat = cv2.Rodrigues(c_rvec)[0]

                # Camera position relative to the reference system.
                pos_cam_to_rs = -np.matrix(rmat).T * np.matrix(c_tvec).T

                # Inverse orientation: camera relative to the reference system.
                angles3 = rotmtx_to_euler_angles(rmat)
    if (distance_1 is not None):
        cv2.putText(img, 'Id' + str(i) + ' %.2fcm' % (distance_1 * 100),
                    (0, 64), font, 1, (0, 255, 0), 2, cv2.LINE_AA)
    if (distance_2 is not None):
        cv2.putText(img, 'Id' + str(j) + ' %.2fcm' % (distance_2 * 100),
                    (0, 104), font, 1, (0, 255, 0), 2, cv2.LINE_AA)
    return cv2.imshow('frame', img)  # Final img.
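relatPos is not shown in this example; a sketch of one common way to express the target marker's pose relative to the reference marker (composing the inverse of the reference pose with the target pose, under the assumption that this matches the author's helper):

import cv2
import numpy as np

def relatPos(c_rvec, c_tvec, n_rvec, n_tvec):
    # Pose of the target marker n expressed in the frame of the reference marker c.
    R_c, _ = cv2.Rodrigues(c_rvec.reshape(3, 1))
    R_n, _ = cv2.Rodrigues(n_rvec.reshape(3, 1))
    R_rel = R_c.T @ R_n
    t_rel = R_c.T @ (n_tvec.reshape(3, 1) - c_tvec.reshape(3, 1))
    frvec, _ = cv2.Rodrigues(R_rel)
    return frvec, t_rel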
Example #12
def roda_todo_frame(imagem):
    #print("frame")

    try:
        cv_image = bridge.compressed_imgmsg_to_cv2(imagem,
                                                   "bgr8")  # compressed image
        #cv_image = cv2.flip(cv_image, -1) # Uncomment if using the real robot
        #cv_image = bridge.imgmsg_to_cv2(imagem, "bgr8")             # non-compressed image
        #cv_image = cv2.resize(cv_image,(cv_image.shape[1]*2,cv_image.shape[0]*2)) # resize image if necessary

        gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)
        print(ids)

        if ids is not None:
            #-- ret = [rvec, tvec, ?]
            #-- rvec = [[rvec_1], [rvec_2], ...] rotation vectors
            #-- tvec = [[tvec_1], [tvec_2], ...] translation vectors
            ret = aruco.estimatePoseSingleMarkers(corners, marker_size,
                                                  camera_matrix,
                                                  camera_distortion)
            rvec, tvec = ret[0][0, 0, :], ret[1][0, 0, :]

            #-- Draw a rectangle and display the Id of the detected marker
            aruco.drawDetectedMarkers(cv_image, corners, ids)
            aruco.drawAxis(cv_image, camera_matrix, camera_distortion, rvec,
                           tvec, 1)

            #-- Print tvec, the translation vector, in x y z
            str_position = "Marker x=%4.0f  y=%4.0f  z=%4.0f" % (
                tvec[0], tvec[1], tvec[2])
            print(str_position)
            cv2.putText(cv_image, str_position, (0, 100), font, 1, (0, 255, 0),
                        1, cv2.LINE_AA)

            ##############----- Axis Reference ------###########################
            # Reference line along X
            cv2.line(cv_image,
                     (int(cv_image.shape[1] / 2), int(cv_image.shape[0] / 2)),
                     ((int(cv_image.shape[1] / 2) + 50),
                      (int(cv_image.shape[0] / 2))), (0, 0, 255), 5)
            # Reference line along Y
            cv2.line(
                cv_image,
                (int(cv_image.shape[1] / 2), int(cv_image.shape[0] / 2)),
                (int(cv_image.shape[1] / 2), int(
                    (cv_image.shape[0] / 2 + 50))), (0, 255, 0), 5)

            #####################---- Euclidean Distance ----#####################
            # Compute the distance using only tvec, the translation vector
            # Either of the two forms below can be used
            distance = np.sqrt(tvec[0]**2 + tvec[1]**2 + tvec[2]**2)
            distancenp = np.linalg.norm(tvec)

            #-- Print distance
            str_dist = "Dist aruco=%4.0f  dis.np=%4.0f" % (distance,
                                                           distancenp)
            print(str_dist)
            cv2.putText(cv_image, str_dist, (0, 15), font, 1, (0, 255, 0), 1,
                        cv2.LINE_AA)

            #####################---- Distance from focal length ----#####################
            #https://www.pyimagesearch.com/2015/01/19/find-distance-camera-objectmarker-using-python-opencv/

            # raspicam v2 focal length
            FOCAL_LENGTH = 3.6  #3.04
            # pixels per unit of measure
            m = (camera_matrix[0][0] / FOCAL_LENGTH +
                 camera_matrix[1][1] / FOCAL_LENGTH) / 2
            # corners[0][0][0][0] = [ID][plane?][corner position (clockwise)][0=x value, 1=y value]
            pixel_length1 = math.sqrt(
                math.pow(corners[0][0][0][0] - corners[0][0][1][0], 2) +
                math.pow(corners[0][0][0][1] - corners[0][0][1][1], 2))
            pixel_length2 = math.sqrt(
                math.pow(corners[0][0][2][0] - corners[0][0][3][0], 2) +
                math.pow(corners[0][0][2][1] - corners[0][0][3][1], 2))
            pixlength = (pixel_length1 + pixel_length2) / 2
            dist = marker_size * FOCAL_LENGTH / (pixlength / m)

            #-- Print focal-length distance
            str_distfocal = "Dist focal=%4.0f" % (dist)
            print(str_distfocal)
            cv2.putText(cv_image, str_distfocal, (0, 30), font, 1, (0, 255, 0),
                        1, cv2.LINE_AA)

            ####################--------- draw the cube -----------#########################
            # https://github.com/RaviJoshii/3DModeler/blob/eb7ca48fa06ca85fcf5c5ec9dc4b562ce9a22a76/opencv/program/detect.py
            m = marker_size / 2
            pts = np.float32([[-m, m, m], [-m, -m, m], [m, -m, m], [m, m, m],
                              [-m, m, 0], [-m, -m, 0], [m, -m, 0], [m, m, 0]])
            imgpts, _ = cv2.projectPoints(pts, rvec, tvec, camera_matrix,
                                          camera_distortion)
            imgpts = np.int32(imgpts).reshape(-1, 2)
            cv_image = cv2.drawContours(cv_image, [imgpts[:4]], -1,
                                        (0, 0, 255), 4)
            for i, j in zip(range(4), range(4, 8)):
                cv_image = cv2.line(cv_image, tuple(imgpts[i]),
                                    tuple(imgpts[j]), (0, 0, 255), 4)
            cv_image = cv2.drawContours(cv_image, [imgpts[4:]], -1,
                                        (0, 0, 255), 4)

        # Display the image
        cv2.imshow("Camera", cv_image)
        cv2.waitKey(1)
    except CvBridgeError as e:
        print('ex', e)
Example #13
    def findMarker(self, req):
        
        
        self.rvecs_arr = np.zeros((3, NUMBER))
        self.tvecs_arr = np.zeros((3, NUMBER))
        res = aruco_infoResponse()

        for order in range (NUMBER):
            # Capturing each frame of our video stream
            # ret, self.QueryImg = self.cam.read()
            # frames = self.pipeline.wait_for_frames()
            # color_frame = frames.get_color_frame()
            # self.QueryImg = np.asanyarray(color_frame.get_data())
            if True:

                # grayscale image
                gray = cv2.cvtColor(self.QueryImg, cv2.COLOR_BGR2GRAY)

                # Detect Aruco markers
                corners, ids, _ = aruco.detectMarkers(gray, ARUCO_DICT, parameters=ARUCO_PARAMETERS)

                # Refine detected markers
                # Eliminates markers not part of our board, adds missing markers to the board
                # corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
                #         image = gray,
                #         board = board,
                #         detectedCorners = corners,
                #         detectedIds = ids,
                #         rejectedCorners = rejectedImgPoints,
                #         self.cameraMatrix = self.cameraMatrix,
                #         self.distCoeffs = self.distCoeffs)   

                ###########################################################################
                # TODO: Add validation here to reject IDs/corners not part of a gridboard #
                ###########################################################################

                # Require at least one detected marker before estimating pose
                if ids is not None and len(ids) > 0:
                    # Estimate the posture of the gridboard, which is a construction of 3D space based on the 2D video 
                    #pose, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board, self.cameraMatrix, self.distCoeffs)
                    #if pose:
                    #    # Draw the camera posture calculated from the gridboard
                    #    self.QueryImg = aruco.drawAxis(self.QueryImg, self.cameraMatrix, self.distCoeffs, rvec, tvec, 0.3)
                    # Estimate the posture per each Aruco marker
                    print('ids is ', ids)
                    self.rvecs, self.tvecs, _ = aruco.estimatePoseSingleMarkers(corners, self.markersize, self.cameraMatrix, self.distCoeffs)
                    print('fuckkk')           
                    for _id, rvec, tvec in zip(ids, self.rvecs, self.tvecs):
                        if _id[0] == req.id:
                            for i in range(3):
                                self.rvecs_arr[i][order] = rvec[0][i]
                                self.tvecs_arr[i][order] = tvec[0][i]
                        
                    #     self.QueryImg = aruco.drawAxis(self.QueryImg, self.cameraMatrix, self.distCoeffs, rvec, tvec, 0.02)
                cv2.waitKey(10)
                # Display our image
        # print('self.rvecs_arr = ', self.rvecs_arr)
        # print('self.tvecs_arr = ', self.tvecs_arr)
        cv2.destroyAllWindows()
        r_avg = np.zeros(3) 
        t_avg = np.zeros(3)

        ra = self.rvecs_arr[0].nonzero()
        
        rb = self.rvecs_arr[1].nonzero()
        rc = self.rvecs_arr[2].nonzero()
        tx = self.tvecs_arr[0].nonzero()
        ty = self.tvecs_arr[1].nonzero()
        tz = self.tvecs_arr[2].nonzero()
        ra = self.rvecs_arr[0][ra]
        rb = self.rvecs_arr[1][rb]
        rc = self.rvecs_arr[2][rc]
        tx = self.tvecs_arr[0][tx]
        ty = self.tvecs_arr[1][ty]
        tz = self.tvecs_arr[2][tz]
        ra = np.sort(ra, kind = 'quicksort')
        rb = np.sort(rb, kind = 'quicksort')
        rc = np.sort(rc, kind = 'quicksort')
        tx = np.sort(tx, kind = 'quicksort')
        ty = np.sort(ty, kind = 'quicksort')
        tz = np.sort(tz, kind = 'quicksort')
        r = np.array((ra, rb, rc))
        t = np.array((tx, ty, tz))
        for i in range(3):
            rv, tv = r[i], t[i]
            
            while np.std(rv) > 0.01 and len(rv) >= NUMBER*0.2:
                if abs(rv[0] - np.average(rv)) > abs(rv[-1] - np.average(rv)):
                    rv = np.delete(rv, 0)
                else:
                    rv = np.delete(rv, -1)
            while np.std(tv) > 0.01 and len(tv) >= NUMBER*0.2:
                if abs(tv[0] - np.average(tv)) > abs(tv[-1] - np.average(tv)):
                    tv = np.delete(tv, 0)
                else:
                    tv = np.delete(tv, -1)
            
            r_avg[i] = np.average(rv)
            t_avg[i] = np.average(tv)
        
        # print('[_id, r,t] = ', [_id, r,t])
        # res.ids.append(_id)
        # res.rvecs = np.append(res.rvecs, r_avg)
        # res.tvecs = np.append(res.tvecs, t_avg)
        res.rvecs = r_avg
        res.tvecs = t_avg
        print('res.rvecs is ', res.rvecs)
        print('res.tvecs is ', res.tvecs)
        result = np.array(())
        result = np.append(result, [np.copy(r_avg), np.copy(t_avg)])
        
        result = result.reshape(2,3)

        if self.name == 'test':
            # Outline all of the markers detected in our image
            self.QueryImg = aruco.drawDetectedMarkers(self.QueryImg, corners, borderColor=(0, 0, 255))
            self.QueryImg = aruco.drawAxis(self.QueryImg, self.cameraMatrix, self.distCoeffs, result[0], result[1], 0.02)
            print('ffffffuck')
            curr_path = os.path.dirname(os.path.abspath(__file__))
            filename = curr_path + "/pic/camera-pic-of-charucoboard-" +  str(int(self.frameId)) + ".jpg"
            cv2.imwrite(filename, self.QueryImg)
            self.frameId += 1
            # cv2.imwrite('./123%d.jpg'%self.cnd, self.QueryImg)
            # self.cnd += 1
            # cv2.namedWindow('Amanda', cv2.WINDOW_AUTOSIZE)
            # self.QueryImg = cv2.imread('./123%d.jpg'%self.cnd)
            # cv2.imshow('Amanda', self.QueryImg)
            # cv2.waitKey(1000)
            # cv2.destroyAllWindows()

            # time.sleep(2)
            print('------')
            # while not cv2.waitKey(1) & 0xFF == ord('q'):
            #     pass
            # cv2.destroyAllWindows()
        return res
Example #14
def aruco_fun():
	global pipe, cameraMatrix, distCoeffs, depth_intrinsics
	axis = np.float32([[3,0,0], [0,3,0], [0,0,-3]]).reshape(-1,3)
	if True:
		
		frames = pipe.wait_for_frames()
		align_to = rs.stream.color
		align = rs.align(align_to)
		aligned_frames = align.process(frames)
		depth_frame = aligned_frames.get_depth_frame()
		color_frame = aligned_frames.get_color_frame()
		color_img = np.array(color_frame.get_data())
		color_img_temp = copy.deepcopy(color_img)
		depth_img = np.array(depth_frame.get_data())
		frame = color_img_temp 
		gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
		aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
		parameters =  aruco.DetectorParameters_create()
		corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
		#print(corners,ids)
		#center_p(corners, depth_img,cameraMatrix)
		#if False:
		if ids is not None and len(ids) > 0:
			global wheel_chair_translation_vectors, find_wheel_chair
			#print("len",len(ids[0]))
			num = int(len(ids))
			for id_obj in range(num):

				if ids[id_obj] == 6:
					find_wheel_chair = 1
					wheel_chair_translation_vectors = get_xyz(corners[id_obj], depth_img,cameraMatrix,depth_intrinsics)
				elif ids[id_obj]!=6:
					if find_wheel_chair == 0:
						continue
					obj_translation_vectors = get_xyz(corners[id_obj], depth_img,cameraMatrix,depth_intrinsics)
					p = obj_translation_vectors - wheel_chair_translation_vectors
					print(ids[id_obj]," ","position vector is",p)
					#process_data(ids[id_obj],p)
		gray = aruco.drawDetectedMarkers(gray, corners)
		cv2.imshow('frame',gray)
		if cv2.waitKey(1) & 0xFF == ord('q'):
			cv2.destroyAllWindows()
			#break
	if False:
	#while(True):
		frames = pipe.wait_for_frames()
		
		color_frame = frames.get_color_frame()
		# Convert images to numpy arrays
		color_image = np.asanyarray(color_frame.get_data())
		gray = cv2.cvtColor(color_image.copy(), cv2.COLOR_RGB2GRAY)
		corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, ARUCO_DICT, parameters=ARUCO_PARAMETERS)
		corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
				image = gray,
				board = board,
				detectedCorners = corners,
				detectedIds = ids,
				rejectedCorners = rejectedImgPoints,
				cameraMatrix = cameraMatrix,
				distCoeffs = distCoeffs)
		ProjectImage = color_image.copy()
		ProjectImage = aruco.drawDetectedMarkers(ProjectImage, corners, borderColor=(0, 0, 255))
		print(ids)
		if ids is not None and len(ids) > 0:
			# Estimate the posture per each Aruco marker
			rotation_vectors, translation_vectors, _objPoints = aruco.estimatePoseSingleMarkers(corners, 1, cameraMatrix, distCoeffs)
			#global wheel_chair_rotation_vectors,  wheel_chair_translation_vectors, find_wheel_chair ,w2c
			print("len",len(ids[0]))
			num = int(len(ids))
			for id_obj in range(num):

				if ids[id_obj] == 6:
					find_wheel_chair = 1
					wheel_chair_rotation_vectors =cv2.Rodrigues( rotation_vectors[id_obj][0])[0]
					wheel_chair_translation_vectors = translation_vectors[id_obj]
					c2w = np.c_[wheel_chair_rotation_vectors,wheel_chair_translation_vectors[0]]
					temp = np.array([0,0,0,1])
					c2w = np.r_[c2w, [temp]]
					#w2c = np.linalg.inv(c2w) 
					w2c = np.c_[wheel_chair_rotation_vectors.T,-wheel_chair_rotation_vectors.T.dot(wheel_chair_translation_vectors[0])]
					w2c = np.r_[w2c, [temp]]
				elif ids[id_obj]!=6:
					if find_wheel_chair == 0:
						continue
					obj_rotation_vectors =cv2.Rodrigues( rotation_vectors[id_obj][0])[0]
					obj_translation_vectors = translation_vectors[id_obj]
					c2o = np.c_[obj_rotation_vectors,obj_translation_vectors[0]]

					temp = np.array([0,0,0,1])
					c2o = np.r_[c2o, [temp]]					
					w2o = w2c.dot(c2o)
					#p = w2o[:,3][:3]
					p = compute_w2o(wheel_chair_rotation_vectors,wheel_chair_translation_vectors,obj_rotation_vectors,obj_translation_vectors)
					
					print(ids[id_obj]," ","position vector is",p)
					#process_data(ids[id_obj],p)
				
			ids = 1

			for rvec in rotation_vectors:
				rotation_mat = cv2.Rodrigues(rvec[0])[0]
				ids = ids+1
				

			for rvec, tvec in zip(rotation_vectors, translation_vectors):
				if len(sys.argv) == 2 and sys.argv[1] == 'cube':
					try:
						imgpts, jac = cv2.projectPoints(axis, rvec, tvec, cameraMatrix, distCoeffs)
						ProjectImage = drawCube(ProjectImage, corners, imgpts)
					except:
						continue
				else:    
					ProjectImage = aruco.drawAxis(ProjectImage, cameraMatrix, distCoeffs, rvec, tvec, 1)
		cv2.imshow('ProjectImage', ProjectImage)
		#frame_markers = aruco.drawDetectedMarkers(frame.copy(), corners, ids)
		#cv2.imshow('frame',frame_markers)
		#end = datetime.now()
		#exec_time = end - start
		#print("aruco control")
		#print(exec_time,exec_time.total_seconds())
		if cv2.waitKey(1) & 0xFF == ord('q'):
			cv2.destroyAllWindows()
Example #15
    def set_goal(self, matrix, distortion):
        if self.set:
            if self.goal.visible:
                index = np.where(self.ids == 0)[0][0]
                self.goal.corners = [np.array(self.corners[index], np.float32)]
                self.goal.rvec, self.goal.tvec, _ = aruco.estimatePoseSingleMarkers(self.goal.corners, self.goal.length,
                                                                                    matrix, distortion)
                self.goal.rvec = self.goal.rvec.reshape(3, 1)
                self.goal.tvec = self.goal.tvec.reshape(3, 1)
                image_points_marker, jacobian = cv2.projectPoints(np.array([np.zeros((3,1))]), self.goal.rvec, self.goal.tvec, matrix, None)
                image_points_marker = image_points_marker.astype(int)
                image_point = np.array([[image_points_marker[0][0][0]], [image_points_marker[0][0][1]], [1]])
                Z_CONST = 0.000

                # calculate projection of center of goal marker on board plane
                temp_mat1 = np.matmul(self.inv_rot_mtx, np.matmul(self.inv_mtx, image_point))
                temp_mat2 = np.matmul(self.inv_rot_mtx, self.tvec)
                s = Z_CONST + temp_mat2[2]
                s /= temp_mat1[2]
                board_point = np.matmul(self.inv_rot_mtx, (s * np.matmul(self.inv_mtx, image_point) - self.tvec))

                # set z to Z_CONST again
                board_point[2] = Z_CONST

                # calculate intersection angle of line tvecCamera to boardPoint and board plane, calculate goal marker point on board plane
                board_normal = np.array([0, 0, 1])
                line_camera_point = board_point - self.tvec_camera
                alpha = np.arcsin(np.abs(np.matmul(board_normal, line_camera_point))
                                  / (np.linalg.norm(line_camera_point) * np.linalg.norm(board_normal)))

                # if clause to prevent tan(90) error
                if alpha > np.radians(89):
                    delta_board_point = 0
                else:
                    delta_board_point = (self.goal.heigth) / np.tan(alpha)

                line_camera_point_project_board = line_camera_point
                line_camera_point_project_board[2] = 0
                line_camera_point_project_board_unit = line_camera_point_project_board / np.linalg.norm(
                    line_camera_point_project_board)
                self.goal.position = board_point - delta_board_point * line_camera_point_project_board_unit

                # calculate rotation of goal in board coordinates
                self.goal.rot_mtx, jacobian = cv2.Rodrigues(self.goal.rvec)
                _, _, _, _, _, self.goal.rotation = cv2.RQDecomp3x3(np.matmul(self.goal.rot_mtx, np.transpose(self.rot_mtx)))
                rot_direction = np.matmul(self.goal.rotation, np.array([[1], [0], [0]]))
                self.goal.rotation_z = np.arctan2(rot_direction.item(1,0), rot_direction.item(0,0))
                self.goal.goalline_position = self.goal.position - self.goal.marker_position.item(0) * rot_direction
                arrow_start = self.goal.goalline_position + self.goal.length * rot_direction
                object_points = np.array([self.goal.goalline_position, arrow_start])
                image_points_arrow, jacobian = cv2.projectPoints(object_points, self.rvec, self.tvec, matrix, distortion)
                image_points_arrow = image_points_arrow.astype(int)
                self.goal.end = np.array([image_points_arrow[0][0][0], image_points_arrow[0][0][1]])
                self.goal.start = np.array([image_points_arrow[1][0][0], image_points_arrow[1][0][1]])

                # find valid goal and scoring polygon
                goal_x = self.goal.goalline_position - self.goal.position
                goal_x = goal_x / np.linalg.norm(goal_x)
                goal_y = np.array([[- goal_x[1][0]], [goal_x[0][0]], [0]])
                goalline_point = self.goal.goalline_position
                goalline_point.itemset(2, self.ball.radius)
                goal_area_fr_world = goalline_point + self.goal.goal_area_fr[0] * goal_x + self.goal.goal_area_fr[1] * goal_y
                goal_area_fl_world = goalline_point + self.goal.goal_area_fl[0] * goal_x + self.goal.goal_area_fl[1] * goal_y
                goal_area_br_world = goalline_point + self.goal.goal_area_br[0] * goal_x + self.goal.goal_area_br[1] * goal_y
                goal_area_bl_world = goalline_point + self.goal.goal_area_bl[0] * goal_x + self.goal.goal_area_bl[1] * goal_y
                scoring_area_fr_world = goalline_point + self.goal.goal_area_fr[0] * goal_x + self.goal.goal_area_fr[1] * goal_y
                scoring_area_fl_world = goalline_point + self.goal.goal_area_fl[0] * goal_x + self.goal.goal_area_fl[1] * goal_y
                scoring_area_br_world = goalline_point + self.goal.goal_area_br[0] * goal_x + self.goal.goal_area_br[1] * goal_y
                scoring_area_bl_world = goalline_point + self.goal.goal_area_bl[0] * goal_x + self.goal.goal_area_bl[1] * goal_y
                goal_area = np.array([goal_area_fr_world, goal_area_fl_world, goal_area_bl_world, goal_area_br_world])
                scoring_area = np.array([scoring_area_fr_world, scoring_area_fl_world, scoring_area_bl_world, scoring_area_br_world])
                self.goal.goal_area_points, jacobian = cv2.projectPoints(goal_area, self.rvec, self.tvec, matrix,
                                                                         distortion)
                self.goal.scoring_area_points, jacobian = cv2.projectPoints(scoring_area, self.rvec, self.tvec, matrix,
                                                                         distortion)
                self.goal.goal_area_points = self.goal.goal_area_points.astype(int)
                self.goal.scoring_area_points = self.goal.scoring_area_points.astype(int)

                self.goal.set = True
            else:
                self.goal.set = False
                print('Goal Not Visible')
        else:
            print('Field Not Set')
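The board-plane projection in set_goal solves for the scale s at which the back-projected image ray hits the plane Z = Z_CONST. The same math as a standalone sketch (inv_mtx is the inverse camera matrix, inv_rot_mtx and tvec the board pose; all names assumed from the method above):

import numpy as np

def backproject_to_plane(image_point, inv_mtx, inv_rot_mtx, tvec, z_const=0.0):
    # image_point: homogeneous pixel coordinates, shape (3, 1).
    ray = inv_rot_mtx @ (inv_mtx @ image_point)   # ray direction expressed in board coordinates
    cam = inv_rot_mtx @ tvec
    s = (z_const + cam[2]) / ray[2]               # scale at which the ray reaches Z = z_const
    return inv_rot_mtx @ (s * (inv_mtx @ image_point) - tvec)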
Example #16
print('camera matrix: ', camera_matrix)
print('dist_coeffs: ', dist_coeffs)

fourcc = cv2.VideoWriter_fourcc(*'XVID')
vwriter = cv2.VideoWriter('./out/dist_' + args.id + '.avi',
                          fourcc=fourcc,
                          fps=25,
                          frameSize=(640, 480),
                          isColor=True)

while True:
    try:
        ret, img = cam.read()
        corners, ids, reprojImgPts = aruco.detectMarkers(img, ar_dict)

        rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
            corners, 15.0, camera_matrix, dist_coeffs)
        # print('map_rvecs', map_rvecs)

        dist = np.linalg.norm(tvecs[1] - tvecs[0])

        if start_measure:
            if not offset:
                offset = soll - dist
                starttime = time.time()

            dist_cm.append(dist + offset)
            smoothed_cm.append(smooth(dist_cm[-5:], 5))
            timestamps.append(time.time() - starttime)

            points.setData(timestamps, smoothed_cm, pen='r')
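smooth() is not defined in this fragment; a plausible moving-average implementation consistent with the call smooth(dist_cm[-5:], 5) above (a sketch with a hypothetical body):

import numpy as np

def smooth(values, window):
    # Average of the last `window` samples (fewer if not enough are available yet).
    vals = np.asarray(values[-window:], dtype=float)
    return float(vals.mean())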
Example #17
def main():

    face_landmark_path = './shape_predictor_68_face_landmarks.dat'
    #flags/initializations to be set
    skip_frame = 3
    socket_connect = 1  # set to 0 if we are testing the code locally on the computer with only the realsense tracking.
    simplified_calib_flag = 0  # this is set to 1 if we want to do one-time calibration
    arucoposeflag = 1
    # img_idx keeps track of image index (frame #).
    img_idx = 0
    N_samples_calib = 10  # number of samples for computing the calibration matrix using homography

    if socket_connect == 1:
        #  create a socket object
        s = socket.socket()
        print("Socket successfully created")
        # reserve a port on your computer in our case it is 2020 but it can be anything
        port = 2020
        s.bind(('', port))
        print("socket binded to %s" % (port))

        # put the socket into listening mode
        s.listen(5)
        print("socket is listening")
        c, addr = s.accept()
        print('got connection from ', addr)

    if socket_connect == 1 and simplified_calib_flag == 0:
        arucoinstance = arucocalibclass()
        ReturnFlag = 1
        aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_250)
        marker_len = 0.0645
        MLRSTr = arucoinstance.startcamerastreaming(c, ReturnFlag, marker_len,
                                                    aruco_dict,
                                                    N_samples_calib)
        print(MLRSTr)
    elif socket_connect == 1 and simplified_calib_flag == 1:
        T_M2_RS = np.array([
            -1.0001641, 0.00756584, 0.00479072, 0.03984956, -0.00774137,
            -0.99988126, -0.03246199, -0.01359556, 0.00453644, -0.03251681,
            0.99971441, -0.00428408, 0., 0., 0., 1.
        ])
        T_M2_RS = T_M2_RS.reshape(4, 4)
        MLRSTr = simplified_calib(T_M2_RS, c)
    else:
        MLRSTr = np.array((1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1))
        MLRSTr = MLRSTr.reshape(4, 4)
        print(MLRSTr)
# end socket ######


# Configure depth and color streams of Realsense camera
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    # Start streaming
    profile = pipeline.start(config)
    align_to = rs.stream.color
    align = rs.align(align_to)

    # Intrinsics & Extrinsics of Realsense
    depth_intrin = profile.get_stream(
        rs.stream.depth).as_video_stream_profile().get_intrinsics()
    intr = profile.get_stream(
        rs.stream.color).as_video_stream_profile().get_intrinsics()
    mtx = np.array([[intr.fx, 0, intr.ppx], [0, intr.fy, intr.ppy], [0, 0, 1]])
    dist = np.array(intr.coeffs)
    #load cascade classifier training file for lbpcascade
    lbp_face_cascade = cv2.CascadeClassifier(
        "./haarcascade_frontalface_alt2.xml"
    )  #"/home/supriya/.local/lib/python3.6/site-packages/cv2/data/haarcascade_frontalface_alt2.xml") #cv2.CascadeClassifier('/home/supriya/supriya/FSA-Net/demo/lbpcascade_frontalface.xml')   #cv2.CascadeClassifier('data/lbpcascade_frontalface_improved.xml') # cv2.CascadeClassifier('/home/supriya/supriya/FSA-Net/demo/lbpcascade_frontalface.xml')
    facemark = cv2.face.createFacemarkLBF()
    facemark.loadModel('./lbfmodel.yaml')

    print('Start detecting pose ...')
    detected_pre = []

    while True:
        # get video frame
        frames = pipeline.wait_for_frames()
        aligned_frames = align.process(frames)
        color_frame = aligned_frames.get_color_frame()
        aligned_depth_frame = aligned_frames.get_depth_frame()

        if not aligned_depth_frame or not color_frame:
            continue
        # read color frame
        input_img = np.asanyarray(color_frame.get_data())
        #increment count of the image index
        img_idx = img_idx + 1
        img_h, img_w, _ = np.shape(input_img)

        # Process the first frame and every frame after "skip_frame" frames
        if img_idx == 1 or img_idx % skip_frame == 0:
            # convert image to grayscale
            gray_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)
            # detect faces using LBP detector
            faces = lbp_face_cascade.detectMultiScale(gray_img,
                                                      scaleFactor=1.1,
                                                      minNeighbors=5)
            #draw rectangle around detected face
            for (x, y, w, h) in faces:
                cv2.rectangle(input_img, (x, y), (x + w, y + h), (0, 255, 0),
                              2)

            depth_point = [0, 0, 0]
            if len(faces) > 0:
                #detect landmarks
                status, landmarks = facemark.fit(gray_img, faces)
                #draw dots on the detected facial landmarks
                for f in range(len(landmarks)):
                    cv2.face.drawFacemarks(input_img, landmarks[f])
                #get head pose
                reprojectdst, rotation_vec, rotation_mat, translation_vec, euler_angle, image_pts = get_head_pose(
                    landmarks[0][0], mtx, dist)
                # draw circle on image points (nose tip, corner of eye, lip and chin)
                for p in image_pts:
                    cv2.circle(input_img, (int(p[0]), int(p[1])), 3,
                               (0, 0, 255), -1)

                # draw line sticking out of the nose tip and showing the head pose
                (nose_end_point2D,
                 jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]),
                                               rotation_vec, translation_vec,
                                               mtx, dist)
                p1 = (int(image_pts[0][0]), int(image_pts[0][1]))
                p2 = (int(nose_end_point2D[0][0][0]),
                      int(nose_end_point2D[0][0][1]))
                cv2.line(input_img, p1, p2, (255, 0, 0), 2)

                # get 3D coord of nose - to get a more accurate estimation of the translation of the head
                depth = aligned_depth_frame.get_distance(
                    landmarks[0][0][30][0], landmarks[0][0][30][1])
                cv2.circle(input_img,
                           (landmarks[0][0][30][0], landmarks[0][0][30][1]), 3,
                           (0, 255, 0), -1)
                depth_point = rs.rs2_deproject_pixel_to_point(
                    depth_intrin,
                    [landmarks[0][0][30][0], landmarks[0][0][30][1]], depth)
                depth_point = np.array(depth_point)
                depth_point = np.reshape(depth_point, [1, 3])

                #check if the depth estimation is not zero and filters out faces within 0.8 m from the camera
                if (depth_point[0][2] != 0 and depth_point[0][2] < 0.8):  #
                    #Combine rotation matrix and translation vector (given by the depth point) to get the head pose
                    RSTr = np.hstack([rotation_mat, depth_point.transpose()])
                    RSTr = np.vstack([RSTr, [0, 0, 0, 1]])
                    print("head pose", RSTr)

                    # If arucoposeflag = 1, an Aruco marker will be detected and its transformed pose will be streamed to the AR headset, and the pose
                    # of the tracking parent will be updated to reflect the Aruco marker pose. This can be used to verify/test the accuracy of the calibration
                    if arucoposeflag == 1:
                        print("aruco")
                        gray = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)
                        # set dictionary size depending on the aruco marker selected
                        aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
                        # detector parameters can be set here (List of detection parameters[3])
                        parameters = aruco.DetectorParameters_create()
                        parameters.adaptiveThreshConstant = 10
                        # lists of ids and the corners belonging to each id
                        corners, ids, rejectedImgPoints = aruco.detectMarkers(
                            gray, aruco_dict, parameters=parameters)
                        # font for displaying text (below)
                        font = cv2.FONT_HERSHEY_SIMPLEX
                        # check if the ids list is not empty
                        if np.all(ids != None):
                            # estimate pose of each marker and return the values
                            rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
                                corners, 0.045, mtx, dist
                            )  # 0.0628 (0.061 if using Dell laptop - 95% zoom)
                            for i in range(0, ids.size):
                                # draw axis for the aruco markers
                                aruco.drawAxis(input_img, mtx, dist, rvec[i],
                                               tvec[i], 0.1)

                            # draw a square around the markers
                            aruco.drawDetectedMarkers(input_img, corners)
                            #Combine rotation matrix and translation vector to get Aruco pose
                            R_rvec = R.from_rotvec(rvec[0])
                            R_rotmat = R_rvec.as_matrix()
                            AruRSTr = np.hstack(
                                [R_rotmat[0], tvec[0].transpose()])
                            AruRSTr = np.vstack([AruRSTr, [0, 0, 0, 1]])
                            RSTr = AruRSTr
                            print("Aruco pose", AruRSTr)

                    # Since the pose detected by OpenCV is in a right-handed coordinate system, it needs to be converted to Unity's left-handed coordinate system
                    RSTr_LH = np.array([
                        RSTr[0][0], RSTr[0][2], RSTr[0][1], RSTr[0][3],
                        RSTr[2][0], RSTr[2][2], RSTr[2][1], RSTr[2][3],
                        RSTr[1][0], RSTr[1][2], RSTr[1][1], RSTr[1][3],
                        RSTr[3][0], RSTr[3][1], RSTr[3][2], RSTr[3][3]
                    ])  # converting to left handed coordinate system
                    RSTr_LH = RSTr_LH.reshape(4, 4)
                    #Compute the transformed pose to be streamed to MagicLeap
                    HeadPoseTr = np.matmul(MLRSTr, RSTr_LH)

                    if socket_connect == 1:
                        # Array to be sent
                        ArrToSend = np.array([
                            HeadPoseTr[0][0], HeadPoseTr[0][1],
                            HeadPoseTr[0][2], HeadPoseTr[0][3],
                            HeadPoseTr[1][0], HeadPoseTr[1][1],
                            HeadPoseTr[1][2], HeadPoseTr[1][3],
                            HeadPoseTr[2][0], HeadPoseTr[2][1],
                            HeadPoseTr[2][2], HeadPoseTr[2][3],
                            HeadPoseTr[3][0], HeadPoseTr[3][1],
                            HeadPoseTr[3][2], HeadPoseTr[3][3]
                        ])
                        # Send transformed pose to AR headset
                        dataTosend = struct.pack('f' * len(ArrToSend),
                                                 *ArrToSend)
                        c.send(dataTosend)
            else:
                print("No Face Detected")
            # display detected face with landmarks, pose.
            cv2.imshow('Landmark_Window', input_img)

        key = cv2.waitKey(1)
Example #18
                                                      (w, h))

    # undistort
    mapx, mapy = cv2.initUndistortRectifyMap(camera_matrix, dist_coeffs, None,
                                             newcameramtx, (w, h), 5)
    dst = cv2.remap(gray_d, mapx, mapy, cv2.INTER_LINEAR)
    # crop the image
    x, y, w, h = roi
    gray = dst[y:y + h, x:x + w]
    #cv2.imshow('frame',gray)

    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        gray, dictionary, parameters=arucoParams)

    if ids is not None:
        rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
            corners, markerLength, camera_matrix, dist_coeffs)
        #retval, rvec, tvec	=	cv.aruco.estimatePoseBoard(	corners, ids, board, camera_matrix, dist_coeffs)
        imgWithAruco = aruco.drawDetectedMarkers(gray, corners,
                                                 ids)  #, (0,255,0))
        if rvec is not None:
            objectPoints = np.asarray([[105, 105, 105]], dtype=float)
            imagePoints = cv2.projectPoints(objectPoints, rvec[0], tvec[0],
                                            camera_matrix, dist_coeffs)

            corner1 = np.append(corners[0][0][0],
                                [0]).transpose().reshape(3, 1)
            E0 = np.asarray([[markerLength / 2, 0, 0]]).transpose()
            #print('Tvec_0: ',tvec[0])
            #print('****')
            #print('Rvec_0: ',rvec[0])
            #print('****')
    def track(
        self,
        frame,
        id_to_find=None,
        marker_size=None,
    ):

        marker_found = False
        x = y = z = pitch_camera = x_camera = y_camera = z_camera = 0

        # -- Convert in gray scale
        gray = cv2.cvtColor(
            frame, cv2.COLOR_BGR2GRAY
        )  # -- remember, OpenCV stores color images in Blue, Green, Red

        # -- Find all the aruco markers in the image
        corners, ids, rejected = aruco.detectMarkers(
            image=gray,
            dictionary=self._aruco_dict,
            parameters=self._parameters,
            cameraMatrix=self._camera_matrix,
            distCoeff=self._camera_distortion,
        )
        pitch_marker, roll_marker, yaw_marker = None, None, None
        pitch_camera, roll_camera, yaw_camera = None, None, None

        planned_ids = []
        if ids is not None:
            planned_ids = list(itertools.chain(*ids))

        if id_to_find in planned_ids:
            index_id_to_find = planned_ids.index(id_to_find)

            marker_found = True
            # -- array of rotation and position of each marker in camera frame
            # -- rvec = [[rvec_1], [rvec_2], ...]    attitude of the marker respect to camera frame
            # -- tvec = [[tvec_1], [tvec_2], ...]    position of the marker in camera frame
            rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
                corners, marker_size, self._camera_matrix,
                self._camera_distortion)

            # -- Unpack the output
            rvec, tvec = rvecs[index_id_to_find][0], tvecs[index_id_to_find][0]

            x = tvec[0]
            y = tvec[1]
            z = tvec[2]

            # -- Obtain the rotation matrix tag->camera
            R_ct = np.matrix(cv2.Rodrigues(rvec)[0])
            R_tc = R_ct.T

            # -- Get the attitude in terms of euler 321 (Needs to be flipped first)
            (
                roll_marker,
                pitch_marker,
                yaw_marker,
            ) = self._rotationMatrixToEulerAngles(self._R_flip * R_tc)

            # -- Now get the position and attitude of the camera with respect to the marker
            pos_camera = -R_tc * np.matrix(tvec).T
            x_camera = pos_camera[0]
            y_camera = pos_camera[1]
            z_camera = pos_camera[2]

            (
                roll_camera,
                pitch_camera,
                yaw_camera,
            ) = self._rotationMatrixToEulerAngles(self._R_flip * R_tc)

        if yaw_marker is None:
            marker_found = False
            yaw_marker = 0

        if marker_found:
            roll_camera = math.degrees(roll_camera)
            yaw_camera = math.degrees(yaw_camera)
            pitch_camera = math.degrees(pitch_camera)
            roll_marker = math.degrees(roll_marker)
            yaw_marker = math.degrees(yaw_marker)
            pitch_marker = math.degrees(pitch_marker)
            x_camera = float(x_camera)
            y_camera = float(y_camera)
            z_camera = float(z_camera)

        result = (
            marker_found,
            x,
            y,
            z,
            x_camera,
            y_camera,
            z_camera,
            roll_marker,
            yaw_marker,
            pitch_marker,
            roll_marker,
            roll_camera,
            yaw_camera,
            pitch_camera,
        )
        return result
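
The track method above calls self._rotationMatrixToEulerAngles, which is not shown in this snippet. A minimal sketch of the standard ZYX ("euler 321") conversion it presumably wraps is given below; this is an assumption about the implementation, not the original code.

import math

def rotationMatrixToEulerAngles(R):
    # assumes R is a valid 3x3 rotation matrix; returns (roll, pitch, yaw) in radians
    sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
    singular = sy < 1e-6
    if not singular:
        roll = math.atan2(R[2, 1], R[2, 2])
        pitch = math.atan2(-R[2, 0], sy)
        yaw = math.atan2(R[1, 0], R[0, 0])
    else:
        # gimbal-lock case: yaw is not observable, fix it to 0
        roll = math.atan2(-R[1, 2], R[1, 1])
        pitch = math.atan2(-R[2, 0], sy)
        yaw = 0.0
    return roll, pitch, yaw
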
Example #20
0
def image_callback(msg):
    global g
    start_time = time.time()
    print("Received an image frame 1 !")
    global i
    global j
    global pos_x
    global pos_y
    global pos_z

    if i % 6 == 0:  # skip frames: only process every 6th frame
        try:
            # Convert your ROS Image message to OpenCV2
            cv2_img = bridge.imgmsg_to_cv2(msg, "bgr8")
        except CvBridgeError as e:
            print(e)
        else:
            QueryImg = cv2_img
            ret = True
            if ret == True:
                # grayscale image
                gray = cv2.cvtColor(QueryImg, cv2.COLOR_BGR2GRAY)
                # Detect Aruco markers
                # start_time = time.time()
                corners, ids, rejectedImgPoints = aruco.detectMarkers(
                    gray, ARUCO_DICT, parameters=ARUCO_PARAMETERS)

                # Refine detected markers
                # Eliminates markers not part of our board, adds missing markers to the board

                #Outline all of the markers detected in our image
                QueryImg = aruco.drawDetectedMarkers(QueryImg,
                                                     corners,
                                                     borderColor=(0, 0, 255))
                camcord = numpy.zeros((3, 6))

                # Require 15 markers before drawing axis
                if ids is not None and len(ids) > 0:
                    # Estimate the posture of the gridboard, which is a construction of 3D space based on the 2D video
                    #pose, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board, cameraMatrix, distCoeffs)
                    #if pose:
                    #    # Draw the camera posture calculated from the gridboard
                    #    QueryImg = aruco.drawAxis(QueryImg, cameraMatrix, distCoeffs, rvec, tvec, 0.3)
                    # Estimate the posture per each Aruco marker
                    rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
                        corners, 0.07, cameraMatrix, distCoeffs)
                    i = 0
                    arucopos = numpy.zeros((len(ids) + 1, 4))
                    for rvec, tvec in zip(rvecs, tvecs):
                        povec = restPovec(ids[i])
                        QueryImg = aruco.drawAxis(QueryImg, cameraMatrix,
                                                  distCoeffs, rvec, tvec, 0.1)
                        cv2.imwrite("1.png", QueryImg)
                        Rt, _Jacobian = cv2.Rodrigues(rvec)
                        tvec = numpy.transpose(tvec[0])
                        tvec = tvec.reshape(3, 1)
                        temp = numpy.concatenate(Rt)
                        temp = temp.reshape(3, 3)
                        transformation = numpy.append(temp, tvec, axis=1)
                        append = numpy.array([0, 0, 0, 1]).reshape(1, 4)
                        transformation = numpy.append(transformation,
                                                      append,
                                                      axis=0)
                        transformation = numpy.linalg.inv(transformation)
                        arucocord = numpy.matmul(transformation,
                                                 numpy.array([0, 0, 0, 1]))
                        cameracord = arucocord + povec
                        # print(cameracord)
                        arucopos[i, :] = povec
                        i = i + 1
                        if i == 1:
                            posavg = cameracord
                        elif i == len(ids):
                            posavg = cameracord + posavg
                            posavg = posavg / i  # output will have homogeneous coordinate = 2, because cameracord = arucocord + povec adds two ones
                            # print(posavg)
                            pos_x = posavg[0]
                            pos_y = posavg[1]
                            pos_z = posavg[2]
                            # Set up your subscriber and define its callback
                            targetx = 0.562
                            targety = 1.5742
                            targetz = 1.6742
                            print("pos_x", pos_x, "pos_y", pos_y, "pos_z",
                                  pos_z)
                            # if pos_x<targetx-0.2 or pos_x>targetx+0.2:

                            print(g)
                            if pos_z < targetz - 0.1 or pos_z > targetz + 0.1 and g == 0:
                                print("move zzzz")
                                # dy=
                                move1(0.03, -0.01, 0, 0, 0, 0)
                            else:
                                if g == 0:
                                    print("delay 5 once")
                                    time.sleep(5)
                                    g = 1
                                elif pos_x < targetx - 0.1 or pos_x > targetx + 0.1:
                                    print("move XXX")
                                    move1(0.005, 0.01, 0, 0, 0, 0)
                                    g = 1
                                else:
                                    land()
                                    time.sleep(30)
                            arucopos[i, :] = posavg
                        else:
                            posavg = cameracord + posavg
                    i = 0
        print("computation time", time.time() - start_time)
        print(i)
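
image_callback above depends on a restPovec helper that is not included in the snippet; from its use (cameracord = arucocord + povec, stored as a 4-vector) it appears to map a marker id to that marker's known position in the world frame in homogeneous coordinates. The lookup below is purely illustrative, with made-up positions.

import numpy

# hypothetical world positions of the markers, in meters, homogeneous coordinates
MARKER_WORLD_POS = {
    0: numpy.array([0.0, 0.0, 0.0, 1.0]),
    1: numpy.array([0.5, 0.0, 0.0, 1.0]),
}

def restPovec(marker_id):
    return MARKER_WORLD_POS[int(marker_id)]
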
Example #21
0
def detect_motion(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock

    # times = []

    # loop over frames from the video stream
    while True:
        # Start time
        # start = time.time()

        ret, frame = vs.read()

        # operations on the frame
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # set dictionary size depending on the aruco marker selected
        aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)

        # detector parameters can be set here (List of detection parameters[3])
        parameters = aruco.DetectorParameters_create()
        parameters.adaptiveThreshConstant = 10

        # lists of ids and the corners belonging to each id
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)

        # font for displaying text (below)
        font = cv2.FONT_HERSHEY_SIMPLEX

        # check if the ids list is not empty
        # if no check is added the code will crash
        if np.all(ids != None):

            # estimate pose of each marker and return the values
            # rvet and tvec-different from camera coefficients
            rvec, tvec = aruco.estimatePoseSingleMarkers(
                corners, 0.05, mtx, dist)
            # (rvec-tvec).any() # get rid of that nasty numpy value array error

            for i in range(0, ids.size):
                # draw axis for the aruco markers
                aruco.drawAxis(frame, mtx, dist, rvec[i], tvec[i], 0.1)

            # draw a square around the markers
            aruco.drawDetectedMarkers(frame, corners)

            # code to show ids of the marker found
            strg = ''
            for i in range(0, ids.size):
                strg += str(ids[i][0]) + ', '

            cv2.putText(frame, "Id: " + strg, (0, 64), font, 1, (0, 255, 0), 2,
                        cv2.LINE_AA)

        else:
            # code to show 'No Ids' when no markers are found
            cv2.putText(frame, "No Ids", (0, 64), font, 1, (0, 255, 0), 2,
                        cv2.LINE_AA)

        # end = time.time()
        #
        # times.append(end - start)
        #
        # if len(times) > 10:
        # 	times = times[:9]
        #
        # cv2.putText(frame, f"FPS: {round(len(times) / sum(times))}", (0, 100), font, 1, (0, 255, 0), 2, cv2.LINE_AA)

        # acquire the lock, set the output frame, and release the
        # lock
        with lock:
            outputFrame = frame.copy()
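
detect_motion above only writes outputFrame under the lock; in streaming apps of this shape the frame is usually consumed by a generator that JPEG-encodes the latest copy for an MJPEG response. The generate() sketch below is an assumption about that consumer, not part of the original example.

def generate():
    global outputFrame, lock
    while True:
        with lock:
            if outputFrame is None:
                continue
            # encode the most recent frame as JPEG
            flag, encoded = cv2.imencode(".jpg", outputFrame)
            if not flag:
                continue
        # emit the frame in multipart/x-mixed-replace format
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + bytearray(encoded) + b'\r\n')
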
Example #22
0
    client.loop(WORKER_TIME)
    while RUN:
        # client.loop(WORKER_TIME)
        ret, img = stream.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if CHECKPOINT:
            dst_pt = generate_dst_pt(width, height)
            CHECKPOINT = False

        draw_dst_pt(img, dst_pt)

        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)

        if np.all(ids != None):
            rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
                corners, 0.05, mtx, dist)  # Estimate pose of each marker
            for id in ids:
                direction_point, cntr = detect_direction(corners[0])
                CHECKPOINT = is_it_checkpoint(cntr, dst_pt)
                direction_point = find_direction_point(direction_point, cntr)
                abs_error_angle = find_angle_error(direction_point, cntr,
                                                   dst_pt)
                error_angle = find_angle_direction(direction_point, cntr,
                                                   dst_pt, abs_error_angle)
                cv2.line(img, direction_point, direction_point, (150, 0, 255),
                         1)
                cv2.line(img, cntr, dst_pt, (30, 255, 15), 1)
                cv2.line(img, cntr, direction_point, (250, 180, 140), 2)
                cv2.circle(img, direction_point, 1, (200, 55, 255), 15,
                           cv2.LINE_8)
                cv2.putText(img, str(error_angle), (25, 25), font, 0.5,
Example #23
0
bytes=''

while True:
    bytes+=stream.read(1024)
    a = bytes.find('\xff\xd8')
    b = bytes.find('\xff\xd9')
    if a!=-1 and b!=-1:
        jpg = bytes[a:b+2]
        bytes= bytes[b+2:]
        image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),cv2.IMREAD_COLOR)
        
        markerCorners, markerIds, rejectedImgPoints = aruco.detectMarkers(image, dictionary, cameraMatrix=cameraMatrix, distCoeff=distCoeff)
        if len(markerCorners) > 0:
            image = aruco.drawDetectedMarkers(image, markerCorners, markerIds)
            
            rvecs, tvecs, _objPoints = aruco.estimatePoseSingleMarkers(markerCorners, markerLength, cameraMatrix, distCoeff)
            for rvec, tvec in zip(rvecs, tvecs):
                image = aruco.drawAxis(image, cameraMatrix, distCoeff, rvec, tvec, length)
            '''
            
            valid, rvec, tvec = aruco.estimatePoseBoard(markerCorners, markerIds, board, cameraMatrix, distCoeff)
            if valid:
                image = aruco.drawAxis(image, cameraMatrix, distCoeff, rvec, tvec, length)
            '''
        else:
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(image,'No Markers Found',(30,250), font, 2, (255,255,255),2,cv2.LINE_AA)


        cv2.imshow('', image)
        cv2.waitKey(1)
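
The stream-parsing loop above is Python 2 code (str-based byte handling, np.fromstring). A hedged Python 3 equivalent of the same JPEG start/end-of-image scan would look roughly like this; buf is an illustrative variable name.

buf = b''
while True:
    buf += stream.read(1024)
    a = buf.find(b'\xff\xd8')  # JPEG start-of-image marker
    b = buf.find(b'\xff\xd9')  # JPEG end-of-image marker
    if a != -1 and b != -1:
        jpg = buf[a:b + 2]
        buf = buf[b + 2:]
        image = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
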
def main():

    ser = serial.Serial('COM3', 115200, timeout=0)
    print(ser.name)

    # targetState = (20,80,0)
    robotNode = 1
    robotTargetLoc1 = (80, 20)
    robotMap = []
    # robotLocation2 = (60,70)
    globalMap = initializeGraph()
    mapping.plotGraph(robotMap, [robotTargetLoc1])
    performRobotWriting = True

    #Holds the values of the four corners of the robotic environment. Goes clockwise starting with the top left corner
    #The ids for the aruco tags for each of the four corners goes from 0 to 3 clockwise, starting with the top left corner
    fourCorners = [[], [], [], []]

    vc = cv2.VideoCapture(1)

    # np.load("sample_images.npz")
    with np.load("sample_images.npz") as data:
        mtx = data['mtx']
        dist = data['dist']
    # ret,frame = vc.read()
    # image = frame
    # time.sleep(1)
    # ret,frame = vc.read()
    # image = frame

    aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
    parameters = aruco.DetectorParameters_create()
    iter = 0
    node = 1
    x = ''
    y = ''
    a = ''
    initialized = False
    initialxya = []
    iter = 0
    while (vc.isOpened()):
        # if iter % 90 == 0:
        #     plt.pause(0.001)
        iter = (iter + 1) % 25

        ret, frame = vc.read()
        image = frame
        # image = cv2.imread("52814747.png")
        # image = cv2.imread("arucoTestImage.png")

        # aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            image, aruco_dict, parameters=parameters)
        rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
            corners, 0.1, mtx, dist)
        rotations = []
        if ids is not None:
            for i in range(len(ids)):
                if ids[i] == 5:
                    if rvec[i][0][0] > 0:
                        # rvec[i][0][0] = -1 * rvec[i][0][0]
                        image = aruco.drawAxis(image, mtx, dist, rvec[i],
                                               tvec[i], 0.1)
                        a = rvec[i][0][1]
                        # print(rvec[i])
                        a = objectLocalization.convertAtoRadians(a)
                # if ids[i] < 5:
                #     print("Drawing ID: " + str(ids[i]) )
                #     print("before")
                #     print(rvec[i])
                #     image = aruco.drawAxis(image,mtx,dist,rvec[i],tvec[i],0.1)
                #     print("after")

        if ids is not None:
            for i in range(len(ids)):
                if ids[i] == 0:
                    fourCorners[0] = objectLocalization.getRectMid(
                        corners[i][0])
                elif ids[i] == 1:
                    fourCorners[1] = objectLocalization.getRectMid(
                        corners[i][0])
                elif ids[i] == 2:
                    fourCorners[2] = objectLocalization.getRectMid(
                        corners[i][0])
                elif ids[i] == 3:
                    fourCorners[3] = objectLocalization.getRectMid(
                        corners[i][0])
                elif ids[i] == 5:
                    if (len(fourCorners[0]) > 0 and len(fourCorners[1]) > 0
                            and len(fourCorners[2]) > 0
                            and len(fourCorners[3]) > 0):
                        [x, y] = objectLocalization.scalePoint(
                            fourCorners,
                            objectLocalization.getRectMid(corners[i][0]))
                        [x,
                         y] = objectLocalization.convertToRobotLocation(x, y)

        print(fourCorners)
        # print(corners)
        # print(ids)

        # print(corners, ids, rejectedImgPoints)

        #NOTE: Two lines below used to draw markers originally
        # aruco.drawDetectedMarkers(image, corners, ids)
        # aruco.drawDetectedMarkers(image, rejectedImgPoints, borderColor=(100, 0, 240))

        cv2.imshow('Video', image)

        key = cv2.waitKey(20)

        if key == 27:
            # exit on ESC
            break

        # iter = 0
        # while (iter < 5):
        temp = ser.read(1000)
        # temp = ser.readline()
        if temp != b'':
            if temp == b'q' or temp == b'Q':
                ser.close()
                break
            # print(temp)
            # if temp[0] == '$':
            if len(temp) > 0:
                RFIDinfo = parseRFIDInfo(temp)
                print("RFID Info: ")
                print(RFIDinfo)
                # print("RFIDInfo: " + str(RFIDinfo))
                if RFIDinfo != None:
                    if mapping.euclideanDistance(
                        (x, y), (RFIDinfo[0][0], RFIDinfo[0][1])
                    ) < 10 and mapping.euclideanDistance(
                        (x, y), robotTargetLoc1
                    ) < 10:  #Make sure we're in the range of the tag we think we are
                        (newTargetLoc, info, preprocessedMap
                         ) = mapping.updateMapandDetermineNextStep(
                             robotMap, globalMap, robotTargetLoc1, RFIDinfo)
                        print("------------------" + str(newTargetLoc) +
                              "---------------------")
                        if performRobotWriting:
                            message = prepRFIDInfo(robotTargetLoc1, info,
                                                   preprocessedMap)
                            message = message + "\r\n"
                            ser.write(str.encode(message))
                            print("***********Writing Message*************")
                            print(message)
                            ser.flush()

                        mapping.plotGraph(preprocessedMap, [robotTargetLoc1])
                        robotTargetLoc1 = newTargetLoc
                        # mapping.plotGraph(robotMap,[robotTargetLoc1])

                # if len(code) >= len(shortestSample):
                # # if not xVals and not yVals:
                #     tempState = (int(xVals),int(yVals),0)
                #     if tempState != (0,0,0):
                #         targetState = tempState
                #         print("------------- NEW TARGET STATE ----------")
                #         print(targetState)
        first = True
        job = multiprocessing.Process(target=plotMap, args=(robotMap, (x, y)))

        # time.sleep(0.1)
        # print(x == '')
        # print(y == '')
        # print(a)
        # time.sleep(0.05)
        if (x != '' and y != '' and a != ''):
            # print("x: " + str(x))
            # print("y: " + str(y))
            #
            #     plt.pause(0.001)
            # job.start()
            # job.join()
            # if iter % 45 == 0:
            # job.start()
            # mapping.plotGraph(robotMap,[(x,y)])
            # plotMap(robotMap,(x,y))
            # if first:
            #     job.start()

            if (
                    len(initialxya) < 10
            ):  #looking to find the average of the first several measurements to get an accurate starting point
                initialxya.append((x, y, a))

            else:
                if not initialized:
                    avex = 0
                    avey = 0
                    avea = 0
                    #NOTE: Possible bug could occur if the robot is aligned along 0 degrees since this could fluctuate
                    #between 0 and 2 pi for values (resulting in an average of pi, the exact opposite direction)
                    for i in range(10):
                        avex = avex + initialxya[i][0]
                        avey = avey + initialxya[i][1]
                        avea = avea + initialxya[i][2]
                    avex = avex / len(initialxya)
                    avey = avey / len(initialxya)
                    avea = avea / len(initialxya)
                    prevxya = (avex, avey, avea)
                    prevxya2 = (avex, avey, avea)
                    initialized = True
                    # print(initialxya)
                    previouslyMissed = False
                    falsePrev = prevxya
                else:
                    # if withinRange(a,prevxya2[2], math.pi * 1/ 2) or (withinRange(a,falsePrev[2],math.pi * 1 /2) and previouslyMissed): #Make sure the angle doesn't flip an unreasonable amount
                    #     if withinRange(a,prevxya2[2], math.pi * 1/ 2) or (withinRange(a,falsePrev[2],math.pi * 1 /2) and previouslyMissed): #Make sure the angle doesn't flip an unreasonable amount

                    # print("Target State: " + str(targetState))
                    # print("A: " + str(a))
                    if (True):
                        # if (not withinRange(a,prevxya[2] - math.pi,math.pi*4/8)):
                        # if (withinRange(a,prevxya2[2], math.pi * 3/ 4) and withinRange(a,prevxya[2], math.pi * 2/4)) or (previouslyMissed and withinRange(a,falsePrev[2],math.pi * 1 /8)): #3/4 1/2Make sure the angle doesn't flip an unreasonable amount
                        message = robotControl.prepareControlMessage(
                            (x, y, a), robotTargetLoc1)
                        message = "$" + str(robotNode) + str(
                            message) + '\r\n'  # prepend the robot node number
                        ser.write(str.encode(message))
                        # print(message)
                        ser.flush()
                        # if (withinRange(a,falsePrev[2],math.pi * 1 /2) and previouslyMissed):
                        #     prevxya2 = falsePrev
                        # else:
                        # prevxya2 = prevxya
                        prevxya2 = prevxya
                        prevxya = (x, y, a)
                        # print(initialxya)
                        # print((avex,avey,avea))
                        # ser.close()
                        # time.sleep(0.1)
                        # print("Just flushed")
                    else:
                        # print("MISSED IT")
                        # print(prevxya)
                        # print(prevxya2)
                        # print(x)
                        # print(y)
                        # print("A: " + str(a))
                        falsePrev = (x, y, a)
                        previouslyMissed = True

    cv2.waitKey(0)
    cv2.destroyAllWindows()
    message = "$" + str(
        robotNode) + "f 0\r\n"  #preappend the robot node number
    ser.write(str.encode(message))
    ser.close()
Example #25
0
        corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)

        if ids is not None:
                rvec = np.zeros((ids.shape[0], 3))
                tvec = np.zeros((ids.shape[0], 3))
     
                gray = aruco.drawDetectedMarkers(gray, corners, borderColor=(0, 255, 0))
                cameraMatrix = np.array([[631.43076033,   0,         319.46293155],[  0,         630.14567027, 241.75853213],[  0,           0,           1,        ]])
                distCoeffs =  np.array([[ 0.06375513, -0.05092762,  0.00133275, -0.00200469, -0.56225284]])
                #print(cameraMatrix)
                ids = ids.reshape(1,-1)
                #print([np.array(corners[0][0])])
                for i in range(ids.shape[1]):
                    cor = [np.array(corners[i][0])]
                    if(ids[0,i]==CT_id):
                        rvec[i], tvec[i], _ = aruco.estimatePoseSingleMarkers(cor, CT_t, cameraMatrix, distCoeffs, rvec[i], tvec[i])
                        
                    if(ids[0,i]==TR_id):
                        rvec[i], tvec[i], _ = aruco.estimatePoseSingleMarkers(cor, TR_t, cameraMatrix, distCoeffs, rvec[i], tvec[i])
                        
                    if(ids[0,i]==TL_id):
                        rvec[i], tvec[i], _ = aruco.estimatePoseSingleMarkers(cor, TL_t, cameraMatrix, distCoeffs, rvec[i], tvec[i])
                        
                    if(ids[0,i]==BR_id):
                        rvec[i], tvec[i], _ = aruco.estimatePoseSingleMarkers(cor, BR_t, cameraMatrix, distCoeffs, rvec[i], tvec[i])
                        
                    if(ids[0,i]==BL_id):
                        rvec[i], tvec[i], _ = aruco.estimatePoseSingleMarkers(cor, BL_t, cameraMatrix, distCoeffs, rvec[i], tvec[i])
                        #print(tvec[i])
                #print(rvec)
                #print(tvec)
def detect():

    if len(sys.argv) < 2:
        with_feed = False
    elif sys.argv[1] == '-withfeed':
        with_feed = True
    else:
        with_feed = False

    RESOLUTION = 90  # Use this to set the resolution for video feed

    DEADZONE_RIGHT = 1.45
    DEADZONE_LEFT = -DEADZONE_RIGHT

    cap = cv2.VideoCapture(0)

    cap.set(cv2.CAP_PROP_FRAME_WIDTH, RESOLUTIONS[RESOLUTION][0])
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, RESOLUTIONS[RESOLUTION][1])

    corners = np.array([[0, 0]] * 4)

    # Length of the AR Tag
    # In meters
    marker_length = 0.06
    marker_size = 0.01

    ar_dict = ar.Dictionary_get(ar.DICT_6X6_250)
    parameters = ar.DetectorParameters_create()

    calibration_file = "calibration.xml"
    calibration_params = cv2.FileStorage(calibration_file,
                                         cv2.FILE_STORAGE_READ)

    camera_matrix = calibration_params.getNode("cameraMatrix").mat()
    dist_coeffs = calibration_params.getNode("distCoeffs").mat()

    ret, frame = cap.read()
    size = frame.shape

    focal_length = size[1]
    center = (size[1] / 2, size[0] / 2)

    camera_matrix = np.array([[focal_length, 0, center[0]],
                              [0, focal_length, center[1]], [0, 0, 1]],
                             dtype='double')

    while True:
        ret, frame = cap.read()

        size = frame.shape

        picture = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        corners, ids, rejected_img_points = ar.detectMarkers(
            picture, ar_dict, parameters=parameters)

        if with_feed:
            picture = ar.drawDetectedMarkers(picture, corners)

        if len(corners) == 0:
            continue

        # A tag is detected
        print('Corners:', corners[0])

        # Get rotation and translation vectors
        rvec, tvec, _ = ar.estimatePoseSingleMarkers(corners[0], marker_length,
                                                     camera_matrix,
                                                     dist_coeffs)

        object_x = tvec[0][0][0]
        object_z = tvec[0][0][2]

        print('X Distance', object_x)
        print('Z Distance', object_z)

        direction = get_direction(object_x, object_z)

        print('Direction: ', direction)

        if DEADZONE_LEFT < direction < 0:
            print('Turning Left')
        elif 0 < direction < DEADZONE_RIGHT:
            print('Turning Right')
        else:
            print('Going Straight')

        if with_feed:
            picture = ar.drawAxis(picture, camera_matrix, dist_coeffs, rvec,
                                  tvec, marker_size)
            cv2.imshow('frame', picture)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
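
detect() above steers on the value returned by get_direction, which is not defined in the snippet. Since that value is compared against deadzones of roughly ±1.45, one plausible reading is the horizontal bearing of the tag in radians; the sketch below is only that assumption.

import math

def get_direction(object_x, object_z):
    # bearing of the tag relative to the camera's optical axis, in radians
    return math.atan2(object_x, object_z)
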
Example #27
0
def lander():
    global first_run, notfound_count, found_count, marker_size, start_time, vs
    global p, i, d, pid, marker_size

    if first_run == 0:
        print("First run of lander!!")
        first_run = 1
        start_time = time.time()
    frame = vs.read()
    frame = cv2.resize(frame, (horizontal_res, vertical_res))
    frame_np = np.array(frame)
    gray_img = cv2.cvtColor(frame_np, cv2.COLOR_BGR2GRAY)
    ids = ''
    corners, ids, rejected = aruco.detectMarkers(image=gray_img,
                                                 dictionary=aruco_dict,
                                                 parameters=parameters)

    print("DRONE IS IN THE LAND MODE - checking in loop")
    if vehicle.mode != 'LAND':
        vehicle.mode = VehicleMode("LAND")
        while vehicle.mode != 'LAND':
            print('WAITING FOR DRONE TO ENTER LAND MODE')
            time.sleep(1)
    try:
        print('started TRY block')
        if ids is not None and ids[0] == id_to_find:
            print('I SAW IT')
            ret = aruco.estimatePoseSingleMarkers(corners,
                                                  marker_size,
                                                  cameraMatrix=cameraMatrix,
                                                  distCoeffs=cameraDistortion)
            (rvec, tvec) = (ret[0][0, 0, :], ret[1][0, 0, :])
            x = '{:.2f}'.format(tvec[0])
            y = '{:.2f}'.format(tvec[1])
            z = '{:.2f}'.format(tvec[2])

            y_sum = 0
            x_sum = 0

            x_sum = corners[0][0][0][0] + corners[0][0][1][0] + corners[0][0][
                2][0] + corners[0][0][3][0]
            y_sum = corners[0][0][0][1] + corners[0][0][1][1] + corners[0][0][
                2][1] + corners[0][0][3][1]

            x_avg = x_sum * .25
            y_avg = y_sum * .25

            x_ang = (x_avg - horizontal_res * .5) * (horizontal_fov /
                                                     horizontal_res)
            y_ang = (y_avg - vertical_res * .5) * (vertical_fov / vertical_res)

            ########### PID CONTROL ##########

            pid_x = PID(0.25, 0.5, 0.01, setpoint=x_ang)
            pid_y = PID(0.25, 0.5, 0.01, setpoint=y_ang)

            pid_x.setpoint = 0
            pid_y.setpoint = 0

            x_ang_control = pid_x(x_ang)
            y_ang_control = pid_y(y_ang)

            px, ix, dx = pid_x.components
            py, iy, dy = pid_y.components

            # PL CONTROL STUFF WITHOUT PID
            #F.send_land_message(x_ang,y_ang)

            #THIS IS THE MOST IMPORTANT THING HERE - PL WITH PID CONTROL
            send_land_message(-x_ang_control, -y_ang_control)
            ############################################################

            print("X CENTER PIXEL: " + str(x_avg) + " Y CENTER PIXEL: " +
                  str(y_avg))
            print("FOUND COUNT: " + str(found_count) + " NOTFOUND COUNT: " +
                  str(notfound_count))
            print("MARKER POSITION: x=" + x + " y= " + y + " z=" + z)
            found_count = found_count + 1
            print("")
            print(x_ang, y_ang)
            print(-x_ang_control, -y_ang_control)
            print("########################################################")
            return None
        else:
            notfound_count = notfound_count + 1
            print("FOUND COUNT: " + str(found_count) + " NOTFOUND COUNT: " +
                  str(notfound_count))
            return None
    except Exception as e:
        print('Target likely not found. Error: ' + str(e))
        return None
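
lander() above delegates the actual precision-landing command to send_land_message, which is not shown. DroneKit-based landing scripts commonly implement it as a MAVLink LANDING_TARGET message built from the two angular offsets; the sketch below follows that pattern and is an assumption, not the original helper.

from pymavlink import mavutil

def send_land_message(x_ang, y_ang):
    # angular offsets of the target from the image centre, in radians
    msg = vehicle.message_factory.landing_target_encode(
        0,                                   # time since system boot (unused here)
        0,                                   # target number
        mavutil.mavlink.MAV_FRAME_BODY_NED,  # reference frame
        x_ang, y_ang,                        # angle offsets x and y
        0, 0, 0)                             # distance, size_x, size_y
    vehicle.send_mavlink(msg)
    vehicle.flush()
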
def detect_markers_and_draw(mtx, dist, frame, drawing_img):
    marker_size = 2.8/100   # marker length in meters
    axis_length = 0.015

    # get dictionary and parameters
    parameters = aruco.DetectorParameters_create()  # create parameters
    parameters.detectInvertedMarker = False  # do not detect inverted markers

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert to gray

    # get marker data
    corners, ids, rejected_img_points = aruco.detectMarkers(gray, ARUCO_DICT, parameters=parameters)

    # draw corners on frame
    frame_m = aruco.drawDetectedMarkers(frame, corners, ids)

    if len(corners) == 4:

        # get corners of initial image
        y_max = frame.shape[0]
        x_max = frame.shape[1]
        frame_corners = [[0, 0], [x_max, 0], [0, y_max], [x_max, y_max]]

        # get transform corners
        transform_corners = np.zeros((len(ids), 2))
        for i in range(len(ids)):
            corner_num = ids[i][0]

            # get center x and y (calculating average)
            x, y = 0, 0
            for j in range(4):
                x += corners[i][0][j][0]
                y += corners[i][0][j][1]

            # add center to transform matrix
            transform_corners[corner_num] = [x/4, y/4]

        # transform drawing
        pts1 = np.float32(frame_corners)
        pts2 = np.float32(transform_corners)
        M = cv2.getPerspectiveTransform(pts1, pts2)
        drawing_img = cv2.warpPerspective(drawing_img, M, (drawing_img.shape[1], drawing_img.shape[0]))

        # create mask
        gray_square = cv2.cvtColor(drawing_img, cv2.COLOR_BGR2GRAY)
        ret, mask = cv2.threshold(gray_square, 5, 255, cv2.THRESH_BINARY)
        mask_inv = cv2.bitwise_not(mask)

        # get background from frame
        # print(f'frame_m shape {frame_m.shape}')
        # print(f'mask shape {mask_inv.shape}')
        frame_bg = cv2.bitwise_and(frame_m, frame_m, mask=mask_inv)

        # take only drawing from drawing image
        drawing_img = cv2.bitwise_and(drawing_img, drawing_img, mask=mask)

        frame_m = cv2.add(frame_bg, drawing_img)

        # point_1 = (int(corners[0][0][0][0]), (corners[0][0][0][1]))
        # point_2 = (int(corners[1][0][0][0]), (corners[1][0][0][1]))
        # cv2.line(square_img, point_1, point_2, (255, 0, 0), 3)

    # estimate pose
    rvecs, tvecs, _objPoints = aruco.estimatePoseSingleMarkers(corners, marker_size, mtx, dist)
    if tvecs is not None:
        for i in range(len(tvecs)):
            frame_m = aruco.drawAxis(frame_m, mtx, dist, rvecs[i], tvecs[i], axis_length)

    return frame_m, rvecs, tvecs
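
A possible calling loop for detect_markers_and_draw is sketched below; the camera index, the drawing file name and the placeholder calibration values are assumptions made only to show how the returned frame and pose vectors would be used.

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
drawing = cv2.imread("drawing.png")   # hypothetical overlay image
mtx = np.eye(3)                       # placeholder camera matrix (use real calibration)
dist = np.zeros(5)                    # placeholder distortion coefficients

while True:
    ok, frame = cap.read()
    if not ok:
        break
    out, rvecs, tvecs = detect_markers_and_draw(mtx, dist, frame, drawing.copy())
    cv2.imshow("overlay", out)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
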
    #-- Find all the aruco markers in the image
    #corners, ids, rejected = aruco.detectMarkers(image=gray, dictionary=aruco_dict, parameters=parameters, cameraMatrix=camera_matrix, distCoeff=camera_distortion)
    corners, ids, rejected = aruco.detectMarkers(image=gray, dictionary=aruco_dict, parameters=parameters)
    
    if ids is not None and ids[0] == id_to_find:
        droneDetected = True
        detectionCount += 1

        framesWithoutDrone = 0
        
        #-- ret = [rvec, tvec, ?]
        #-- array of rotation and position of each marker in camera frame
        #-- rvec = [[rvec_1], [rvec_2], ...]    attitude of the marker respect to camera frame
        #-- tvec = [[tvec_1], [tvec_2], ...]    position of the marker in camera frame
        ret = aruco.estimatePoseSingleMarkers(corners, marker_size, camera_matrix, camera_distortion)

        #-- Unpack the output, get only the first
        rvec, tvec = ret[0][0,0,:], ret[1][0,0,:]

        #-- Draw the detected marker and put a reference frame over it
        aruco.drawDetectedMarkers(frame, corners)
        #aruco.drawAxis(frame, camera_matrix, camera_distortion, rvec, tvec, 10)


        #-- Obtain the rotation matrix tag->camera
        R_ct    = np.matrix(cv2.Rodrigues(rvec)[0])
        R_tc    = R_ct.T

        #-- Get the attitude in terms of euler 321 (Needs to be flipped first)
        roll_marker, pitch_marker, yaw_marker = rotationMatrixToEulerAngles(R_flip*R_tc)
Example #30
0
    parameters = aruco.DetectorParameters_create()
    parameters.adaptiveThreshConstant = 10

    # lists of ids and the corners belonging to each id
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)

    # font for displaying text (below)
    font = cv2.FONT_HERSHEY_SIMPLEX

    # check if the ids list is not empty
    # if no check is added the code will crash
    if np.all(ids != None):

        # estimate pose of each marker and return the values
        # rvet and tvec-different from camera coefficients
        rvec, tvec ,_ = aruco.estimatePoseSingleMarkers(corners, marker_length, mtx, dist)
        #(rvec-tvec).any() # get rid of that nasty numpy value array error
        if rvec is None:
            continue
        for coor in range(rvec.shape[0]):
            
            for i in range(0, ids.size):
                # draw axis for the aruco markers
                print(tvec[i])
                cv2.putText(frame, "tvec" + "{0:.2f}".format(tvec[i][0][0])+" "+ "{0:.2f}".format(tvec[i][0][1])+ " " +"{0:.2f}".format(tvec[i][0][2]),(0,32), font, 1, (0,255,0),2,cv2.LINE_AA)
                #cv2.imwrite('raw/'+str(detections) + "good.png", frame)
                aruco.drawAxis(frame, mtx, dist, rvec[i], tvec[i], 0.1)
                #cv2.imwrite('axis/'+str(detections) + "with_axis.png", frame)
                detections += 1
        # draw a square around the markers
        aruco.drawDetectedMarkers(frame, corners)
Example #31
0
    def findMarker(self):
        self.rvecs_arr = np.zeros((100, 3, NUMBER))
        self.tvecs_arr = np.zeros((100, 3, NUMBER))
        QueryImg = None
        for order in range(NUMBER):
            # Capturing each frame of our video stream
            ret, QueryImg = self.cam.read()
            if ret == True:
                # grayscale image
                gray = cv2.cvtColor(QueryImg, cv2.COLOR_BGR2GRAY)

                # Detect Aruco markers
                corners, ids, _ = aruco.detectMarkers(
                    gray, ARUCO_DICT, parameters=ARUCO_PARAMETERS)

                # Refine detected markers
                # Eliminates markers not part of our board, adds missing markers to the board
                # corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
                #         image = gray,
                #         board = board,
                #         detectedCorners = corners,
                #         detectedIds = ids,
                #         rejectedCorners = rejectedImgPoints,
                #         self.cameraMatrix = self.cameraMatrix,
                #         self.distCoeffs = self.distCoeffs)

                ###########################################################################
                # TODO: Add validation here to reject IDs/corners not part of a gridboard #
                ###########################################################################

                # Outline all of the markers detected in our image
                QueryImg = aruco.drawDetectedMarkers(QueryImg,
                                                     corners,
                                                     borderColor=(0, 0, 255))

                # Require 15 markers before drawing axis
                if ids is not None and len(ids) > 0:
                    # Estimate the posture of the gridboard, which is a construction of 3D space based on the 2D video
                    #pose, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board, self.cameraMatrix, self.distCoeffs)
                    #if pose:
                    #    # Draw the camera posture calculated from the gridboard
                    #    QueryImg = aruco.drawAxis(QueryImg, self.cameraMatrix, self.distCoeffs, rvec, tvec, 0.3)
                    # Estimate the posture per each Aruco marker
                    self.rvecs, self.tvecs, _ = aruco.estimatePoseSingleMarkers(
                        corners, 0.02, self.cameraMatrix, self.distCoeffs)
                    for _id, rvec, tvec in zip(ids, self.rvecs, self.tvecs):
                        _id = _id[0]
                        for i in range(3):
                            self.rvecs_arr[_id][i][order] = rvec[0][i]
                            self.tvecs_arr[_id][i][order] = tvec[0][i]

                    #     QueryImg = aruco.drawAxis(QueryImg, self.cameraMatrix, self.distCoeffs, rvec, tvec, 0.02)

                # Display our image
                # cv2.imshow('QueryImage', QueryImg)
            cv2.waitKey(1)
        # print('self.rvecs_arr = ', self.rvecs_arr)
        # print('self.tvecs_arr = ', self.tvecs_arr)
        result = np.array(())
        r_avg = np.zeros(3)
        t_avg = np.zeros(3)
        for _id in range(100):
            ra = self.rvecs_arr[_id][0].nonzero()
            if len(ra[0]) < 0.3 * NUMBER:
                continue
            rb = self.rvecs_arr[_id][1].nonzero()
            rc = self.rvecs_arr[_id][2].nonzero()
            tx = self.tvecs_arr[_id][0].nonzero()
            ty = self.tvecs_arr[_id][1].nonzero()
            tz = self.tvecs_arr[_id][2].nonzero()
            ra = self.rvecs_arr[_id][0][ra]
            rb = self.rvecs_arr[_id][1][rb]
            rc = self.rvecs_arr[_id][2][rc]
            tx = self.tvecs_arr[_id][0][tx]
            ty = self.tvecs_arr[_id][1][ty]
            tz = self.tvecs_arr[_id][2][tz]
            ra = np.sort(ra, kind='quicksort')
            rb = np.sort(rb, kind='quicksort')
            rc = np.sort(rc, kind='quicksort')
            tx = np.sort(tx, kind='quicksort')
            ty = np.sort(ty, kind='quicksort')
            tz = np.sort(tz, kind='quicksort')
            r = np.array((ra, rb, rc))
            t = np.array((tx, ty, tz))
            ctn = False
            for i in range(3):
                rv, tv = r[i], t[i]

                while np.std(rv) > 0.01 and len(rv) >= NUMBER * 0.2:
                    if abs(rv[0] - np.average(rv)) > abs(rv[-1] -
                                                         np.average(rv)):
                        rv = np.delete(rv, 0)
                    else:
                        rv = np.delete(rv, -1)
                while np.std(tv) > 0.01 and len(tv) >= NUMBER * 0.2:
                    if abs(tv[0] - np.average(tv)) > abs(tv[-1] -
                                                         np.average(tv)):
                        tv = np.delete(tv, 0)
                    else:
                        tv = np.delete(tv, -1)
                if len(rv) < NUMBER * 0.2 or len(tv) < NUMBER * 0.2:
                    ctn = True
                    break
                r_avg[i] = np.average(rv)
                t_avg[i] = np.average(tv)
            if ctn:
                continue
            # print('[_id, r,t] = ', [_id, r,t])
            result = np.append(result, [_id, np.copy(r_avg), np.copy(t_avg)])

        result = result.reshape(int(len(result) / 3), 3)
        # print(result)
        for rst in result:
            QueryImg = aruco.drawAxis(QueryImg, self.cameraMatrix,
                                      self.distCoeffs, rst[1], rst[2], 0.02)
        cv2.imshow('QueryImage', QueryImg)
        return result
camera.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 960)

plt.figure()

cam_matrix = pickle.load(open("cam_matrix.p", "rb"))
dist_matrix = pickle.load(open("dist_matrix.p", "rb"))

while True:
    retval, frame = camera.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    print(gray.shape)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters = aruco.DetectorParameters_create()
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
    if (len(corners) > 0):
        rot_vec, trans_vec, _ = aruco.estimatePoseSingleMarkers(corners, .1016, cam_matrix, dist_matrix)
        axis = np.float32([[4, 0, 0], [0, 4, 0], [0, 0, -4]]).reshape(-1, 3)
        imgpts, jac = cv2.projectPoints(axis, rot_vec, trans_vec, cam_matrix, dist_matrix)
        print(imgpts)
        frame = aruco.drawAxis(frame, cam_matrix, dist_matrix, rot_vec, trans_vec, .1)
        #frame = draw(frame, corners, imgpts)
        print("Rotation Vector:")
        print(rot_vec)
        print("translation vector: ")
        print(trans_vec)
        frame = cv2.putText(frame, "X: " + str(trans_vec[0][0][0]) + " Y: " + str(trans_vec[0][0][1]) + " Z: " + str(trans_vec[0][0][2]), (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 4)
    frame_markers = aruco.drawDetectedMarkers(frame.copy(), corners, ids)
    cv2.imshow("live video", frame_markers)
    cv2.waitKey(1)