Example #1
import cv2
import cv2.aruco as aruco
import numpy as np

def augment_reality(path):
    # Load the replacement image and the reference board image
    replacement_img = cv2.imread(path)
    board_img = cv2.imread("board_aruco.png")

    # Detect markers in the reference image and get the origin points
    board_img_gray = cv2.cvtColor(board_img, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters =  aruco.DetectorParameters_create()
    board_markers, board_ids, _ = aruco.detectMarkers(board_img_gray, aruco_dict, parameters=parameters)
    orig_points = np.asarray(board_markers).reshape((-1,2))

    # Using webcam
    cap = cv2.VideoCapture(0)
    while(True):
        ret, frame = cap.read() #640x480
        frame_copy = frame.copy()

        # Detecting markers in real time
        gray_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
        parameters =  aruco.DetectorParameters_create()
        markers, ids, _ = aruco.detectMarkers(gray_image, aruco_dict, parameters=parameters)

        if len(markers) == 0:
            print("No marker found.")
        else:
            dest_points = np.asarray(markers).reshape((-1,2))

            # Find the reference coordinate of the detected markers
            ids = np.asarray(ids)
            detected_orig_points = []
            for id in ids:
                index = np.argwhere(board_ids == id)[0][0]
                for i in range(4):
                    detected_orig_points.append(orig_points[(index*4)+i])
            detected_orig_points = np.asarray(detected_orig_points).astype(int)

            # Black image with replacement image over markers
            homography = cv2.findHomography(detected_orig_points, dest_points)[0]
            h, w, _ = frame.shape
            warped_image = cv2.warpPerspective(replacement_img, homography, (w, h), frame_copy)

            # Optional -- draw detected markers in image
            # aruco.drawDetectedMarkers(frame, markers)

            # Overlapping images
            mask = warped_image > 0
            frame = np.where(mask, warped_image, frame)

        # Show image, press q to exit
        cv2.imshow('clean image', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Clean everything up
    cap.release()
    cv2.destroyAllWindows()
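
Note: the Dictionary_get/DetectorParameters_create helpers used throughout these examples belong to the legacy cv2.aruco module and were removed in OpenCV 4.7. A minimal sketch of the equivalent detection step under the newer class-based API:

import cv2

# OpenCV >= 4.7 replaces the free functions with an ArucoDetector object
dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_250)
parameters = cv2.aruco.DetectorParameters()
detector = cv2.aruco.ArucoDetector(dictionary, parameters)

gray = cv2.cvtColor(cv2.imread("board_aruco.png"), cv2.COLOR_BGR2GRAY)
corners, ids, rejected = detector.detectMarkers(gray)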
Example #2
    def ARtag_Detection(self):
        if not self.camera_open:
            self.openCamera()
        print "Detecting AR tags..."
        currentImage = self.getCurrentImage()
        imageData = currentImage.data
        imageData = numpy.reshape(imageData, (800, 1280, 4))
        gray = cv2.cvtColor(imageData, cv2.COLOR_BGRA2GRAY)

        corners, ids, rejected = aruco.detectMarkers(gray, self._aruco_dict, parameters=self._arucoParams) 

        if ids is not None:
            Tmat = []
            IDS = []
            detectioninfo = RR.RobotRaconteurNode.s.NewStructure("BaxterCamera_interface.ARtagInfo")
            for anid in ids:
                IDS.append(anid[0])
            for corner in corners:
                pc, Rc = self.getObjectPose(corner) 
                Tmat.extend([   Rc[0][0],   Rc[1][0],   Rc[2][0],   0.0,
                                Rc[0][1],   Rc[1][1],   Rc[2][1],   0.0,
                                Rc[0][2],   Rc[1][2],   Rc[2][2],   0.0,
                                pc[0],      pc[1],      pc[2],      1.0])

            detectioninfo.tmats = Tmat
            detectioninfo.ids = IDS
            return detectioninfo
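
The getObjectPose helper above is not shown in the excerpt. A plausible sketch of what it might do, assuming a square marker of known side length; marker_size, camera_matrix and dist_coeffs are hypothetical names, not part of the original class:

import cv2
import cv2.aruco as aruco

def get_object_pose(corner, marker_size, camera_matrix, dist_coeffs):
    # Hypothetical helper: estimate a single marker's pose, then expand
    # the rotation vector into a 3x3 matrix with Rodrigues
    rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
        [corner], marker_size, camera_matrix, dist_coeffs)
    R, _ = cv2.Rodrigues(rvecs[0])
    return tvecs[0].ravel(), R  # position pc, rotation Rc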
Example #3
def detect():
    cameraMatrix = np.array([[8.2177218160147447e+02, 0., 3.4694289806342221e+02],[0., 8.2177218160147447e+02, 2.4795144956871457e+02],[0., 0., 1.]])
    distCoeffs = np.array([4.4824308523616324e-02, -7.4951985348854000e-01, 3.7539708742088725e-03, 8.8931335565222442e-03, 3.7214188475390984e+00])
    # Type declarations
    rvecs = np.array([])
    tvecs = np.array([])
    
    # Capture frame-by-frame
    ret, frame = cap.read()
    #print(frame.shape) #480x640
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters =  aruco.DetectorParameters_create()
    #print(parameters)
 
    '''detectMarkers(...)
        detectMarkers(image, dictionary[, corners[, ids[, parameters[, rejectedImgPoints]]]]) -> corners, ids, rejectedImgPoints
    '''
    # lists of ids and the corners belonging to each id
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
    #print(corners)
 
    # It's working. My problem was that the cellphone put black all around it;
    # the algorithm depends very much upon finding rectangular black blobs.

    if corners:
        rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(corners, 0.05, cameraMatrix, distCoeffs)
        frame = aruco.drawAxis(frame, cameraMatrix, distCoeffs, rvecs, tvecs, 0.1)
        frame = aruco.drawDetectedMarkers(frame, corners, ids)
    return frame, rvecs, tvecs
Example #4
	def getAndSearchImage(self):
		global drone
		global thread_lock_camera_frame
		global render_frame
		global new_frame
		global W
		global H
		try:
			# print pygame.image
			pixelarray = drone.get_image()
			if pixelarray is not None:
				frame = pixelarray[:,:,::-1].copy()
				#resize image
				resized=cv2.resize(frame,(W,H))
				# aruco detection
				gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
				aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
				parameters =  aruco.DetectorParameters_create()
		
				#lists of ids and the corners belonging to each id
				corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
				
				# iterate over all found markers to determine the one to follow (biggest one)
				selected_corners = []
				max_index = (0,0)
				counter = 0
				if len(corners) > 0:
					dim = corners[0].shape
					#print dim
					while counter<dim[0]:
						tmp_corners = ((corners[0])[counter])
						# A=(1/2)|[(x3-x1)(y4-y2) +(x4-x2)(y1-y3)]|
						area = 0.5*((tmp_corners[2][0] - tmp_corners[0][0]) * (tmp_corners[3][1] - tmp_corners[1][1]) + (tmp_corners[3][0] - tmp_corners[1][0]) * (tmp_corners[0][1] - tmp_corners[2][1]))
						if area > max_index[0]:
							max_index = (area,counter)
						counter +=1

					max_corners = ((corners[0])[max_index[1]])
					selected_corners = np.array([np.array([(corners[0])[max_index[1]]],dtype=np.float32)])#[max_index[0]*4:max_index[0]*4+3]
					
				# draw all markers
				display = aruco.drawDetectedMarkers(resized, corners)

				thread_lock_camera_frame.acquire()
				render_frame = display
				new_frame = True
				thread_lock_camera_frame.release()

				# prepare function output
				if len(selected_corners) > 0:
					x,y = max_corners.sum(axis=0)/4
					area = max_index[0]
				else:
					x = -W
					y = -1
					area = -1
				return (x-W/2,y-H/2), area
		except:
			pass
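
The area expression in the loop above is the shoelace formula for a quadrilateral whose corners are listed in order: A = (1/2)|(x3-x1)(y4-y2) + (x4-x2)(y1-y3)|. A standalone sketch of the same computation, where corners4 is a hypothetical (4, 2) array of corner coordinates:

def quad_area(corners4):
    # Shoelace formula for an ordered quadrilateral:
    # A = (1/2)|(x3-x1)(y4-y2) + (x4-x2)(y1-y3)|
    (x1, y1), (x2, y2), (x3, y3), (x4, y4) = corners4
    return 0.5 * abs((x3 - x1) * (y4 - y2) + (x4 - x2) * (y1 - y3))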
Example #5
def getPose(im, board, m, d):
    if len(im.shape) > 2:
        gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    else:
        gray = im
    # parameters = aruco.DetectorParameters_create()
    # corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, dictionary, parameters=parameters)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, dictionary)
    ret, chcorners, chids = aruco.interpolateCornersCharuco(corners, ids, gray, board)
    ret, rvecs, tvecs = aruco.estimatePoseCharucoBoard(chcorners, chids, board,m,d)
    im = aruco.drawAxis(im, m, d, rvecs, tvecs,0.5)
    cv2.imshow('markers', im)
    cv2.waitKey()
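
getPose assumes a module-level dictionary plus a ChArUco board and calibration results m (camera matrix) and d (distortion coefficients). A sketch of how those inputs might be built with the same legacy API; the board dimensions and intrinsics below are placeholders, not values from the original:

import cv2.aruco as aruco
import numpy as np

dictionary = aruco.Dictionary_get(aruco.DICT_6X6_250)  # assumed dictionary
# squaresX, squaresY, squareLength, markerLength (units must match)
board = aruco.CharucoBoard_create(8, 11, 0.1, 0.05, dictionary)
m = np.array([[800.0, 0.0, 320.0],   # placeholder intrinsics
              [0.0, 800.0, 240.0],
              [0.0, 0.0, 1.0]])
d = np.zeros(5)                      # placeholder distortion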
Example #6
    def analyze_image(self):
        gray = cv.cvtColor(self.img, cv.COLOR_RGB2GRAY)
        all_corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, self.aruco_dict, parameters=self.parameters)
        if all_corners:
            self.corners = all_corners
            corners = all_corners[0][0]
            dim = corners.shape
            c = np.sum(corners, axis=0) / dim[0]
            self.center = (c[0], c[1])
            x1, y1 = corners[0][0], corners[0][1]
            x2, y2 = corners[1][0], corners[1][1]
            x3, y3 = corners[2][0], corners[2][1]
            x4, y4 = corners[3][0], corners[3][1]
            # A = (1 / 2) | [(x3 - x1)(y4 - y2) + (x4 - x2)(y1 - y3)] |
            self.marker_size = math.sqrt(abs((x3 - x1) * (y4 - y2) + (x4 - x2) * (y1 - y3)) / 2)
        else:
            self.corners = None
            self.center = (-1, -1)
            self.marker_size = 0
Example #7
def detect():
    # camera: c270
    cameraMatrix = np.array([[8.2177218160147447e+02, 0.0                   , 3.4694289806342221e+02],
                             [0.0                   , 8.2177218160147447e+02, 2.4795144956871457e+02],
                             [0.0                   , 0.0                   , 1.0                   ]])
    distCoeffs = np.array([4.4824308523616324e-02, -7.4951985348854000e-01, 3.7539708742088725e-03, 8.8931335565222442e-03, 3.7214188475390984e+00])

    rvecs = np.array([])
    tvecs = np.array([])
    
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters =  aruco.DetectorParameters_create()
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)

    if corners:
        rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(corners, 0.05, cameraMatrix, distCoeffs)
        """
        frame = aruco.drawAxis(frame, cameraMatrix, distCoeffs, rvecs, tvecs, 0.1)
        frame = aruco.drawDetectedMarkers(frame, corners, ids)
        """
        
    return frame, rvecs, tvecs
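
Both versions of detect() read from a global capture object that the excerpts never create. A minimal driver loop, assuming detect() and the capture live in the same module and cap is a cv2.VideoCapture opened at module scope:

import cv2

cap = cv2.VideoCapture(0)
while True:
    frame, rvecs, tvecs = detect()
    cv2.imshow('pose', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()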
Example #8
while (True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    #print(frame.shape) #480x640
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_ARUCO_ORIGINAL)
    parameters = aruco.DetectorParameters_create()

    #print(parameters)
    '''detectMarkers(...)
        detectMarkers(image, dictionary[, corners[, ids[, parameters[, rejectedImgPoints]]]]) -> corners, ids, rejectedImgPoints
        '''
    #lists of ids and the corners belonging to each id
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        gray, aruco_dict, parameters=parameters)
    print(ids, type(ids))
    print(corners[0], type(corners[0]))
    for marker in range(len(ids)):
        print(ids.item(marker))
        print(np.array(corners[marker].tolist()))
        print("shape", corners[marker].shape)
        for fourpoints in range(4):
            print(corners[marker].item(0, fourpoints, 0),
                  corners[marker].item(0, fourpoints, 1))
        print()
    print("------")

    #It's working.
Example #9
    def track(self, matrix_coefficients, distortion_coefficients):
        """Real-time ArUco marker tracking."""
        global pressedKey, needleComposeRvec, needleComposeTvec, ultraSoundComposeRvec, ultraSoundComposeTvec
        TcomposedRvecNeedle, TcomposedTvecNeedle = None, None
        TcomposedRvecUltrasound, TcomposedTvecUltrasound = None, None

        # Behaviour selects between calibration types:
        #   0 = no simulation
        #   1 = needle calibration
        #   2 = ultrasound calibration
        # Press X to cycle between them.
        behaviour = 0
        try:
            while True:
                # No marker detected
                isCalibrationMarkerDetected = False
                isNeedleDetected = False
                isUltraSoundDetected = False

                # Get the next frame to process
                ret, frame = cap.read()
                # operations on the frame come here
                gray = cv2.cvtColor(frame,
                                    cv2.COLOR_BGR2GRAY)  # Change grayscale
                aruco_dict = aruco.Dictionary_get(
                    aruco.DICT_5X5_250)  # Use 5x5 dictionary to find markers
                parameters = aruco.DetectorParameters_create(
                )  # Marker detection parameters

                # lists of ids and the corners belonging to each id
                corners, ids, rejected_img_points = aruco.detectMarkers(
                    gray,
                    aruco_dict,
                    parameters=parameters,
                    cameraMatrix=matrix_coefficients,
                    distCoeff=distortion_coefficients)

                # Get the behaviour and update the gui
                if behaviour == 0:
                    self.calibrationType.emit('Simulation')
                elif behaviour == 1:
                    self.calibrationType.emit('Needle calibration')
                else:
                    self.calibrationType.emit('Ultrasound calibration')

                if ids is not None:  # If markers were found by the detector
                    # sort the markers
                    zipped = zip(ids, corners)
                    ids, corners = zip(*(sorted(zipped)))
                    # 4 points for the ultrasound plane, 2 points for the needle line
                    axisForFourPoints = np.float32([[-0.02, -0.02, 0],
                                                    [-0.02, 0.02, 0],
                                                    [0.02, -0.02, 0],
                                                    [0.02, 0.02, 0]]).reshape(-1, 3)  # axis for a plane
                    axisForTwoPoints = np.float32([[0.01, 0, 0],
                                                   [-0.01, 0, 0]]).reshape(-1, 3)  # axis for a line

                    for i in range(0, len(ids)):  # Iterate in markers
                        # Estimate pose of each marker and return the values rvec and tvec---different from camera coefficients
                        rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(
                            corners[i], 0.02, matrix_coefficients,
                            distortion_coefficients)

                        if ids[i] == calibrationMarkerID:
                            calibrationRvec = rvec
                            calibrationTvec = tvec
                            isCalibrationMarkerDetected = True
                            calibrationMarkerCorners = corners[i]
                        elif ids[i] == needleMarkerID:
                            needleRvec = rvec
                            needleTvec = tvec
                            isNeedleDetected = True
                            needleCorners = corners[i]
                        elif ids[i] == ultraSoundMarkerID:
                            ultraSoundRvec = rvec
                            ultraSoundTvec = tvec
                            isUltraSoundDetected = True
                            ultrasoundCorners = corners[i]

                        (rvec - tvec).any(
                        )  # get rid of that nasty numpy value array error
                        # aruco.drawAxis(frame, matrix_coefficients, distortion_coefficients, rvec, tvec, 0.01)
                        frame = aruco.drawDetectedMarkers(
                            frame, corners)  # Draw A square around the markers

                    if isNeedleDetected and needleComposeRvec is not None and needleComposeTvec is not None:
                        info = cv2.composeRT(needleComposeRvec,
                                             needleComposeTvec, needleRvec.T,
                                             needleTvec.T)
                        TcomposedRvecNeedle, TcomposedTvecNeedle = info[
                            0], info[1]
                        imgpts, jac = cv2.projectPoints(
                            axisForTwoPoints, TcomposedRvecNeedle,
                            TcomposedTvecNeedle, matrix_coefficients,
                            distortion_coefficients)
                        frame = draw(frame, imgpts, (200, 200, 220))

                    if isUltraSoundDetected and ultraSoundComposeRvec is not None and ultraSoundComposeTvec is not None:
                        info = cv2.composeRT(ultraSoundComposeRvec,
                                             ultraSoundComposeTvec,
                                             ultraSoundRvec.T,
                                             ultraSoundTvec.T)
                        TcomposedRvecUltrasound, TcomposedTvecUltrasound = info[
                            0], info[1]
                        imgpts, jac = cv2.projectPoints(
                            axisForFourPoints, TcomposedRvecUltrasound,
                            TcomposedTvecUltrasound, matrix_coefficients,
                            distortion_coefficients)
                        frame = draw(frame, imgpts, (60, 200, 50))

                    # If the both markers can be seen by the camera, print the xyz differences between them
                    # So it will guide the user
                    if isNeedleDetected and needleComposeRvec is not None and needleComposeTvec is not None and \
                        isUltraSoundDetected and ultraSoundComposeRvec is not None and ultraSoundComposeTvec is not None:
                        xdiff = round((TcomposedTvecNeedle[0] -
                                       TcomposedTvecUltrasound[0])[0], 3)
                        ydiff = round((TcomposedTvecNeedle[1] -
                                       TcomposedTvecUltrasound[1])[0], 3)
                        zdiff = round((TcomposedTvecNeedle[2] -
                                       TcomposedTvecUltrasound[2])[0], 3)
                        self.changeXDiff.emit('X difference:' + str(xdiff))
                        self.changeYDiff.emit('Y difference:' + str(ydiff))
                        self.changeZDiff.emit('Z difference:' + str(zdiff))

                # Display the resulting frame
                rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                convertToQtFormat = QImage(rgbImage.data, rgbImage.shape[1],
                                           rgbImage.shape[0],
                                           QImage.Format_RGB888)
                p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                self.changePixmap.emit(p)

                # Key handling. For marker calibration, saving marker etc.
                if pressedKey is None:  # No key pressed
                    pass
                elif pressedKey == Qt.Key_C:  # Button for calibration.
                    if ids is not None and len(ids) > 1:  # If there are two markers, reverse the second and get the difference
                        if isNeedleDetected and isCalibrationMarkerDetected and behaviour == 1:
                            needleComposeRvec, needleComposeTvec = relativePosition(
                                calibrationRvec, calibrationTvec, needleRvec,
                                needleTvec)
                            savedNeedleRvec, savedNeedleTvec = needleComposeRvec, needleComposeTvec
                        elif isUltraSoundDetected and isCalibrationMarkerDetected and behaviour == 2:
                            ultraSoundComposeRvec, ultraSoundComposeTvec = relativePosition(
                                calibrationRvec, calibrationTvec,
                                ultraSoundRvec, ultraSoundTvec)
                            savedUltraSoundRvec, savedUltraSoundTvec = ultraSoundComposeRvec, ultraSoundComposeTvec
                elif pressedKey == Qt.Key_U:  # Button for moving z axis +1 mm
                    if behaviour == 1 and needleComposeTvec is not None:
                        needleComposeTvec = needleComposeTvec + [[0], [0], [0.001]]
                    elif behaviour == 2 and ultraSoundComposeTvec is not None:
                        ultraSoundComposeTvec = ultraSoundComposeTvec + [[0], [0], [0.001]]
                elif pressedKey == Qt.Key_D:  # Button for moving z axis -1 mm
                    if behaviour == 1 and needleComposeTvec is not None:
                        needleComposeTvec = needleComposeTvec + [[0], [0], [-0.001]]
                    elif behaviour == 2 and ultraSoundComposeTvec is not None:
                        ultraSoundComposeTvec = ultraSoundComposeTvec + [[0], [0], [-0.001]]
                elif pressedKey == Qt.Key_R:  # Button for moving x axis +1 mm
                    if behaviour == 1 and needleComposeTvec is not None:
                        needleComposeTvec = needleComposeTvec + [[0.001], [0], [0]]
                    elif behaviour == 2 and ultraSoundComposeTvec is not None:
                        ultraSoundComposeTvec = ultraSoundComposeTvec + [[0.001], [0], [0]]
                elif pressedKey == Qt.Key_L:  # Button for moving x axis -1 mm
                    if behaviour == 1 and needleComposeTvec is not None:
                        needleComposeTvec = needleComposeTvec + [[-0.001], [0], [0]]
                    elif behaviour == 2 and ultraSoundComposeTvec is not None:
                        ultraSoundComposeTvec = ultraSoundComposeTvec + [[-0.001], [0], [0]]
                elif pressedKey == Qt.Key_B:  # Button for moving y axis -1 mm
                    if behaviour == 1 and needleComposeTvec is not None:
                        needleComposeTvec = needleComposeTvec + [[0], [-0.001], [0]]
                    elif behaviour == 2 and ultraSoundComposeTvec is not None:
                        ultraSoundComposeTvec = ultraSoundComposeTvec + [[0], [-0.001], [0]]
                elif pressedKey == Qt.Key_F:  # Button for moving y axis +1 mm
                    if behaviour == 1 and needleComposeTvec is not None:
                        needleComposeTvec = needleComposeTvec + [[0], [0.001], [0]]
                    elif behaviour == 2 and ultraSoundComposeTvec is not None:
                        ultraSoundComposeTvec = ultraSoundComposeTvec + [[0], [0.001], [0]]
                elif pressedKey == Qt.Key_P:  # print necessary information here, for debug
                    pass  # Insert necessary print here
                elif pressedKey == Qt.Key_S:  # Save the calibration vectors to a file.
                    if (needleComposeRvec is not None
                            and needleComposeTvec is not None
                            and ultraSoundComposeRvec is not None
                            and ultraSoundComposeTvec is not None):
                        print(needleComposeRvec, needleComposeTvec,
                              ultraSoundComposeRvec, ultraSoundComposeTvec)
                        fileObject = open(save_Name, 'wb')
                        data = [
                            needleComposeRvec, needleComposeTvec,
                            ultraSoundComposeRvec, ultraSoundComposeTvec
                        ]
                        pickle.dump(data, fileObject)
                        fileObject.close()
                elif pressedKey == Qt.Key_X:  # change simulation type, "Simulation, needle calibration, ultrasound calibration"
                    behaviour = (behaviour + 1) % 3
                    # print(behaviour)
                pressedKey = None

        except Exception:
            pass
        finally:
            # When everything done, release the capture
            cap.release()

        return frame
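
The relativePosition helper used for calibration is not shown in the excerpt. A common implementation inverts one pose and composes it with the other via cv2.composeRT; a sketch under that assumption:

import cv2
import numpy as np

def inversePerspective(rvec, tvec):
    # Invert a rigid transform given as a Rodrigues vector and a translation
    R, _ = cv2.Rodrigues(rvec)
    invTvec = np.matmul(-R.T, tvec.reshape((3, 1)))
    invRvec, _ = cv2.Rodrigues(R.T)
    return invRvec, invTvec

def relativePosition(rvec1, tvec1, rvec2, tvec2):
    # Sketch: pose of marker 1 expressed in the frame of marker 2
    rvec1, tvec1 = rvec1.reshape((3, 1)), tvec1.reshape((3, 1))
    rvec2, tvec2 = rvec2.reshape((3, 1)), tvec2.reshape((3, 1))
    invRvec, invTvec = inversePerspective(rvec2, tvec2)
    composedRvec, composedTvec, *_ = cv2.composeRT(rvec1, tvec1, invRvec, invTvec)
    return composedRvec.reshape((3, 1)), composedTvec.reshape((3, 1))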
Example #10
    # Capture frame-by-frame

    (ret, frame) = cap.read()

    # frame = frame[300:640,100:560]

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('original', frame)

    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    cv2.imshow('grayed', gray)

    # ret3,gray = cv2.threshold(gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    # cv2.imshow("thresholded", gray)

    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        gray, dictionary, parameters=arucoParams)  # Detect aruco
    aruco.refineDetectedMarkers(gray, board, corners, ids, rejectedImgPoints)
    if ids is not None:  # if the aruco marker detected

        # rvec, tvec, objpoints = aruco.estimatePoseSingleMarkers(corners, markerLength, camera_Matrix, dist_Coff) # For a single marker
        # board = aruco.Board_create(objpoints,dictionary,ids)
        imgWithAruco = aruco.drawDetectedMarkers(frame, corners, ids,
                                                 (0, 255, 0))
        retval, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board,
                                                     camera_Matrix, dist_Coff)
        imgWithAruco = aruco.drawAxis(frame, camera_Matrix, dist_Coff, rvec,
                                      tvec, 0.1)
        # imgWithAruco = aruco.drawAxis(imgWithAruco, camera_Matrix, dist_Coff, rvec, tvec, 10)  # axis length can be changed according to your requirement

        x_dist = round(tvec[0][0] * 100, 1)
        y_dist = round(tvec[1][0] * 100, 1)
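
estimatePoseBoard needs a Board object describing the marker layout. A sketch of a grid board built with the legacy helper; the dimensions below are placeholders, not values from the original:

import cv2.aruco as aruco

dictionary = aruco.Dictionary_get(aruco.DICT_6X6_250)  # assumed dictionary
# markersX, markersY, markerLength, markerSeparation (same units as tvec)
board = aruco.GridBoard_create(5, 7, 0.04, 0.01, dictionary)
arucoParams = aruco.DetectorParameters_create()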
Example #11
R_flip[2, 2] = -1.0

#--- start imutils fps counter
fps = FPS().start()

#--- LOOP - capture frames from the camera
for frame_pi in camera.capture_continuous(rawCapture,
                                          format="bgr",
                                          use_video_port=True):
    frame = frame_pi.array
    #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    #-- Find all the aruco markers in the image
    corners, ids, rejected = aruco.detectMarkers(image=frame,
                                                 dictionary=aruco_dict,
                                                 parameters=parameters,
                                                 cameraMatrix=camera_matrix,
                                                 distCoeff=camera_distortion)

    if ids is not None:

        ret = aruco.estimatePoseSingleMarkers(corners, marker_size,
                                              camera_matrix, camera_distortion)
        rvec, tvec = ret[0], ret[1]

        aruco.drawDetectedMarkers(frame, corners)

        i = 0
        for id in ids_to_find:

            id_pos = np.where(ids == id)
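
The loop is cut off here. np.where(ids == id) returns (row, column) index arrays because ids has shape (N, 1); a hedged sketch of how a body like this might use them to pick out one marker's pose:

import numpy as np

def pose_for_id(ids, rvec, tvec, wanted_id):
    # Hypothetical helper: look up the pose of one tracked id
    rows, _ = np.where(ids == wanted_id)
    if rows.size == 0:
        return None  # this id is not visible in the current frame
    i = rows[0]
    return rvec[i, 0, :], tvec[i, 0, :]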
Example #12
 def detect_markers(self, frame) -> ArucoDetections:
     corners, ids, rejected = aruco.detectMarkers(
         frame, dictionary=self._aruco_dict)
     return ArucoDetections(corners, ids, rejected)
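
The ArucoDetections return type is not defined in the excerpt; a hypothetical container matching the call site:

from dataclasses import dataclass
from typing import Any, Optional, Sequence

@dataclass
class ArucoDetections:
    # Hypothetical definition; the original class is not shown
    corners: Sequence[Any]   # list of (1, 4, 2) corner arrays
    ids: Optional[Any]       # (N, 1) array of marker ids, or None
    rejected: Sequence[Any]  # rejected candidate corners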
Example #13
def callback(status):
    #agv_stopped = status.data
    print('Received status from AGV')


sub = rospy.Subscriber('/AGV_status', Float64MultiArray, callback)

while True:
    # Saves images from the video stream and converts it into a grayscale image
    # ret is currently unused (check if removable)
    ret, frame = video_stream.read()
    gray_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Get the corner positions of each (visible) marker in the image
    corners, ids, rejected = aruco.detectMarkers(image=gray_img, dictionary=aruco_dict, parameters=detector_parameters)

    if ids is not None:

        for i in range(0, len(ids)):
            # Calculate each marker's position and place it in an array
            result = aruco.estimatePoseSingleMarkers(corners[i], marker_size, calibMatrix, distMatrix)
            rvec, tvec = result[0][0, 0, :], result[1][0, 0, :]
            #print("Marker found, printing coordinates for id = " + str(ids[i]))
            #print("x-position = ", round(tvec[0], 1), ", y-position = ", round(tvec[1], 1), ", z-position = ", round(tvec[2], 1))

            aruco.drawDetectedMarkers(frame, corners)
            #aruco.drawAxis(frame, calibMatrix, distMatrix, rvec, tvec, 10)

Example #14
def roda_todo_frame(imagem):
    global centro_yellow
    global m
    global angle_yellow
    global creeper_vermelho
    global creeper_verde
    global creeper_azul
    global dic_creepers
    global ids
    global id_encontrado

    try:
        cv_image = bridge.compressed_imgmsg_to_cv2(imagem, "bgr8")
        copia = cv_image.copy()  # in case it is needed in the while loop

        if frame % skip == 0:  # only process every `skip` frames
            mask = projeto_utils.filter_color(copia, low, high)
            img, centro_yellow = projeto_utils.center_of_mass_region(
                mask, 200, 300, mask.shape[1], mask.shape[0])

            #----------------------------------------- CREEPERS ----------------------------------------#
            creeper_vermelho, creeper_verde, creeper_azul = projeto_utils.identifica_creeper(
                cv_image)
            dic_creepers = {
                'azul': creeper_azul,
                'vermelho': creeper_vermelho,
                'verde': creeper_verde
            }
            #-------------------------------------------------------------------------------------------#

            #----------------------------------------- REGRESSION --------------------------------------#
            saida_bgr, m, h = projeto_utils.ajuste_linear_grafico_x_fy(mask)
            ang = math.atan(m)
            ang_deg = math.degrees(ang)
            angle_yellow = ang_deg
            #-------------------------------------------------------------------------------------------#

            #----------------------------------------- ARUCO -------------------------------------------#
            gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
            corners, ids, rejectedImgPoints = aruco.detectMarkers(
                gray, aruco_dict, parameters=parameters)

            # check whether the detected id is the desired one
            try:
                for id in ids:
                    if id_to_find == int(id[0]):
                        id_encontrado = True
            except:
                pass

            ######################################## ARUCO #############################################

            if ids is not None:
                ret = aruco.estimatePoseSingleMarkers(corners, marker_size,
                                                      camera_matrix,
                                                      camera_distortion)
                rvec, tvec = ret[0][0, 0, :], ret[1][0, 0, :]

                #-- Draw a rectangle and show the Id of the detected marker
                aruco.drawDetectedMarkers(cv_image, corners, ids)
                aruco.drawAxis(cv_image, camera_matrix, camera_distortion,
                               rvec, tvec, 1)

                #-- Print tvec, the translation vector in x y z
                str_position = "Marker x=%4.0f  y=%4.0f  z=%4.0f" % (
                    tvec[0], tvec[1], tvec[2])
                print(str_position)
                cv2.putText(cv_image, str_position, (0, 100), font, 1,
                            (0, 255, 0), 1, cv2.LINE_AA)

                #####################---- Euclidean distance ----#####################
                # Compute the distance using only tvec, the translation vector
                # (either of the two forms below works)
                distance = np.sqrt(tvec[0]**2 + tvec[1]**2 + tvec[2]**2)
                distancenp = np.linalg.norm(tvec)

                #-- Print distance
                str_dist = "Dist aruco=%4.0f  dis.np=%4.0f" % (distance,
                                                               distancenp)
                print(str_dist)
                cv2.putText(cv_image, str_dist, (0, 15), font, 1, (0, 255, 0),
                            1, cv2.LINE_AA)

                #####################---- Distance from the focal length ----#####################

                # raspicam v2 focal length
                FOCAL_LENGTH = 3.6  #3.04
                # pixels per unit of measurement
                m = (camera_matrix[0][0] / FOCAL_LENGTH +
                     camera_matrix[1][1] / FOCAL_LENGTH) / 2
                # corners[0][0][0][0] = [ID][plane?][corner_pos (clockwise)][0=x_value, 1=y_value]
                pixel_length1 = math.sqrt(
                    math.pow(corners[0][0][0][0] - corners[0][0][1][0], 2) +
                    math.pow(corners[0][0][0][1] - corners[0][0][1][1], 2))
                pixel_length2 = math.sqrt(
                    math.pow(corners[0][0][2][0] - corners[0][0][3][0], 2) +
                    math.pow(corners[0][0][2][1] - corners[0][0][3][1], 2))
                pixlength = (pixel_length1 + pixel_length2) / 2
                dist = marker_size * FOCAL_LENGTH / (pixlength / m)

                #-- Print the focal-length distance
                str_distfocal = "Dist focal=%4.0f" % (dist)
                print(str_distfocal)
                cv2.putText(cv_image, str_distfocal, (0, 30), font, 1,
                            (0, 255, 0), 1, cv2.LINE_AA)

                ####################--------- draw the cube -----------#########################
                m = marker_size / 2
                pts = np.float32([[-m, m, m], [-m, -m, m], [m, -m, m], [m, m, m],
                                  [-m, m, 0], [-m, -m, 0], [m, -m, 0], [m, m, 0]])
                imgpts, _ = cv2.projectPoints(pts, rvec, tvec, camera_matrix,
                                              camera_distortion)
                imgpts = np.int32(imgpts).reshape(-1, 2)
                cv_image = cv2.drawContours(cv_image, [imgpts[:4]], -1,
                                            (0, 0, 255), 4)
                for i, j in zip(range(4), range(4, 8)):
                    cv_image = cv2.line(cv_image, tuple(imgpts[i]),
                                        tuple(imgpts[j]), (0, 0, 255), 4)
                cv_image = cv2.drawContours(cv_image, [imgpts[4:]], -1,
                                            (0, 0, 255), 4)
            #-------------------------------------------------------------------------------------------#

            projeto_utils.texto(cv_image,
                                f"Obstacle distance: {distancia}", (15, 50),
                                color=(0, 0, 255))

            cv2.imshow("Camera", cv_image)
            cv2.imshow("Creeper", dic_creepers[cor])

        cv2.waitKey(1)
    except CvBridgeError as e:
        print('ex', e)
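
The focal-length distance estimate above reduces to the pinhole model: with focal length fx in pixels and a marker of real side length s spanning p pixels, the marker sits at roughly Z = s * fx / p. A compact sketch of the same arithmetic:

def marker_distance(marker_size, fx_pixels, side_length_pixels):
    # Pinhole model: Z = real size * focal length (px) / apparent size (px)
    return marker_size * fx_pixels / side_length_pixels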
Example #15
    def run(self):
        cap, resolution = init_cv()
        t = threading.current_thread()
        cf, URI = init_cf()

        if cf is None:
            print('Not running cf code.')
            return

        aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
        parameters = aruco.DetectorParameters_create()
        font = cv2.FONT_HERSHEY_PLAIN
        id2find = [0, 1]
        marker_size = [0.112, 0.0215]  # 0.1323
        ids_seen = [0, 0]
        id2follow = 0
        zvel = 0.0
        landMode = False

        camera_matrix, camera_distortion, _ = loadCameraParams('runcam_nano3')

        with SyncCrazyflie(URI, cf=Crazyflie(rw_cache='./cache')) as scf:
            with open("ugly_log.txt", "a") as file:
                lgr = UglyLogger(URI, scf, file)
                # We take off when the commander is created
                with MotionCommander(scf) as mc:
                    mc.up(0.1, 0.5)

                    while not self._stopevent.isSet():

                        ret, frame = cap.read()

                        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                        corners, ids, rejected = aruco.detectMarkers(
                            image=gray,
                            dictionary=aruco_dict,
                            parameters=parameters,
                            cameraMatrix=camera_matrix,
                            distCoeff=camera_distortion)

                        if ids is not None and len(ids) > 0:

                            #-- Draw the detected marker and put a reference frame over it
                            aruco.drawDetectedMarkers(frame, corners)

                            #-- Work out which marker has been seen the most lately
                            if 0 in ids:
                                ids_seen[0] += 1
                            else:
                                ids_seen[0] = 0

                            if 1 in ids:
                                ids_seen[1] += 2
                            else:
                                ids_seen[1] = 0

                            id2follow = np.argmax(ids_seen)
                            idx_r, idx_c = np.where(ids == id2follow)

                            #-- Extract the id to follow
                            corners = np.asarray(corners)
                            corners = corners[idx_r, :]

                            #-- Estimate poses of detected markers
                            rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
                                corners, marker_size[id2follow], camera_matrix,
                                camera_distortion)

                            #-- Unpack the output, get only the first
                            rvec, tvec = rvecs[0, 0, :], tvecs[0, 0, :]

                            # Draw the detected markers axis
                            for i in range(len(rvecs)):
                                aruco.drawAxis(frame, camera_matrix,
                                               camera_distortion,
                                               rvecs[i, 0, :], tvecs[i, 0, :],
                                               marker_size[id2follow] / 2)

                            #-- Obtain the rotation matrix tag->camera
                            R_ct = np.matrix(cv2.Rodrigues(rvec)[0])
                            R_tc = R_ct.T

                            #-- Now get the position and attitude of the camera with respect to the marker
                            if id2follow == 0:
                                pos_camera = -R_tc * (np.matrix(tvec).T -
                                                      T_slide)
                            else:
                                pos_camera = -R_tc * (np.matrix(tvec).T)

                            str_position = "Position error: x=%4.4f  y=%4.4f  z=%4.4f" % (
                                pos_camera[0], pos_camera[1], pos_camera[2])
                            cv2.putText(frame, str_position, (0, 20), font, 1,
                                        uglyConst.BLACK, 2, cv2.LINE_AA)

                            #-- Get the attitude of the camera with respect to the frame
                            roll_camera, pitch_camera, yaw_camera = rotationMatrixToEulerAngles(
                                R_flip * R_tc)
                            att_camera = [
                                math.degrees(roll_camera),
                                math.degrees(pitch_camera),
                                math.degrees(yaw_camera)
                            ]

                            str_attitude = "Anglular error: roll=%4.4f  pitch=%4.4f  yaw (z)=%4.4f" % (
                                att_camera[0], att_camera[1], att_camera[2])
                            cv2.putText(frame, str_attitude, (0, 40), font, 1,
                                        uglyConst.BLACK, 2, cv2.LINE_AA)

                            drawHUD(frame, resolution, yaw_camera)

                            pos_flip = np.array([[-pos_camera.item(1)],
                                                 [pos_camera.item(0)]])
                            cmd_flip = np.array(
                                [[np.cos(yaw_camera), -np.sin(yaw_camera)],
                                 [np.sin(yaw_camera),
                                  np.cos(yaw_camera)]])
                            pos_cmd = cmd_flip.dot(
                                pos_flip)  # cmd_flip*pos_flip

                            print('Following tag ', id2follow,
                                  ' with position ', pos_cmd[0], pos_cmd[1])

                            flowHeight = lgr.getHeight()
                            print('Height is', flowHeight)

                            #-- If far away or big angle, move/turn closer at constant height
                            if (np.sqrt(pos_cmd[0] * pos_cmd[0] +
                                        pos_cmd[1] * pos_cmd[1]) > 0.05) or (
                                            np.abs(pos_camera.item(2) > 1.0)):
                                print('1. Far away, moving closer')
                                mc._set_vel_setpoint(
                                    pos_cmd[0] * uglyConst.Kx,
                                    pos_cmd[1] * uglyConst.Ky, 0.0,
                                    -att_camera[2] * uglyConst.Kyaw)
                            #-- If on the ground, "maintain" height and adjust
                            elif flowHeight < 40:
                                print('4. Landed')
                                mc._set_vel_setpoint(
                                    pos_cmd[0] * uglyConst.Kx,
                                    pos_cmd[1] * uglyConst.Ky, -0.2,
                                    -att_camera[2] * uglyConst.Kyaw)
                                #-- If really close, shut down motors
                                if (np.sqrt(pos_cmd[0] * pos_cmd[0] +
                                            pos_cmd[1] * pos_cmd[1]) < 0.01):
                                    mc.stop()
                                    print('5. Close!')
                                    break
                            #-- If close to the ground and ready 2 land, move down faster
                            elif flowHeight < 200 and (
                                    np.sqrt(pos_cmd[0] * pos_cmd[0] +
                                            pos_cmd[1] * pos_cmd[1]) > 0.01):
                                mc.stop()
                                break
                                # Unreachable: the branch below is disabled by the stop/break above
                                # print('3. Within ground effect, moving down fast')
                                # mc._set_vel_setpoint(
                                #     pos_cmd[0] * uglyConst.Kx,
                                #     pos_cmd[1] * uglyConst.Ky, -0.2,
                                #     -att_camera[2] * uglyConst.Kyaw)

                            #-- Otherwise, move down
                            else:
                                print('2. Close, moving down')
                                mc._set_vel_setpoint(
                                    pos_cmd[0] * uglyConst.Kx,
                                    pos_cmd[1] * uglyConst.Ky, -0.05,
                                    -att_camera[2] * uglyConst.Kyaw)
                                landMode = True

                        cv2.imshow('frame', frame)

                        key = cv2.waitKey(1) & 0xFF
                        if key == ord('q'):
                            cap.release()
                            cv2.destroyAllWindows()
                        elif key == ord('w'):
                            zvel += 0.1
                        elif key == ord('s'):
                            zvel -= 0.1

                    # We land when the commander goes out of scope

                    print('Landing...')

        print('Stopping cf_thread')
Example #16
    def run(self):
        aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
        parameters = aruco.DetectorParameters_create()
        font = cv2.FONT_HERSHEY_PLAIN

        camera_matrix, camera_distortion, _ = loadCameraParams('webcam')

        cap, resolution = init_cv()

        while True:
            ret, frame = cap.read()

            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            corners, ids, rejected = aruco.detectMarkers(
                image=gray,
                dictionary=aruco_dict,
                parameters=parameters,
                cameraMatrix=camera_matrix,
                distCoeff=camera_distortion)

            if ids is not None:
                #-- Estimate poses of detected markers
                rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
                    corners, marker_size, camera_matrix, camera_distortion)

                #-- Unpack the output, get only the first
                rvec, tvec = rvecs[0, 0, :], tvecs[0, 0, :]

                #-- Draw the detected marker and put a reference frame over it
                aruco.drawDetectedMarkers(frame, corners)

                # Draw the detected markers axis
                for i in range(len(rvecs)):
                    aruco.drawAxis(frame, camera_matrix, camera_distortion,
                                   rvecs[i, 0, :], tvecs[i, 0, :], 0.1)

                #-- Obtain the rotation matrix tag->camera
                R_ct = np.matrix(cv2.Rodrigues(rvec)[0])
                R_tc = R_ct.T

                #-- Now get the position and attitude of the camera with respect to the marker
                pos_camera = -R_tc * np.matrix(tvec).T
                str_position = "Position error: x=%4.4f  y=%4.4f  z=%4.4f" % (
                    pos_camera[0], pos_camera[1], pos_camera[2])
                cv2.putText(frame, str_position, (0, 20), font, 1,
                            uglyConst.BLACK, 2, cv2.LINE_AA)

                #-- Get the attitude of the camera with respect to the frame
                roll_camera, pitch_camera, yaw_camera = rotationMatrixToEulerAngles(
                    R_flip * R_tc)
                str_attitude = "Anglular error: roll=%4.4f  pitch=%4.4f  yaw (z)=%4.4f" % (
                    math.degrees(roll_camera), math.degrees(pitch_camera),
                    math.degrees(yaw_camera))
                cv2.putText(frame, str_attitude, (0, 40), font, 1,
                            uglyConst.BLACK, 2, cv2.LINE_AA)

                drawHUD(frame, resolution, yaw_camera)

                self.ctrl_message.errorx = pos_camera[0]
                self.ctrl_message.errory = pos_camera[1]
                self.ctrl_message.errorz = pos_camera[2]
                self.ctrl_message.erroryaw = math.degrees(yaw_camera)

            cv2.imshow('frame', frame)

            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                cap.release()
                cv2.destroyAllWindows()
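
Examples #15, #16, #18 and #19 all assume a rotationMatrixToEulerAngles helper that is never shown. A common implementation (a sketch, not the originals) extracts ZYX Euler angles with a gimbal-lock guard:

import math

def rotationMatrixToEulerAngles(R):
    # ZYX Euler extraction with a guard near the pitch = +/-90 deg singularity
    sy = math.sqrt(R[0, 0] ** 2 + R[1, 0] ** 2)
    if sy > 1e-6:
        roll = math.atan2(R[2, 1], R[2, 2])
        pitch = math.atan2(-R[2, 0], sy)
        yaw = math.atan2(R[1, 0], R[0, 0])
    else:  # gimbal lock
        roll = math.atan2(-R[1, 2], R[1, 1])
        pitch = math.atan2(-R[2, 0], sy)
        yaw = 0.0
    return roll, pitch, yaw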
Example #17
def collect_charuco_detections(capture, aruco_dict, charuco_board):
    # Create the arrays and variables we'll use to store info like corners and IDs from images processed
    corners_all = []  # Corners discovered in all images processed
    ids_all = []  # Aruco ids corresponding to corners discovered
    image_size = None  # Determined at runtime

    good = 0

    def show(frame):
        # resize
        proportion = max(frame.shape) / 800.0
        im = cv2.resize(frame, (int(
            frame.shape[1] / proportion), int(frame.shape[0] / proportion)))

        # add text
        txt = f'good frames so far: {good}'
        im = cv2.putText(im, txt, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
                         (255, 0, 0), 2, cv2.LINE_AA)

        # show the debug image
        cv2.imshow('calibration', im)

    while True:
        more, img = capture.read()
        if not more:
            break

        # Grayscale the image
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find aruco markers in the query image
        corners, ids, _ = aruco.detectMarkers(image=gray,
                                              dictionary=aruco_dict)

        # Outline the aruco markers found in our query image
        img = aruco.drawDetectedMarkers(image=img, corners=corners)

        try:
            # Get charuco corners and ids from detected aruco markers
            response, charuco_corners, charuco_ids = aruco.interpolateCornersCharuco(
                markerCorners=corners,
                markerIds=ids,
                image=gray,
                board=charuco_board)

            # If a Charuco board was found, let's collect image/corner points
            # Requiring more than 20 squares
            if response > 20:
                # Add these corners and ids to our calibration arrays
                corners_all.append(charuco_corners)
                ids_all.append(charuco_ids)

                # Draw the Charuco board we've detected to show our calibrator the board was properly detected
                img = aruco.drawDetectedCornersCharuco(
                    image=img,
                    charucoCorners=charuco_corners,
                    charucoIds=charuco_ids)

                # If our image size is unknown, set it now
                if not image_size:
                    image_size = gray.shape[::-1]

                good += 1
                show(img)

            else:
                show(img)

        except cv2.error as e:
            show(img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    return corners_all, ids_all, image_size
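
Once collect_charuco_detections returns, calibration might proceed with aruco.calibrateCameraCharuco from the same legacy API; a sketch:

import cv2.aruco as aruco

corners_all, ids_all, image_size = collect_charuco_detections(
    capture, aruco_dict, charuco_board)
ret, camera_matrix, dist_coeffs, rvecs, tvecs = aruco.calibrateCameraCharuco(
    charucoCorners=corners_all,
    charucoIds=ids_all,
    board=charuco_board,
    imageSize=image_size,
    cameraMatrix=None,
    distCoeffs=None)
print('reprojection error:', ret)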
Example #18
    def Image_Process(self):
        ret = self.frame
        gray = cv2.cvtColor(self.frame,
                            cv2.COLOR_BGR2GRAY)  # convert the frame to grayscale
        # Find markers from the grayscale image using the camera parameters, dictionary and detector parameters; returns the corners and marker ids (plus rejected candidates)
        corners, ids, rejected = aruco.detectMarkers(image=gray,
                                                     dictionary=aruco_dict,
                                                     parameters=parameters,
                                                     cameraMatrix=cam_mat,
                                                     distCoeff=cam_distort)
        # If markers were detected
        if ids is not None:  # and self.freeze is not True:
            self.cant_find_mark = False
            # Estimate the marker pose from the corners, marker size and camera parameters
            ret = aruco.estimatePoseSingleMarkers(corners, marker_size,
                                                  cam_mat, cam_distort)
            rvec, tvec = ret[0][0, 0, :], ret[1][0, 0, :]  # split the pose into attitude and position vectors
            aruco.drawDetectedMarkers(self.frame,
                                      corners)  # draw the markers on the frame using the corners
            aruco.drawAxis(
                self.frame, cam_mat, cam_distort, rvec, tvec, 10
            )  # draw xyz axes of length 10 using the camera parameters and the marker pose

            str_position = "MARKER Position x=%4.0f  y=%4.0f  z=%4.0f" % (
                tvec[0], tvec[1], tvec[2])  # 카메라 기준 마커의 위치 좌표
            cv2.putText(self.frame, str_position, (0, 100), font, 1,
                        (0, 255, 0), 2, cv2.LINE_AA)

            R_ct = np.matrix(
                cv2.Rodrigues(rvec)[0])  # convert the rotation vector to a rotation matrix (Rodrigues)
            R_tc = R_ct.T

            roll_marker, pitch_marker, yaw_marker = rotationMatrixToEulerAngles(
                R_flip * R_tc)  # convert to Euler angles
            str_attitude = "MARKER Attitude r=%4.0f  p=%4.0f  y=%4.0f" % (
                math.degrees(roll_marker), math.degrees(pitch_marker),
                math.degrees(yaw_marker))  # marker roll, pitch, yaw
            cv2.putText(self.frame, str_attitude, (0, 150), font, 1,
                        (0, 255, 0), 2, cv2.LINE_AA)

            pos_camera = -R_tc * np.matrix(
                tvec).T  # camera position relative to the marker, via the rotation matrix

            str_position = "CAMERA Position x=%4.0f  y=%4.0f  z=%4.0f" % (
                pos_camera[0], pos_camera[1], pos_camera[2])  # camera position relative to the marker
            cv2.putText(self.frame, str_position, (0, 200), font, 1,
                        (0, 255, 0), 2, cv2.LINE_AA)

            # The camera's attitude relative to the marker equals the marker's attitude relative to the camera
            str_attitude = "CAMERA Attitude r=%4.0f  p=%4.0f  y=%4.0f" % (
                math.degrees(roll_marker), math.degrees(pitch_marker),
                math.degrees(yaw_marker))
            cv2.putText(self.frame, str_attitude, (0, 250), font, 1,
                        (0, 255, 0), 2, cv2.LINE_AA)

            self.CAM_X = pos_camera[0]
            self.CAM_Z = pos_camera[2]
            self.CAM_PITCH = math.degrees(pitch_marker)  # pitch_camera was undefined; the camera pitch equals the marker pitch here
        else:  # if no marker was detected
            self.cant_find_mark = True
            cv2.putText(self.frame, "can't find any marker", (0, 100), font, 1,
                        (255, 0, 0), 2, cv2.LINE_AA)
Example #19
  def callbackImage(self,data):

    global out, ids
    try:
      src_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
      (rows,cols,channels) = src_image.shape
    except CvBridgeError as e:
      print(e)

    #-- 180 deg rotation matrix around the x axis
    R_flip = np.zeros((3,3), dtype=np.float64)
    R_flip[0,0] = +1.0
    R_flip[1,1] = -1.0
    R_flip[2,2] = -1.0

    #-- Convert to gray scale
    gray = cv2.cvtColor(src_image, cv2.COLOR_BGR2GRAY) #-- remember, OpenCV stores color images in Blue, Green, Red

    #-- Find all the aruco markers in the image
    corners, ids, rejected = aruco.detectMarkers(image=gray,
                                                  dictionary=aruco_dict,
                                                  parameters=parameters,
                                                  cameraMatrix=camera_matrix,
                                                  distCoeff=camera_distortion)

    #rospy.loginfo("ids[]: %f", len(ids[0]))
    #int(ids) == id_to_find:

    # out = np.logical_and(ids != None, ids[0] == id_to_find)
    # rospy.loginfo(out)

    if ids is not None:
      #rospy.loginfo("Aruco working!")

      if ids[0] == id_to_find:

        #if (int(ids[0]) != None and int(ids[0]) == id_to_find):
        #-- ret= [rvec,tvec, ?]
        #-- array of rotation and position of each marker in camera frame
        #-- rvec = [[rvec_1, [rvec2], ...]]  attitude of the marker respect to camera frame
        #-- tvec = [[tvec_1, [tvec2], ...]]  position of the marker in camera frame
        ret = aruco.estimatePoseSingleMarkers(corners, marker_size, camera_matrix, camera_distortion)

        #-- Unpack the output, get only the first
        rvec, tvec = ret[0][0,0,:], ret[1][0,0,:]

        #-- Draw the detected marker and put a reference frame over it
        aruco.drawDetectedMarkers(src_image, corners)
        aruco.drawAxis(src_image, camera_matrix, camera_distortion, rvec, tvec, 0.3)

        #-- Obtain the rotation matrix tag->camera
        R_ct = np.matrix(cv2.Rodrigues(rvec)[0])
        R_tc = R_ct.T # function transpose() with '.T'

        #-- Get the attitude in terms of euler 321 (Needs to be flipped first)
        pitch_marker, roll_marker, yaw_marker = rotationMatrixToEulerAngles(R_tc)

        #-- Now get the position and attitude of the camera with respect to the marker
        #pos_camera = -R_tc*np.matrix(tvec).T
        pitch_camera, roll_camera, yaw_camera = rotationMatrixToEulerAngles(R_flip*R_tc)

        pos_camera = Point(-tvec[0], tvec[1], tvec[2])

        #-- Print 'X' in the center of the camera
        cv2.putText(src_image, "X", (cols/2, rows/2), font, 1, (0, 0, 255), 2, cv2.LINE_AA)

        ###############################################################################

        #-- Print the tag position in camera frame
        str_position = "Position x = %4.0f  y = %4.0f  z = %4.0f"%(pos_camera.x, pos_camera.y, pos_camera.z)
        cv2.putText(src_image, str_position, (0, 30), font, 2, (255, 255, 0), 2, cv2.LINE_AA)

        #-- Get the attitude of the camera respect to the frame
        str_attitude = "Attitude pitch = %4.0f  roll = %4.0f  yaw = %4.0f"%(math.degrees(0),math.degrees(0),
                            math.degrees(yaw_camera))
        cv2.putText(src_image, str_attitude, (0, 60), font, 2, (255, 255, 0), 2, cv2.LINE_AA)

        ###############################################################################

        #rospy.loginfo('Id detected!')

    #else:
    #rospy.loginfo('No Id detected!')
    #print('No Id detected!')
    foco = 527
    x = (foco*self.vec.x)/self.vec.z
    y = (foco*self.vec.y)/self.vec.z

    X = x+(cols/2)
    Y = y+(rows/2)
    print(X)

    # Center coordinates 
    center_coordinates = (int(X), int(Y))
    #center_coordinates = (cols/2, rows/2) 
      
    # Radius of circle 
    radius = 20
       
    # Blue color in BGR 
    color = (255, 0, 0) 
       
    # Line thickness of 3 px 
    thickness = 3
       
    # Draw a circle with a blue border of 3 px thickness using cv2.circle()
    src_image = cv2.circle(src_image, center_coordinates, radius, color, thickness) 
    
    cv2.imshow("Image", src_image)
    cv2.waitKey(1)

    try:
      self.image_pub.publish(self.bridge.cv2_to_imgmsg(src_image, "bgr8"))
    except CvBridgeError as e:
      print(e)
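The snippet above calls rotationMatrixToEulerAngles and R_flip without defining them. A minimal sketch of typical definitions, assuming the common ZYX Euler extraction and a 180-degree flip about the x-axis (both assumptions, not the original project's code):

# Hypothetical helpers assumed by the snippet above -- a minimal sketch.
import math
import numpy as np

# R_flip rotates the marker frame 180 deg about x so the camera z-axis
# points out of the marker (a common choice, assumed here).
R_flip = np.zeros((3, 3), dtype=np.float64)
R_flip[0, 0] = 1.0
R_flip[1, 1] = -1.0
R_flip[2, 2] = -1.0

def rotationMatrixToEulerAngles(R):
    # Standard ZYX (roll-pitch-yaw) extraction with a gimbal-lock guard.
    sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
    if sy > 1e-6:
        x = math.atan2(R[2, 1], R[2, 2])
        y = math.atan2(-R[2, 0], sy)
        z = math.atan2(R[1, 0], R[0, 0])
    else:  # singular: pitch is +/-90 deg, yaw is unrecoverable
        x = math.atan2(-R[1, 2], R[1, 1])
        y = math.atan2(-R[2, 0], sy)
        z = 0.0
    return np.array([x, y, z])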
Example #20
0
    def detect_aruco(self, frame):
        # dictionary - 4x4 aruco images
        aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_250)
        #  aruco_dict = aruco.generateCustomDictionary(43,4)
        #  print(aruco_dict)

        # get gray picture
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        #  gray = frame[:,:,0].astype(np.uint8)

        # get aruco frames
        parameters = aruco.DetectorParameters_create()
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)
        #  print(ids)

        # put aruco detected markers on top of colored frame (webcam input)
        #  frame_markers = aruco.drawDetectedMarkers(gray.copy(), corners, ids)
        frame_markers = aruco.drawDetectedMarkers(frame.copy(), corners, ids)

        # get (x,y) center coordinates of aruco marker
        # if only one marker is present
        if ids is not None:
            # get number of matched IDs
            count_matches = 0

            # get number of aruco tags
            for i in range(len(ids)):
                if ids[i][0] == NK_ARUCO_ID:
                    count_matches = count_matches + 1
                    match_idx = i

            # move cursor only if exactly one valid ArUco symbol is detected
            if count_matches == 1:

                c = corners[match_idx][0]
                #  print(c)
                # detect one point only
                #  x = c[0][0]
                #  y = c[0][1]
                x = c[:, 0].mean()
                y = c[:, 1].mean()

                # draw center point
                draw_x1 = int(round(x)) - 5
                draw_y1 = int(round(y)) - 5
                draw_x2 = int(round(x)) + 5
                draw_y2 = int(round(y)) + 5
                frame_markers = cv2.rectangle(frame_markers,
                                              (draw_x1, draw_y1),
                                              (draw_x2, draw_y2), (0, 255, 0),
                                              3)
                #  print("c[" + str(i) + "] = " + str(c[i]))
                #  print("x = " + str(x))
                #  print("y = " + str(y))
                #  print("")

                # move cursor if checkbox is checked
                move = 0
                if self.moveCursorCheckBox.checkState():
                    move = 1

                # move cursor if needed
                self.move_cursor(move, x, y)

        return frame_markers
Example #21
0
parameters = aruco.DetectorParameters_create()
x = 8  # horizontal
y = 11  # vertical
sqr = 0.1  # solid black squares
mrk = 0.05 # markers, must be smaller than squares
board = aruco.CharucoBoard_create(x,y,sqr,mrk,dictionary)

imgs = get_images("cal-imgs/*.png", gray=False)

calcorners = []
calids = []

for im in imgs:
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    # corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, dictionary, parameters=parameters)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, dictionary)
    # print('ids 1:', len(ids))
    # if ids were found, then
    if ids is not None and len(ids) > 0:
        # print(corners, ids, rejectedImgPoints)
        ret, chcorners, chids = aruco.interpolateCornersCharuco(corners, ids, gray, board)
        calcorners.append(chcorners)
        calids.append(chids)

        im2 = im.copy()
        aruco.drawDetectedCornersCharuco(im2, chcorners, chids)
        # aruco.drawDetectedMarkers(im2, rejectedImgPoints, borderColor=(100, 0, 240))

        cv2.imshow('markers', im2)
        cv2.waitKey()
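Example #21 collects ChArUco corners and ids but stops before the calibration step. A minimal continuation sketch, assuming the calcorners, calids, board and imgs names from the loop above:

# Continuation sketch (assumes calcorners, calids, board, imgs from above):
# run the ChArUco camera calibration on the collected detections.
h, w = imgs[0].shape[:2]
rms, cameraMatrix, distCoeffs, rvecs, tvecs = aruco.calibrateCameraCharuco(
    calcorners, calids, board, (w, h), None, None)
print('RMS reprojection error:', rms)
print('camera matrix:\n', cameraMatrix)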
Example #22
0
File: procman.py Project: SWiT/ARTT
    def scanSubprocess(self, timestamp, timeout, image, offsetx, offsety, resultsque):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        self.corners, self.ids, self.rejectedImgPoints = aruco.detectMarkers(gray, self.aruco_dict, parameters=self.parameters)

        resultsque.put([timestamp, self.corners, self.ids, self.rejectedImgPoints])
        resultsque.close()
size = args["marker_with_border"] / 2
nsize = -1 * size
axis = np.float32([[nsize, size, 0], [size, size, 0], [size, nsize, 0], [nsize, nsize, 0]])
counter = 0

# Now comes the recording part
print("Press [SPACE] to record a sample and [ESC] to close the application:")
while(True):
	# Get frame from realsense, convert to bgr and create a grayscale image for marker detection
	frames = pipeline.wait_for_frames()
	frame = cv2.cvtColor(np.asanyarray(frames.get_color_frame().get_data()), cv2.COLOR_RGB2BGR)
	display_frame = deepcopy(frame)
	frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    
	# Detect markers and draw them
	corners, ids, rejectedImgPoints = aruco.detectMarkers(frame_gray, aruco.getPredefinedDictionary(dictionary))
	if ids is not None and len(ids) > 0:
		display_frame = aruco.drawDetectedMarkers(display_frame, corners, ids)
	
	# Create the gt mask
	mask = np.zeros(shape=(display_frame.shape[0], display_frame.shape[1], display_frame.shape[2])).astype(np.uint8)
	if ids is not None and len(ids) > 0:
		for marker in corners:
			rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(marker, 1, mtx, dist)
			imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)
			imgpts = np.int32(imgpts).reshape(-1, 2)
			mask = cv2.drawContours(mask, [imgpts[:4]], -1, (255, 255, 255), -1)
	
	# Display the current frame with the corresponding mask 
	cv2.imshow("Record", np.hstack((cv2.resize(display_frame, (0,0), fx=0.5, fy=0.5) , cv2.resize(mask, (0,0), fx=0.5, fy=0.5))))
	key = cv2.waitKey(100)
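	# Sketch of the implied key handling (not in the original, truncated
	# snippet); assumes frame, mask and counter from the loop above, and
	# the file names are illustrative only.
	if key == 32:  # [SPACE] records the current frame/mask pair
		cv2.imwrite("sample_%04d_rgb.png" % counter, frame)
		cv2.imwrite("sample_%04d_mask.png" % counter, mask)
		counter += 1
	elif key == 27:  # [ESC] closes the application
		break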
Example #24
0
    def getAndSearchImage(self):
        global drone
        global thread_lock_camera_frame
        global render_frame
        global new_frame
        global W
        global H
        try:
            # print pygame.image
            pixelarray = drone.get_image()
            if pixelarray is not None:
                frame = pixelarray[:, :, ::-1].copy()
                #resize image
                resized = cv2.resize(frame, (W, H))
                # aruco detection
                gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
                aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
                parameters = aruco.DetectorParameters_create()

                #lists of ids and the corners belonging to each id
                corners, ids, rejectedImgPoints = aruco.detectMarkers(
                    gray, aruco_dict, parameters=parameters)

                # iterate over all found markers to determine the one to follow (biggest one)
                selected_corners = []
                max_index = (0, 0)
                counter = 0
                if len(corners) > 0:
                    dim = corners[0].shape
                    #print dim
                    while counter < dim[0]:
                        tmp_corners = ((corners[0])[counter])
                        # A=(1/2)|[(x3-x1)(y4-y2) +(x4-x2)(y1-y3)]|
                        area = 0.5 * ((tmp_corners[2][0] - tmp_corners[0][0]) *
                                      (tmp_corners[3][1] - tmp_corners[1][1]) +
                                      (tmp_corners[3][0] - tmp_corners[1][0]) *
                                      (tmp_corners[0][1] - tmp_corners[2][1]))
                        if area > max_index[0]:
                            max_index = (area, counter)
                        counter += 1

                    max_corners = ((corners[0])[max_index[1]])
                    selected_corners = np.array([
                        np.array([(corners[0])[max_index[1]]],
                                 dtype=np.float32)
                    ])  #[max_index[0]*4:max_index[0]*4+3]

                # draw all markers
                display = aruco.drawDetectedMarkers(resized, corners)

                thread_lock_camera_frame.acquire()
                render_frame = display
                new_frame = True
                thread_lock_camera_frame.release()

                # prepare function output
                if len(selected_corners) > 0:
                    x, y = max_corners.sum(axis=0) / 4
                    area = max_index[0]
                else:
                    x = -W
                    y = -1
                    area = -1
                return (x - W / 2, y - H / 2), area
        except:
            pass
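The area computation in getAndSearchImage above is the shoelace formula specialized to a quadrilateral. A standalone sketch of the same idea for any simple polygon (an illustrative helper, not part of the original project):

# Shoelace formula for the area of a simple polygon given as an (N, 2)
# array of corner points, generalizing the quadrilateral case above.
import numpy as np

def polygon_area(pts):
    x, y = pts[:, 0], pts[:, 1]
    return 0.5 * abs(np.dot(x, np.roll(y, -1)) - np.dot(y, np.roll(x, -1)))

# e.g. a unit square has area 1.0:
# polygon_area(np.array([[0, 0], [1, 0], [1, 1], [0, 1]]))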
    def precess(self):
        isfinish = False
        rgb = cv2.cvtColor(self.arr, cv2.COLOR_BGR2RGB)
        gray = cv2.cvtColor(self.arr, cv2.COLOR_BGR2GRAY)

        dictionary = aruco.Dictionary_get(aruco.DICT_4X4_1000)

        corners, ids, rejectedPoints = aruco.detectMarkers(
            gray, dictionary, parameters=aruco.DetectorParameters_create())
        ### draw marker
        aruco.drawDetectedMarkers(rgb, corners, ids)

        if len(corners) == 4:
            self.isDetect4Marker = True
            for i in ids:
                if i[0] not in [0, 1, 2, 3]:
                    self.isDetect4Marker = False

            self.corners = corners
            self.ids = ids

        if self.isDetect4Marker:

            plotMarker(rgb, self.corners, self.ids)

            rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
                self.corners, self.arucoSize, self.mtx, self.dist)

            Rpose = from_cam_to_robot(tvecs,
                                      -np.pi / 6)  ### pose for robot coor
            PoseID = self.ids

            for i in range(rvecs.shape[0]):
                aruco.drawAxis(rgb, self.mtx, self.dist, rvecs[i, :, :],
                               tvecs[i, :, :], 0.05)

            ### find Internal Corners
            insidecorners = findInternalCorner(self.corners)

            ### draw boundary
            cv2.polylines(rgb, [np.array(insidecorners, dtype=int)], True,
                          (0, 255, 0), 4)

            Pdst = np.array(
                [[0, 0], [self.w, 0], [self.w, self.h], [0, self.h]],
                np.float32)

            ### detect circle
            hsv = cv2.cvtColor(self.arr, cv2.COLOR_BGR2HSV)
            lower = np.array([0, 0, 0], np.uint8)
            upper = np.array([180, 255, 50], np.uint8)
            sep = cv2.inRange(hsv, lower, upper)

            kernel = np.ones((9, 9), np.float32) / 81
            sep = cv2.filter2D(sep, -1, kernel)

            M = cv2.getPerspectiveTransform(insidecorners, Pdst)
            warped = cv2.warpPerspective(sep, M, (self.w, self.h))
            warped = 255 - warped
            circles = cv2.HoughCircles(warped,
                                       cv2.HOUGH_GRADIENT,
                                       1,
                                       10,
                                       param1=1,
                                       param2=30,
                                       minRadius=30,
                                       maxRadius=50)

            if circles is not None:

                ### remove circle detect the aruco
                new_circles = []

                for i in circles[0]:
                    if not (i[1] > self.h - self.aS and i[0] < self.aS):
                        if not (i[1] > self.h - self.aS
                                and i[0] > self.w - self.aS):
                            new_circles.append(i)
                            cv2.circle(warped, (i[0], i[1]), i[2], (0, 0, 255),
                                       2)

                circles = np.array(new_circles)
                cv2.imshow('gray', warped)
                cv2.waitKey(1)

                if len(circles) >= 3:

                    kmeans = KMeans(n_clusters=3, random_state=0).fit(circles)
                    c = kmeans.labels_
                    center = kmeans.cluster_centers_

                    dis0 = np.sqrt(
                        (center[0] - center[1]).dot(center[0] - center[1]))
                    dis1 = np.sqrt(
                        (center[1] - center[2]).dot(center[1] - center[2]))
                    dis2 = np.sqrt(
                        (center[2] - center[0]).dot(center[2] - center[0]))

                    ### check detect three tube in the work space
                    if dis0 > 50 and dis1 > 50 and dis2 > 50:
                        print("three tube")

                        index = np.argsort(
                            [center[0][0], center[1][0], center[2][0]])

                        circles = np.uint16(np.around(circles))
                        maxCA = 0
                        maxhA = 0
                        maxCB = 0
                        maxhB = 0
                        maxCC = 0
                        maxhC = 0
                        for i in range(len(circles)):
                            ## left tube
                            if c[i] == index[0]:
                                if circles[i][1] > maxhA:
                                    maxCA = circles[i]
                            ## mid tube
                            if c[i] == index[1]:
                                if circles[i][1] > maxhB:
                                    maxCB = circles[i]
                            ## right tube
                            if c[i] == index[2]:
                                if circles[i][1] > maxhC:
                                    maxCC = circles[i]

                        self.circleList[0].append(maxCA)
                        self.circleList[1].append(maxCB)
                        self.circleList[2].append(maxCC)

                        L = len(self.circleList[0])
                        if L > 50:
                            warped = cv2.cvtColor(warped, cv2.COLOR_GRAY2RGB)
                            p = Position()
                            tube = np.sum(self.circleList[0][L - 10:L - 1],
                                          axis=0) / 9

                            cv2.circle(warped, (tube[0], tube[1]), tube[2],
                                       (0, 0, 255), 2)
                            cv2.circle(warped, (tube[0], tube[1]), 2,
                                       (0, 0, 255), 3)

                            ### translate to Robot coord
                            IDs = IDAruco2IDCorner(PoseID, 0)

                            tubeX = Rpose[IDs][0] - (
                                tube[1] * self.workSpaceW /
                                self.w) - self.arucoSize / 2 + 0.02
                            tubeY = Rpose[IDs][1] - (
                                tube[0] * self.workSpaceH /
                                self.h) + self.arucoSize / 2
                            tubeZ = Rpose[IDs][2]

                            p.Ax = tubeX
                            p.Ay = tubeY
                            p.Az = tubeZ
                            p.A = True

                            tube = np.sum(self.circleList[1][L - 10:L - 1],
                                          axis=0) / 9

                            cv2.circle(warped, (tube[0], tube[1]), tube[2],
                                       (0, 0, 255), 2)
                            cv2.circle(warped, (tube[0], tube[1]), 2,
                                       (0, 0, 255), 3)

                            ### translate to Robot coord
                            IDs = IDAruco2IDCorner(PoseID, 0)

                            tubeX = Rpose[IDs][0] - (
                                tube[1] * self.workSpaceW /
                                self.w) - self.arucoSize / 2 + 0.02
                            tubeY = Rpose[IDs][1] - (
                                tube[0] * self.workSpaceH /
                                self.h) + self.arucoSize / 2
                            tubeZ = Rpose[IDs][2]

                            p.Bx = tubeX
                            p.By = tubeY
                            p.Bz = tubeZ
                            p.B = True

                            tube = np.sum(self.circleList[2][L - 10:L - 1],
                                          axis=0) / 9

                            cv2.circle(warped, (tube[0], tube[1]), tube[2],
                                       (0, 0, 255), 2)
                            cv2.circle(warped, (tube[0], tube[1]), 2,
                                       (0, 0, 255), 3)

                            ### translate to Robot coord
                            IDs = IDAruco2IDCorner(PoseID, 0)

                            tubeX = Rpose[IDs][0] - (
                                tube[1] * self.workSpaceW /
                                self.w) - self.arucoSize / 2 + 0.02
                            tubeY = Rpose[IDs][1] - (
                                tube[0] * self.workSpaceH /
                                self.h) + self.arucoSize / 2 - 0.02
                            tubeZ = Rpose[IDs][2]

                            p.Cx = tubeX
                            p.Cy = tubeY
                            p.Cz = tubeZ
                            p.C = True

                            IDs = IDAruco2IDCorner(PoseID, 3)

                            tubeX = Rpose[IDs][0]
                            tubeY = Rpose[IDs][1]
                            tubeZ = Rpose[IDs][2]

                            IDs = IDAruco2IDCorner(PoseID, 2)

                            tubeX1 = Rpose[IDs][0]
                            tubeY1 = Rpose[IDs][1]
                            tubeZ1 = Rpose[IDs][2]

                            p.Dx = (tubeX + tubeX1) / 2
                            p.Dy = (tubeY + tubeY1) / 2
                            p.Dz = (tubeZ + tubeZ1) / 2
                            p.D = True
                            cv2.imshow('gray', warped)
                            cv2.waitKey(1)
                            if raw_input("tube position is correct?") == "yes":
                                self.pubTubePosition.publish(p)
                                #print(os.path.dirname(__file__))
                                isfinish = True
                                #cv2.imwrite(os.path.dirname(__file__) + "/gray.png",warped)
                                #cv2.imwrite(os.path.dirname(__file__) + "/img.png",rgb)

        cv2.imshow('img', rgb)
        cv2.waitKey(1)

        return isfinish
#start combination cal.
for a in range(0,20):
    print(a)
    for j in range(0,20):
        for k in range(0,20):
            for d in range(0,20):
                #first pass: sweep in steps of 20
                distCoeffs = (-0.060285+a*0.0054385, -0.500998+j*0.03915, -0.003908+k*0.000336,-0.004445+d*0.0003888)
                #second pass
                #distCoeffs =(-0.0396187 + (a*0.0010877), -0.195628+ (j*0.00783), -0.0035172+ (k * 0.000672), -0.00063476+(d * 0.0000777))
                
                while(True):
                    ret, image = cap.read()
                    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                    corners, ids, rejectedImgPoints = aruco.detectMarkers(image=image, dictionary=arucoDict, cameraMatrix=cameraMatrix, distCoeff=distCoeffs, parameters = parameters)
                    rvecs, tvecs, _objPoints = aruco.estimatePoseSingleMarkers(corners, 0.06, cameraMatrix, distCoeffs)
                    if ids.size == 9:
                        temp = [(),(),(),(),(),(),(),(),()]
                        for i in range(0,ids.size):
                            idr = ids[i][0]
                            #x = (corners[i][0][0][0] + corners[i][0][1][0] + corners[i][0][2][0] + corners[i][0][3][0]) / 4
                            #y = (corners[i][0][0][1] + corners[i][0][1][1] + corners[i][0][2][1] + corners[i][0][3][1]) / 4
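The four nested loops above sweep a 20x20x20x20 grid of candidate distortion coefficients. A sketch of the same sweep flattened with itertools.product, which keeps the start/step values in one place (copied from the first parameter set above):

# Sketch: the same grid sweep via itertools.product.
import itertools

starts = (-0.060285, -0.500998, -0.003908, -0.004445)
steps = (0.0054385, 0.03915, 0.000336, 0.0003888)

for a, j, k, d in itertools.product(range(20), repeat=4):
    distCoeffs = tuple(s0 + i * st
                       for s0, st, i in zip(starts, steps, (a, j, k, d)))
    # ... run the detection/pose loop with this candidate distCoeffs ...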
Example #27
0
	def processFrame(self):
		# grab the frame from the threaded video stream
		while(True):
			frame, repeated = self.vs.read()
			if(not repeated):
				break

		# detect aruco markers in the frame
		gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
		corners, idsM, rejectedImgPoints = aruco.detectMarkers(gray, self.aruco_dict, parameters=self.parameters, cameraMatrix=self.refinedMatrix, distCoeff=self.distCoeffs)

		#remove detected markers that aren't defined in marker_points.txt, and change format to array from matrix
		ids = []
		if(idsM is not None):
			#change ids format to array
			for i in idsM:
				ids = np.append(ids, i[0])
			#remove undefined ids
			for i in ids:
				if(i not in self.markerIds):
					pos = np.where(ids == i)[0][0] #the reason for these brackets is to undo numpy's return of array of arrays
					ids = np.delete(ids, pos)

		success = False
		world_coordinates = []
		rot_vector = []
		trans_vector = []
		retIds = []

		#if a defined marker has been detected
		if(len(ids) > 0):

			#create objPoints depending on which markers were detected, respecting
			#the order of 'corners' and 'ids'
			objPoints = np.empty((0,3),float)
			imgPoints = np.empty((0,2), float)
			for i in ids:
				#self.markerIds may be unordered, so we must find the index of each id
				pos = np.where(self.markerIds == i)[0][0]
				#extract the corresponding points of the id
				markerIPoints = self.markerPoints[pos*4:(pos+1)*4][:]
				#insert the matrix into our final objPoints
				objPoints = np.vstack((objPoints, markerIPoints))
				#insert the corresponding detected corners into imgPoints
				posCorner = np.where(idsM == i)[0][0]
				imgPoints = np.vstack((imgPoints, corners[posCorner][0]))
				retIds.append(i)

			#show the frame, with detected markers
			gray = aruco.drawDetectedMarkers(gray, corners)

			success, rotation_vector, translation_vector = cv2.solvePnP(objPoints, imgPoints, self.refinedMatrix, self.distCoeffs, flags=cv2.SOLVEPNP_ITERATIVE)
			if(success):
				#solvePnP's x-axis rotation angle is of opposite sign relative to the Y coordinate

				if(self.applyDisplacement):
					for i in range(3):
						translation_vector[i] = translation_vector[i] + self.displacementArray[i]

				world_coordinates = self.__camera_to_world_coords(rotation_vector, translation_vector)
				rotation_vector[0] = -rotation_vector[0]

				if(self.inverseX):
					world_coordinates[0] = -world_coordinates[0]

				if(self.inverseY):
					world_coordinates[1] = -world_coordinates[1]

				if(self.inverseZ):
					world_coordinates[2] = -world_coordinates[2]

				#turn rotation_vector from radians to degrees
				rotation_vector = rotation_vector / math.pi * 180

				if(self.inverseXAngle):
					rotation_vector[0] = -rotation_vector[0]

				if(self.inverseYAngle):
					rotation_vector[1] = -rotation_vector[1]

				if(self.inverseZAngle):
					rotation_vector[2] = -rotation_vector[2]

				#build output arrays
				for i in range(3):
					rot_vector.append((rotation_vector[i]).item())
					trans_vector.append((translation_vector[i]).item())


		return success, world_coordinates, rot_vector, trans_vector, retIds, gray
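processFrame relies on a private __camera_to_world_coords helper that is not shown. A minimal sketch of the usual solvePnP inversion, assuming the helper computes the camera position in world coordinates as C = -R^T t:

# Minimal sketch of a solvePnP camera-to-world conversion (an assumption:
# the project's __camera_to_world_coords is not shown).
import cv2
import numpy as np

def camera_to_world_coords(rotation_vector, translation_vector):
    R, _ = cv2.Rodrigues(rotation_vector)
    # solvePnP returns the world->camera transform; inverting it gives
    # the camera position in world coordinates: C = -R^T t
    return (-np.dot(R.T, translation_vector.reshape(3, 1))).ravel()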
Example #28
0
cv2.imshow("marker", aruco.drawMarker(adict, 0, 400))

# random calibration data. your mileage may vary.
imsize = (800, 600)
K = cv2.getDefaultNewCameraMatrix(np.diag([800, 800, 1]), imsize, True)

# AR scene
cv2.ovis.addResourceLocation("packs/Sinbad.zip")  # shipped with Ogre

win = cv2.ovis.createWindow("arucoAR", imsize, flags=0)
win.setCameraIntrinsics(K, imsize)
win.createEntity("figure", "Sinbad.mesh", (0, 0, 5), (1.57, 0, 0))
win.createLightEntity("sun", (0, 0, 100))

# video capture
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, imsize[0])
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, imsize[1])

while cv2.ovis.waitKey(1) != 27:
    img = cap.read()[1]
    win.setBackground(img)
    corners, ids = aruco.detectMarkers(img, adict)[:2]

    cv2.waitKey(1)

    if ids is None:
        continue

    rvecs, tvecs = aruco.estimatePoseSingleMarkers(corners, 5, K, None)[:2]
    win.setCameraPose(tvecs[0].ravel(), rvecs[0].ravel(), invert=True)
Example #29
0
img = np.full((700, 700), 255, np.uint8)

# Paste first marker (200x200 image)
img[100:300, 100:300] = aruco.drawMarker(aruco_dict, 2, 200)
# Paste second marker (200x200 image)
img[100:300, 400:600] = aruco.drawMarker(aruco_dict, 76, 200)
# Paste third marker (200x200 image)
img[400:600, 100:300] = aruco.drawMarker(aruco_dict, 42, 200)
# Paste fourth marker (200x200 image)
img[400:600, 400:600] = aruco.drawMarker(aruco_dict, 123, 200)

# Blur the image
img = cv2.GaussianBlur(img, (11, 11), 0)

# Locate the markers
corners, ids, _ = aruco.detectMarkers(img, aruco_dict)

# Get a color (BGR) version of the image so the markers can be drawn in color
img_color = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

# Draw the markers
aruco.drawDetectedMarkers(img_color, corners, ids)

# Show images
cv2.imshow('Created AruCo markers', img)
cv2.imshow('Detected AruCo markers', img_color)
cv2.waitKey(0)
cv2.destroyAllWindows()

cap = cv2.VideoCapture(0)
while True:
Example #30
0
def track(matrix_coefficients, distortion_coefficients, markerLength,
          frame_rate):
    print('check markerLength', markerLength)
    global cX_ini
    global cY_ini
    global cY
    global cX
    global data_blow
    global data_value_dis
    global data_check
    global tvec_ini
    global tvec
    global blow
    global dist
    global data_tvecs
    global value_dis
    global value_blow
    global pile_drive
    global i
    data_tvecs = []  # Prepare to add tvec
    value_dis = []  # Prepare to get result distance(scalar) unit meter
    value_blow = []
    sum_tvec = 0
    tvec_ini = np.array([0, 0, 0])
    tvec = np.array([0, 0, 0])
    dist = 0
    prev = 0
    blow = 0.00
    cX = 0
    cY = 0
    cX_ini = 300
    cY_ini = 50
    check = []

    while cap.isOpened():
        #convert image to gray scale image
        #Capture frame-by-frame
        time_elapsed = time.time() - prev
        ret, frame = cap.read()

        #operations on the frame come here
        if not ret:
            continue

        if time_elapsed > 1. / frame_rate:
            prev = time.time()
            #frame = cv2.resize(frame, (4000, 5000))
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Change grayscale
            aruco_dict = aruco.Dictionary_get(
                aruco.DICT_5X5_250)  # Use 5x5 dictionary to find markers
            parameters = aruco.DetectorParameters_create(
            )  # Marker detection parameters
            # lists of ids and the corners belonging to each id
            corners, ids, rejected_img_points = aruco.detectMarkers(
                gray,
                aruco_dict,
                parameters=parameters,
                cameraMatrix=np.float32(matrix_coefficients),
                distCoeff=np.float32(distortion_coefficients))
            # calculate(ids,corners,matrix_coefficients,distortion_coefficients,frame)
            if ids is not None:  # If there are markers found by detector
                for i in range(0, len(ids)):  # Iterate in markers
                    # Estimate pose of each marker and return the values rvec and tvec---different from camera coefficients
                    rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(
                        corners[i], markerLength,
                        np.float32(matrix_coefficients),
                        np.float32(distortion_coefficients))

                    (rvec - tvec
                     ).any()  # get rid of that nasty numpy value array error
                    aruco.drawDetectedMarkers(
                        frame, corners)  # Draw A square around the markers
                    #aruco.drawAxis(frame, np.float32(matrix_coefficients), np.float32(distortion_coefficients),np.float32(rvec), np.float32(tvec), 0.01)  # Draw Axis

                    data_tvecs.append(
                        tvec)  # get every data of tvec in array data_tvecs
                    length = len(data_tvecs)  # For easy to calculate equation
                    if length > 1:
                        diff = data_tvecs[
                            length -
                            1] - tvec_ini  # Find distance between tvec(vector)
                        sum_tvec = sqrt(np.sum(
                            diff**2))  # convert vector to scalar
                        dist = sum_tvec
                        value_dis.append(
                            np.round(sum_tvec, 3)
                        )  # get the distance data(scalar)  in array value
                    #calculate move per blow
                    length = len(value_dis)

                    if length > 10:
                        pile_drive = []
                        for x in range(10):
                            pile_drive.append(value_dis[length - x - 1])
                        #print('pile drive before', pile_drive)

                        if np.std(pile_drive) <= .005:
                            #print('pile drive after', pile_drive)
                            blow = float(np.average(pile_drive))
                            value_blow.append(np.round(blow, 4))
                            check = pd.Series(value_blow)
                            check = check.unique()
                        value_dis = []
                        pile_drive = []
            if len(corners) > 0:
                # flatten the ArUco IDs list
                ids = ids.flatten()
                # loop over the detected ArUCo corners
                for (markerCorner, markerID) in zip(corners, ids):
                    # extract the marker corners (which are always returned in
                    # top-left, top-right, bottom-right, and bottom-left order)
                    find_center = markerCorner.reshape((4, 2))
                    (topLeft, topRight, bottomRight, bottomLeft) = find_center
                    # convert each of the (x, y)-coordinate pairs to integers
                    topRight = (int(topRight[0]), int(topRight[1]))
                    bottomRight = (int(bottomRight[0]), int(bottomRight[1]))
                    bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))
                    topLeft = (int(topLeft[0]), int(topLeft[1]))
                    # draw the bounding box of the ArUCo detection
                    #cv2.line(frame, topLeft, topRight, (0, 255, 0), 2)
                    #cv2.line(frame, topRight, bottomRight, (0, 255, 0), 2)
                    #cv2.line(frame, bottomRight, bottomLeft, (0, 255, 0), 2)
                    #cv2.line(frame, bottomLeft, topLeft, (0, 255, 0), 2)
                    # compute and draw the center (x, y)-coordinates of the ArUco
                    # marker
                    cX = int((topLeft[0] + bottomRight[0]) / 2.0)
                    cY = int((topLeft[1] + bottomRight[1]) / 2.0)
                    cv2.circle(frame, (cX, cY), 4, (0, 0, 255), -1)
                    # draw the ArUco marker ID on the image
                    cv2.putText(frame, str(markerID),
                                (topLeft[0], topLeft[1] - 15),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                    cv2.putText(frame, str(topRight),
                                (topRight[0], topRight[1] - 15),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                    cv2.circle(frame, (cX_ini, cY_ini), 4, (0, 255, 0), -1)

            fps = 1 / time_elapsed
            fps = np.round(fps, 1)
            cv2.putText(frame, 'FPS  ' + str(fps), (50, 75),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

            data_value_dis = pd.DataFrame(value_dis, columns=['real Distance'])
            #data_blow = pd.DataFrame(value_blow, columns = ['avg blow'])
            #data_check = pd.DataFrame(check, columns = ['check'])
            #for gui
            image_in = frame
            image_in = Image.fromarray(image_in)
            vdo.image = image_in
            diff_text = "{:.3f}".format(float(dist))
            blow_show = "{:.3f}".format(float(blow))
            label1.text = 'distance= ' + diff_text + ' m'
            #label2.text ='deform per blows='+blow_show + ' m'
            #label3.text ='deform per blows realtime ='+ str(np.round(blow,4))+ ' m'

    return
camera = PiCamera()
#camera.resolution = (1040, 784)
camera.resolution = (1024, 770)
camera.framerate = 30
rawCapture = PiRGBArray(camera, size=(1024, 770))

time.sleep(0.1)

tvec0 = np.array([[[0.0, 0.0, 0.0]]])

rvecmax = 0.0

for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    image = frame.array
    cv2.rectangle(image, (0, 0), (200, 200), (220, 240, 230), -1)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(image, aruco_dict)
    image = aruco.drawDetectedMarkers(image, corners) # marker outlines
    rvec, tvec ,_ = aruco.estimatePoseSingleMarkers(corners, 0.05, mtx, distor)
    if ids is not None:
        for i in range(0, ids.size):
            #print(image.dtype)
            rr, thet = ra.rArea(corners)
            aruco.drawAxis(image, mtx, distor, rvec[0], tvec[0], 0.06) # np.array([0.0, 0.0, 0.0])
            cv2.putText(image, "%.1f cm -- %.0f deg" % ((tvec[0][0][2] * 100), (rvec[0][0][2] / math.pi * 180)), (0, 230), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (244, 244, 244))
            cv2.circle(image, (100, int(rr / 600)), 6, (200, 40, 230), -1)
            R, _ = cv2.Rodrigues(rvec[0])
            cameraPose = -np.dot(R.T, tvec[0].T)  # camera position: -R^T t
    
    cv2.imshow("Frame", image)
    key = cv2.waitKey(1) & 0xFF
    rawCapture.truncate(0)
    def image_cb(self, msg):
        # get image from camera
        image = self.bridge.imgmsg_to_cv2(msg)
        # transfer to grey image
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # define the dictionary
        aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_250)
        # define parameters for aruco_detect
        parameters = aruco.DetectorParameters_create()
        # calibration matrices
        # default matrices used here since there is no distortion in simulation
        camera_matrix = np.array(
            [[image.shape[1], 0, image.shape[1]/2],
             [0, image.shape[1], image.shape[0]/2],
             [0, 0, 1]], dtype="double")
        dist = np.array( [0, 0, 0, 0, 0] )

        # get ID and corners
        corners, ids, rejected_img_points = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)

        # when there is marker in sight
        if ids is not None:

            # get image dimension
            h, w, d = image.shape
            # update id that is seen
            self.id = max(ids)
            # estimate pose of each marker
            ret = aruco.estimatePoseSingleMarkers(corners, 0.2, camera_matrix, dist)
            rvec, self.tvec = ret[0][0,0,:], ret[1][0,0,:]
            # clear numbers
            (rvec - self.tvec).any()
            # draw contours of markers
            aruco.drawDetectedMarkers(image, corners)
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(image, "Id: " + str(self.id), (0,64), font, 1, (0,255,0),2,cv2.LINE_AA)

            # find the center pixel of the marker with largest id
            x_sum = corners[0][0][0][0]+corners[0][0][1][0]+corners[0][0][2][0]+corners[0][0][3][0]
            y_sum = corners[0][0][0][1]+corners[0][0][1][1]+corners[0][0][2][1]+corners[0][0][3][1]
            cx = int(x_sum*.25)
            cy = int(y_sum*.25)
            # put a circle there
            cv2.circle(image, (cx, cy), 15, (0,0,255), -1)

            # use "P" control to move
            err = cx - w/2
            self.twist.linear.x = 0.1
            self.twist.angular.z = -float(err) / 500 / 4
            self.cmd_vel_pub.publish(self.twist)
            ids = None
        
        # when there's no marker in sight
        else:
            if min(self.ranges[0:30]) < 1.2 or min(self.ranges[330:360]) < 1.2:
                if min(self.ranges[0:100]) < min(self.ranges[260:360]):
                    # turn right
                    self.twist.angular.z = ABS_ROTATE * (-1)
                    self.twist.linear.x = FORWARD_SPEED
                else:
                    # turn left
                    self.twist.angular.z = ABS_ROTATE
                    self.twist.linear.x = FORWARD_SPEED
                self.cmd_vel_pub.publish(self.twist)
            else:
                pid = self.calc_pid()
                self.twist.angular.z = ABS_ROTATE * pid
                self.twist.linear.x = FORWARD_SPEED
                self.cmd_vel_pub.publish(self.twist)
            
        # show the image window
        cv2.imshow('image', image)
        cv2.waitKey(3)
Example #33
0
marker_length = .20955  #in meters
aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_6X6_250)

#load intrinsic
camera_mtx = np.asmatrix(np.loadtxt("cameramatrix.txt"))
camera_dist = np.loadtxt("cameradistortion.txt")

img = cv2.imread("aruco.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

parameters = aruco.DetectorParameters_create()

#find markers
corners, idx, rejected = aruco.detectMarkers(gray,
                                             aruco_dict,
                                             parameters=parameters)
print(corners)

img = aruco.drawDetectedMarkers(img, corners)
for rect in rejected:
    print("Rect:" + str(rect))
    for point in rect[0]:
        cv2.circle(img, (int(point[0]), int(point[1])), 2, (0, 255, 0), -1)

#get pose
rvecs, tvecs, obj_points = aruco.estimatePoseSingleMarkers(
    corners, marker_length, camera_mtx, camera_dist)

#save rotation and translation
np.savetxt("rotationvector.txt", rvecs[0])
np.savetxt("translationvector.txt", tvecs[0])
Example #34
0
while cap.isOpened():
    fn += 1

    im_src = cv2.imread('../media/icon2.png')
    # Capture frame-by-frame

    ret, frame = cap.read()

    org_frame = frame
    # Check if frame is not empty

    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters = aruco.DetectorParameters_create()
    corners, ids, _ = aruco.detectMarkers(frame,
                                          aruco_dict,
                                          parameters=parameters)
    if ids is not None:

        # print(np.array(corners))
        # print(corners)
        print(ids)
        for c in corners:
            x1 = (c[0][0][0], c[0][0][1])
            x2 = (c[0][1][0], c[0][1][1])
            x3 = (c[0][2][0], c[0][2][1])
            x4 = (c[0][3][0], c[0][3][1])
            im_dst = frame

            size = im_src.shape
            pts_dst = np.array([x1, x2, x3, x4])
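            # Sketch of the implied continuation (the snippet above is
            # truncated): warp im_src onto the detected marker quad.
            # pts_src are the source-image corners in the same TL,TR,BR,BL
            # order that detectMarkers returns.
            pts_src = np.array([[0, 0], [size[1], 0],
                                [size[1], size[0]], [0, size[0]]],
                               dtype=np.float32)
            h_mat, _ = cv2.findHomography(pts_src, pts_dst)
            warped = cv2.warpPerspective(im_src, h_mat,
                                         (im_dst.shape[1], im_dst.shape[0]))
            cv2.fillConvexPoly(im_dst, pts_dst.astype(np.int32), 0)
            im_dst = im_dst + warped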
    d = np.linalg.norm(a - b)


while (1):
    ret, frame = cam.read()
    ret, frame2 = cam.read()
    ret, frame3 = cam.read()
    ret, frame5 = cam.read()
    if not ret:
        break
    frame4 = cv2.cvtColor(frame3, cv2.COLOR_BGR2GRAY)

    cv2.rectangle(frame4, (10, 10), (100, 100), color=(255, 0, 0), thickness=3)

    corners, ids, rejected = aruco.detectMarkers(frame4,
                                                 aruco_dict,
                                                 parameters=parameters)

    canvas = frame.copy()
    canvas2 = frame2.copy()
    # lower = (220,80,80)  #130,150,80
    # upper = (240,100,100) #250,250,120

    # Black
    # lower = np.array([110, 80, 20])
    # upper = np.array([140, 255, 255])

    # Green
    Grlower = np.array([0, 143, 0])
    Grupper = np.array([102, 255, 255])
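    # Sketch of the implied masking step (the snippet above is truncated):
    # threshold the frame in HSV space with the green bounds and show it.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    green_mask = cv2.inRange(hsv, Grlower, Grupper)
    green_only = cv2.bitwise_and(canvas, canvas, mask=green_mask)
    cv2.imshow('green mask', green_only)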
Example #36
0
File: arena.py Project: SWiT/ARTT
    def scan(self):
        # Scan each zone.
        for z in self.zones:
            z.getImage()
            timestamp = time.time()

            # If the zone is calibrated
            if z.calibrated:
                # Warp the zone part of the image and make it rectangular.
                z.warpImage()

                # Handle any returned symbol data.
#                while self.procman.resultsAvailable():
#                    data = self.procman.getResult()
#                    if len(data) == 2:
#                        ts = data[0]    # timestamp
#                        symbols = data[1]
#                    else:
#                        continue
#
#                # Remove finished processes from the pool.
#                self.procman.removeFinished()
#
#                # Scan for all known markers.
#                #for marker in self.markers.iteritems():
#                    #roi = z.image[c.roiminy:c.roimaxy, c.roiminx:c.roimaxx]
#                    #self.procman.addProcess(timestamp, self.scantimeout, roi, c.roiminx, c.roiminy)
#
#                    # Blank the region of the image where the symbol was last seen.
#                    # Remove jitter by no includeing symbol[2]
#                    #s = copy(c.symbol)
#                    #s.pop(2)
#                    #s = np.delete()
#                    #print c.symbol
#                    #print s,"\n"
#
#                    #poly = array(c.symbol, int32)
#                    #cv2.fillConvexPoly(z.image, poly, (255,255,255))
#                    #cv2.putText(z.image, str(c.id), (c.location[0]-8, c.location[1]+8), cv2.FONT_HERSHEY_PLAIN, 1.5, c.color_augtext, 2)
#
#                # Scan for a new symbol.
#                self.procman.addProcess(timestamp, self.scantimeout, z.image)


            # If the zone is not calibrated
            else:
                #Convert image to grayscale and detect markers.
                gray = cv2.cvtColor(z.image, cv2.COLOR_BGR2GRAY)
                self.corners, self.ids, self.rejectedImgPoints = aruco.detectMarkers(gray, self.aruco_dict, parameters=self.parameters)

                if self.ids is not None:
                    for idx,foundid in enumerate(self.ids):
                        markerid = foundid[0] # I have no idea why the ids are a 2 dimensional list.
                        if markerid not in self.markers:
                            self.markers[markerid] = self.corners[idx][0]
                            self.markerfoundcount[markerid] = 1
                        else:
                            if self.markerfoundcount[markerid] < self.calibrationMin:
                                self.markerfoundcount[markerid] += 1

                # Store the calibrationMin of corners and ids
                if len(self.corners) > 0:
                    retval, charucoCorners, charucoIds = cv2.aruco.interpolateCornersCharuco(self.corners, self.ids, gray, z.projector.board)
                    if charucoCorners is not None and charucoIds is not None and len(charucoCorners)==len(charucoIds):
                        self.calibrationCorners.append(charucoCorners)
                        self.calibrationIds.append(charucoIds)
                        if len(self.calibrationCorners) > self.calibrationMin:
                            # remove the oldest
                            self.calibrationCorners.pop(0)
                            self.calibrationIds.pop(0)


                # Ready to calibrate when all markers have been found the minimum number of times.
                ready = True
                if len(self.markers)-1 == z.projector.maxcalmarkerid:
                    for idx in range(0,z.projector.maxcalmarkerid+1):
                        if self.markerfoundcount[idx] < self.calibrationMin:
                            ready = False
                            break
                else:
                    ready = False

                if ready:
                    #Capture frames to use the in camera calibration
                    print "Calibrating..."
                    try:
                        retval, z.cameraMatrix, z.distCoefs, z.rvecs, z.tvecs = cv2.aruco.calibrateCameraCharuco(self.calibrationCorners, self.calibrationIds, z.projector.board, gray.shape,None,None)
                        #print(retval, cameraMatrix, distCoeffs, rvecs, tvecs)
                        print "Calibration successful"
                    except:
                        print "Calibration failed"

                z.calibrated = ready


        #End of zone loop
        return
def roda_todo_frame(imagem):
    #print("frame")

    try:
        cv_image = bridge.compressed_imgmsg_to_cv2(imagem,
                                                   "bgr8")  # compressed image
        #cv_image = cv2.flip(cv_image, -1) # Uncomment for the real robot
        #cv_image = bridge.imgmsg_to_cv2(imagem, "bgr8") 			# non-compressed image
        #cv_image = cv2.resize(cv_image,(cv_image.shape[1]*2,cv_image.shape[0]*2)) # resize image if necessary

        gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)
        print(ids)

        if ids is not None:
            #-- ret = [rvec, tvec, ?]
            #-- rvec = [[rvec_1], [rvec_2], ...] rotation vectors
            #-- tvec = [[tvec_1], [tvec_2], ...] translation vectors
            ret = aruco.estimatePoseSingleMarkers(corners, marker_size,
                                                  camera_matrix,
                                                  camera_distortion)
            rvec, tvec = ret[0][0, 0, :], ret[1][0, 0, :]

            #-- Draw a rectangle and show the Id of the detected marker
            aruco.drawDetectedMarkers(cv_image, corners, ids)
            aruco.drawAxis(cv_image, camera_matrix, camera_distortion, rvec,
                           tvec, 1)

            #-- Print tvec, the translation vector, in x y z
            str_position = "Marker x=%4.0f  y=%4.0f  z=%4.0f" % (
                tvec[0], tvec[1], tvec[2])
            print(str_position)
            cv2.putText(cv_image, str_position, (0, 100), font, 1, (0, 255, 0),
                        1, cv2.LINE_AA)

            ##############----- Axis reference ------###########################
            # Reference line along X
            cv2.line(
                cv_image,
                (int(cv_image.shape[1] / 2), int(cv_image.shape[0] / 2)),
                (int(cv_image.shape[1] / 2 + 50), int(cv_image.shape[0] / 2)),
                (0, 0, 255), 5)
            # Reference line along Y
            cv2.line(
                cv_image,
                (int(cv_image.shape[1] / 2), int(cv_image.shape[0] / 2)),
                (int(cv_image.shape[1] / 2), int(cv_image.shape[0] / 2 + 50)),
                (0, 255, 0), 5)

            #####################---- Euclidean distance ----#####################
            # Compute the distance using only tvec, the translation vector
            # Either of the two forms below works
            distance = np.sqrt(tvec[0]**2 + tvec[1]**2 + tvec[2]**2)
            distancenp = np.linalg.norm(tvec)

            #-- Print distance
            str_dist = "Dist aruco=%4.0f  dis.np=%4.0f" % (distance,
                                                           distancenp)
            print(str_dist)
            cv2.putText(cv_image, str_dist, (0, 15), font, 1, (0, 255, 0), 1,
                        cv2.LINE_AA)

            #####################---- Distance from focal length ----#####################
            #https://www.pyimagesearch.com/2015/01/19/find-distance-camera-objectmarker-using-python-opencv/

            # raspicam v2 focal length
            FOCAL_LENGTH = 3.6  #3.04
            # pixels per unit of measure
            m = (camera_matrix[0][0] / FOCAL_LENGTH +
                 camera_matrix[1][1] / FOCAL_LENGTH) / 2
            # corners[0][0][0][0] = [ID][plane?][corner_pos (clockwise)][0=x value, 1=y value]
            pixel_length1 = math.sqrt(
                math.pow(corners[0][0][0][0] - corners[0][0][1][0], 2) +
                math.pow(corners[0][0][0][1] - corners[0][0][1][1], 2))
            pixel_length2 = math.sqrt(
                math.pow(corners[0][0][2][0] - corners[0][0][3][0], 2) +
                math.pow(corners[0][0][2][1] - corners[0][0][3][1], 2))
            pixlength = (pixel_length1 + pixel_length2) / 2
            dist = marker_size * FOCAL_LENGTH / (pixlength / m)

            #-- Print focal-length distance
            str_distfocal = "Dist focal=%4.0f" % (dist)
            print(str_distfocal)
            cv2.putText(cv_image, str_distfocal, (0, 30), font, 1, (0, 255, 0),
                        1, cv2.LINE_AA)

            ####################--------- draw the cube -----------#########################
            # https://github.com/RaviJoshii/3DModeler/blob/eb7ca48fa06ca85fcf5c5ec9dc4b562ce9a22a76/opencv/program/detect.py
            m = marker_size / 2
            pts = np.float32([[-m, m, m], [-m, -m, m], [m, -m, m], [m, m, m],
                              [-m, m, 0], [-m, -m, 0], [m, -m, 0], [m, m, 0]])
            imgpts, _ = cv2.projectPoints(pts, rvec, tvec, camera_matrix,
                                          camera_distortion)
            imgpts = np.int32(imgpts).reshape(-1, 2)
            cv_image = cv2.drawContours(cv_image, [imgpts[:4]], -1,
                                        (0, 0, 255), 4)
            for i, j in zip(range(4), range(4, 8)):
                cv_image = cv2.line(cv_image, tuple(imgpts[i]),
                                    tuple(imgpts[j]), (0, 0, 255), 4)
            cv_image = cv2.drawContours(cv_image, [imgpts[4:]], -1,
                                        (0, 0, 255), 4)

        # Show the image
        cv2.imshow("Camera", cv_image)
        cv2.waitKey(1)
    except CvBridgeError as e:
        print('ex', e)
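The focal-length distance in roda_todo_frame above is just the pinhole model, Z = f_px * real_size / pixel_size, where f_px = m * FOCAL_LENGTH is the focal length expressed in pixels. A tiny worked sketch with illustrative numbers:

# Worked sketch of the pinhole-model distance estimate used above
# (all numbers are illustrative, not from a real calibration).
marker_size = 0.20   # marker edge in meters (assumed)
focal_px = 527.0     # focal length in pixels (fx from a camera matrix)
pixlength = 105.4    # measured marker edge in pixels (example value)
dist = marker_size * focal_px / pixlength
print('distance ~ %.2f m' % dist)  # ~1.00 m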
    ret, frame = cap.read()
    #print(frame.shape) #480x640
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters =  aruco.DetectorParameters_create()
 
    #print(parameters)
 
    '''detectMarkers(...)
    detectMarkers(image, dictionary[, corners[, ids[, parameters[,
    rejectedImgPoints]]]]) -> corners, ids, rejectedImgPoints
    '''
    #lists of ids and the corners belonging to each id
    center = (0, 0)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
    if corners:
        for corner in corners[0]:
            dim = corner.shape
            print corner
            s = np.sum(corner, axis=0)
            print "sum", s
            s = s / dim[0]
            center = (s[0], s[1])

 
    #It's working.
    # my problem was that the cellphone put black all around it. The algorithm
    # depends very much upon finding rectangular black blobs
 
    gray = aruco.drawDetectedMarkers(gray, corners)
def ar_marker_pose_estimation(req):
    print("Start AR Marker Pose Estimation")
    ar_pub = rospy.Publisher('/AR/Estimation_image', Image, queue_size=10)
    pose_pub = rospy.Publisher('/AR/Estimation_pose', PoseArray, queue_size=10)
    try:
        gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        aruco_dict = aruco.Dictionary_get(aruco.DICT_7X7_250)
        parameters = aruco.DetectorParameters_create()
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100,
                    0.0001)
        for corner in corners:
            cv2.cornerSubPix(gray,
                             corner,
                             winSize=(3, 3),
                             zeroZone=(-1, -1),
                             criteria=criteria)
        frame_markers = aruco.drawDetectedMarkers(cv_image.copy(), corners,
                                                  ids)
        markerLength = 0.07
        flags = (cv2.CALIB_USE_INTRINSIC_GUESS + cv2.CALIB_RATIONAL_MODEL +
                 cv2.CALIB_FIX_ASPECT_RATIO)
        rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
            corners, markerLength, cameramatrix, dist_coeffs)
        total_rvec = np.zeros([60, 1, 3])
        total_tvec = np.zeros([60, 1, 3])

        length_of_axis = 0.05
        total_center = np.zeros(shape=[60, 1, 2])

        for index in range(len(ids)):
            center = np.array(np.mean(corners[index][0], axis=0), dtype=int)
            total_center[ids[index] - 1] = center
            total_rvec[ids[index] - 1] = rvecs[index]
            total_tvec[ids[index] - 1] = tvecs[index]
        imaxis = aruco.drawDetectedMarkers(cv_image.copy(), corners, ids)
        for i in range(len(tvecs)):
            imaxis = aruco.drawAxis(imaxis, cameramatrix, dist_coeffs,
                                    rvecs[i], tvecs[i], length_of_axis)
        # Display the resulting frame
        ar_pub.publish(bridge.cv2_to_imgmsg(imaxis, 'bgr8'))
        ## get only 1 point data from point clouds
        ## uvs must be iterable, so use 2d list.
        pose_arr = PoseArray()
        index_ids = ids.reshape(ids.shape[0])
        for point_i in index_ids:
            pixel_x = int(total_center[point_i - 1][0][0])
            pixel_y = int(total_center[point_i - 1][0][1])
            for p in pc2.read_points(ros_cloud,
                                     field_names=("x", "y", "z"),
                                     uvs=[[pixel_x, pixel_y]]):
                # print('{}  p.x: {}'.format(ids[point_i],p[0]))
                # print('{}  p.y: {}'.format(ids[point_i],p[1]))
                # print('{}  p.z: {}'.format(ids[point_i],p[2]))

                pose_msg = Pose()
                qx = total_rvec[point_i - 1][0][0]
                qy = total_rvec[point_i - 1][0][1]
                qz = total_rvec[point_i - 1][0][2]
                if math.isnan(p[0]):
                    print('is nan')
                    p = [0, 0, 0]
                    p[0] = total_tvec[point_i - 1][0][0]
                    p[1] = total_tvec[point_i - 1][0][1]
                    p[2] = total_tvec[point_i - 1][0][2]
                tx, ty, tz = transform_coor(p[0], p[1], p[2])
                pose_msg.position.x = tx
                pose_msg.position.y = ty
                pose_msg.position.z = tz
                pose_msg.orientation.x = qx
                pose_msg.orientation.y = qy
                pose_msg.orientation.z = qz

                pose_msg.orientation.w = point_i
                pose_arr.poses.append(pose_msg)
        pose_pub.publish(pose_arr)

        # When everything done, release the capture
    except TypeError as e:
        print(e)
    return CameraRequestsResponse(pose_arr)
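Note that the pose message above stores raw Rodrigues-vector components in the quaternion x/y/z fields (and reuses w for the marker id), so consumers should not treat it as a real quaternion. If a proper orientation is needed, a minimal conversion sketch from an OpenCV rvec (axis-angle) to a unit quaternion:

# Minimal sketch: convert an OpenCV Rodrigues vector (axis-angle) into a
# unit quaternion, which is what a geometry_msgs Pose orientation expects.
import numpy as np

def rvec_to_quaternion(rvec):
    rvec = np.asarray(rvec, dtype=np.float64).ravel()
    angle = np.linalg.norm(rvec)
    if angle < 1e-12:
        return (0.0, 0.0, 0.0, 1.0)  # identity rotation
    axis = rvec / angle
    s = np.sin(angle / 2.0)
    return (axis[0] * s, axis[1] * s, axis[2] * s, np.cos(angle / 2.0))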
Example #40
0
if not capture.isOpened():
    print("Unable to open camera feed")
    exit(0)

while True:
    # Read a frame from 'capture' device
    # returns false if no frame has been grabbed
    ret, frame = capture.read()

    # ret = true if a frame has been grabbed, else false
    # frame contains a Mat image (ready to be used with openCV)

    # Ensure a frame has been grabbed
    if frame is None:
        break

    # Detect aruco markers and register them in corners / ids (rejectedImgPoints is for debug)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        frame, aruco.getPredefinedDictionary(aruco.DICT_4X4_1000))
    # aruco.estimatePoseSingleMarkers(corners, )

    if ids is not None and len(ids) > 0:
        aruco.drawDetectedMarkers(frame, corners, ids, (0, 0, 255))
    print(rejectedImgPoints)

    cv.imshow("Frame", frame)
    keyboard = cv.waitKey(30)

    if keyboard == 113:  # 'q' to quit
        break
Example #41
0
# Default aruco dictionary
aruco_dict = aruco.Dictionary_get(aruco.DICT_ARUCO_ORIGINAL)

# Loop over the calibration images and detect markers
for subdir, dirs, files in os.walk(r'.\src\calibration\images'):

    for file in files:

        # Read into a variable
        frame = cv2.imread(subdir + '\\' + file)

        # Convert to grayscale
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Detect the markers
        corners, ids, rejected = aruco.detectMarkers(gray, aruco_dict)

        if ids is not None:

            # rvecs and tvecs are the rotation and translation vectors for each of the markers in corners.
            rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
                corners, 1, cameraMatrix, distCoeffs)

            for rvec, tvec in zip(rvecs, tvecs):
                aruco.drawAxis(frame, cameraMatrix, distCoeffs, rvec, tvec, 1)

        # Display the pose
        cv2.imshow('frame', frame)

        # Pause before the next frame
        cv2.waitKey(3000)