Example #5
def SeekDuelist():
    for times in range(4):
        info("SeekDuelist() getting screenshot")
        #get screen image
        Screenshot()
        #iterate through sprites
        info("SeekDuelist() iterating through duelists")
        for image in listdir("TEMPLATES\\characters\\"):
            #set sprite path
            pic = "TEMPLATES\\characters\\" + image
            #read sprite image
            img1 = imread(pic, IMREAD_GRAYSCALE)
            #read screen image
            img2 = imread("screen.png", IMREAD_GRAYSCALE)

            #set ORB object
            orb = ORB_create(100000)

            #feature detect sprite
            kp1, des1 = orb.detectAndCompute(img1, None)
            #feature detect screen
            kp2, des2 = orb.detectAndCompute(img2, None)

            #match features from sprite and screen
            bf = BFMatcher(NORM_HAMMING, crossCheck=True)
            matches = bf.match(des1, des2)
            #sort matches by ascending distance (best first)
            matches = sorted(matches, key=lambda m: m.distance)

            #select accurate matches
            found=[]
            for m in matches:
                if m.distance<=20:
                    found.append(m.distance)

            #check if accurate matches were found
            if found:

                #set image info
                img2_idx = matches[0].trainIdx
                
                # Get the coordinates
                # x - left to right
                # y - top to bottom
                (x,y) = kp2[img2_idx].pt
                info("SeekDuelist() match found for "+image+" at ("+str(int(x))+", "+str(int(y))+")")
                return (int(x),int(y))
        #Scroll one down to change screens
        info("SeekDuelist() no matches found. scrolling")
        mouse_event(MOUSEEVENTF_WHEEL, 0, 0, -1, 0)
        sleep(2)
    info("SeekDuelist() no matches found. raising exception")
    raise Exception
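The pattern above — match ORB descriptors against a screenshot, keep only low-distance matches, and read the best match's screen position from its trainIdx keypoint — can be distilled into a standalone helper. A minimal sketch (the name locate_template and the default threshold of 20 are assumptions, not part of the original):

from cv2 import ORB_create, BFMatcher, NORM_HAMMING, imread, IMREAD_GRAYSCALE

def locate_template(template_path, screen_path, max_distance=20):
    """Return the (x, y) screen position of the best ORB match, or None."""
    img1 = imread(template_path, IMREAD_GRAYSCALE)
    img2 = imread(screen_path, IMREAD_GRAYSCALE)
    orb = ORB_create(100000)
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    if des1 is None or des2 is None:
        return None  # one of the images produced no features
    matches = BFMatcher(NORM_HAMMING, crossCheck=True).match(des1, des2)
    if not matches:
        return None
    best = min(matches, key=lambda m: m.distance)
    if best.distance > max_distance:
        return None  # no sufficiently accurate match
    x, y = kp2[best.trainIdx].pt
    return int(x), int(y)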
Example #6
class ORBDetector(Detector):
    def __init__(self):
        Detector.__init__(self)
        self.ORB = ORB_create()

    def __repr__(self):
        return "ORBDetector(ORB={ORB})".format(ORB=self.ORB)

    def detect(self, image):
        keypoints = self.ORB.detect(image.cv_image, None)
        keypoints, descriptors = self.ORB.compute(image.cv_image, keypoints)
        return keypoints, descriptors
Example #7
def image2descriptors(imlist, descriptor='ORB'):
    """
    For each image in the imlist list,
    extract the given descriptor (ORB, BRISK and SIFT supported).

    Return the lists: [keypoint], [descriptor]
    """
    from cv2 import imread
    desc = []
    key = []
    if descriptor == 'ORB':
        from cv2 import ORB_create
        for imname in imlist:
            im = imread(imname)
            k, d = ORB_create().detectAndCompute(im, None)
            desc.append(d)
            key.append(k)
    elif descriptor == 'BRISK':
        from cv2 import BRISK_create
        for imname in imlist:
            im = imread(imname)
            k, d = BRISK_create().detectAndCompute(im, None)
            desc.append(d)
            key.append(k)
    elif descriptor == 'SIFT':
        from cv2.xfeatures2d import SIFT_create
        for imname in imlist:
            im = imread(imname)
            k, d = SIFT_create().detectAndCompute(im, None)
            desc.append(d)
            key.append(k)
    else:
        print('descriptor must be ORB, BRISK or SIFT')
    return key, desc
Example #8
class OrbFeature2D:
    def __init__(self, num_features=2000, scale_factor=1.2, num_levels=8):
        print('Using Orb Feature 2D')
        self.orb_extractor = ORB_create(num_features, scale_factor, num_levels)

    # extract keypoints
    def detect(self, img):
        # detect; cv2's ORB.detect already returns cv2.KeyPoint objects,
        # so no tuple-to-KeyPoint conversion is needed here
        kps = self.orb_extractor.detect(img)
        return kps

    def detectAndCompute(self, img):
        #detect and compute
        kps, des = self.orb_extractor.detectAndCompute(img, None)
        return kps, des
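A quick usage sketch for the OrbFeature2D wrapper above; the synthetic grid image is an assumption, included only to make the snippet self-contained (keypoint counts vary with image content):

import numpy as np
from cv2 import ORB_create  # OrbFeature2D above expects this name in scope

# synthetic image with line crossings so ORB has corners to detect
img = np.zeros((256, 256), dtype=np.uint8)
img[::32, :] = 255
img[:, ::32] = 255

feature2d = OrbFeature2D(num_features=500)
kps, des = feature2d.detectAndCompute(img)
print(len(kps), des.shape if des is not None else None)  # e.g. 500 (500, 32)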
Example #9
def AverageCrypt(intLength=32, intCamLoc=0):
    try:
        import numpy as np
        from datetime import datetime
        from cv2 import (FastFeatureDetector_create, ORB_create,
                         SimpleBlobDetector_create, VideoCapture, waitKey,
                         destroyAllWindows, CAP_DSHOW)
    except ImportError:
        print("Imports failed")
        raise

    #initialise camera
    try:
        cam = VideoCapture(intCamLoc, CAP_DSHOW)
    except Exception:
        cam = "Failed"

    intCount = 0
    intPwdLength = intLength
    strPassword = ""

    fast = FastFeatureDetector_create()
    orb = ORB_create()
    blob = SimpleBlobDetector_create()

    print(datetime.now())

    for intCount in range(0, intPwdLength):
        #collect frame
        ret, frame = cam.read()
        #frame = cv.resize(frame, (320, 480))
        features = fast.detect(frame, None)

        #split channels
        Ch0 = frame[:, :, 0]
        Ch1 = frame[:, :, 1]
        Ch2 = frame[:, :, 2]

        #average frames
        intCh0 = np.mean(Ch0)
        intCh1 = np.mean(Ch1)
        intCh2 = np.mean(Ch2)

        #initialise seed from the channel means
        #OpenCV frames are BGR, so Ch0/Ch1/Ch2 hold the B/G/R means
        intSeed = int(intCh0) << 32 | int(intCh1) << 16 | int(intCh2) << 0
        #map the seed into the printable ASCII range 32..125
        letter = 32 + (intSeed % (126 - 32))

        #add letter to array
        strPassword += str(chr(letter))
        waitKey(1)

    cam.release()
    destroyAllWindows()

    return strPassword
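The seed construction above packs the three channel means into a single integer with bit shifts, then folds it into the printable ASCII range. The arithmetic in isolation, with arbitrary illustrative channel values:

# arbitrary example channel means (B, G, R)
b, g, r = 104, 130, 97

# pack the means into one integer: B above bit 32, G in bits 16..31, R in bits 0..15
seed = (b << 32) | (g << 16) | (r << 0)
print(hex(seed))  # 0x6800820061

# fold into the printable ASCII range 32..125
letter = 32 + (seed % (126 - 32))
print(letter, chr(letter))  # 69 E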
Example #10
class OrbFeaturesHandler(FeaturesHandlerAbstractBase):
    """
    This class implements an ORB features extractor
    """
    features_extractor: ORB
    features_matcher: BFMatcher

    DEFAULT_DISTANCE_THRESHOLD_FOR_SUCCESSFULL_FEATURE_MATCH: int = 10

    DEFAULT_NUMBER_OF_FEATURES = 1000
    number_of_features: int

    def __init__(self, number_of_features: int = None):
        if number_of_features is None:
            number_of_features = self.DEFAULT_NUMBER_OF_FEATURES
        self.number_of_features = number_of_features

        self.features_extractor = ORB_create(
            nfeatures=self.number_of_features)

        # ORB uses binary descriptors -> use hamming norm (XOR between descriptors)
        self.features_matcher = BFMatcher(NORM_HAMMING, crossCheck=True)

    def extract_features(self, frame: np.ndarray) -> ProcessedFrameData:
        """
        This method extracts ORB features
        """
        list_of_keypoints, descriptors = self.features_extractor.detectAndCompute(
            image=frame, mask=None)
        return ProcessedFrameData.build(frame=frame,
                                        list_of_keypoints=list_of_keypoints,
                                        descriptors=descriptors)

    def match_features(self, frame_1: ProcessedFrameData,
                       frame_2: ProcessedFrameData):
        """
        This method matches ORB features
        based on https://docs.opencv.org/master/dc/dc3/tutorial_py_matcher.html
        """
        list_of_matches: List[DMatch] = self.features_matcher.match(
            queryDescriptors=frame_1.descriptors,
            trainDescriptors=frame_2.descriptors)
        return sorted(list_of_matches, key=lambda x: x.distance)

    def is_handler_capable(self, frame: np.ndarray) -> bool:
        """
        This method implements ORB handler capability test
        """
        extracted_features: ProcessedFrameData = self.extract_features(
            frame=frame)
        return len(extracted_features.list_of_keypoints) >= int(
            0.9 * self.DEFAULT_NUMBER_OF_FEATURES)
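The "hamming norm" comment in __init__ above is worth unpacking: ORB descriptors are 32-byte binary strings, so NORM_HAMMING (an XOR plus a bit count) is the right distance, while NORM_L2 is meant for float descriptors such as SIFT. A minimal sketch, assuming random test frames just to have something to match:

import numpy as np
from cv2 import ORB_create, BFMatcher, NORM_HAMMING

rng = np.random.default_rng(0)
frame_1 = rng.integers(0, 256, (480, 640), dtype=np.uint8)
frame_2 = rng.integers(0, 256, (480, 640), dtype=np.uint8)

orb = ORB_create(nfeatures=1000)
_, des_1 = orb.detectAndCompute(frame_1, None)
_, des_2 = orb.detectAndCompute(frame_2, None)
assert des_1 is not None and des_2 is not None
print(des_1.dtype, des_1.shape)  # uint8, (n, 32): each row is a 256-bit descriptor

matcher = BFMatcher(NORM_HAMMING, crossCheck=True)
matches = matcher.match(des_1, des_2)
# DMatch.distance is then a Hamming distance: the number of differing bits (0..256)
print(sorted(m.distance for m in matches)[:5])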
Example #11
    def algorithm_ORB(self,
                      photo,
                      screen,
                      screen_colored,
                      nfeatures=4500,
                      descriptor_matcher_name='BruteForce-Hamming'):

        t1 = time.perf_counter()

        # Init algorithm
        orb = ORB_create(nfeatures)

        t2 = time.perf_counter()

        self.writeLog('Created ORB object - {}ms'.format(
            self.formatTimeDiff(t1, t2)))

        # Detect and compute
        kp_photo, des_photo = orb.detectAndCompute(photo, None)
        kp_screen, des_screen = orb.detectAndCompute(screen, None)

        t3 = time.perf_counter()
        self.writeLog('Detected keypoints - {}ms'.format(
            self.formatTimeDiff(t2, t3)))

        # Descriptor Matcher
        try:
            descriptor_matcher = DescriptorMatcher_create(
                descriptor_matcher_name)
        except Exception:
            return False

        t4 = time.perf_counter()
        self.writeLog('Initialized Descriptor Matcher - {}ms'.format(
            self.formatTimeDiff(t3, t4)))

        # Calc knn Matches
        try:
            matches = descriptor_matcher.knnMatch(des_photo, des_screen, k=2)
        except Exception:
            return False

        t5 = time.perf_counter()
        self.writeLog('Calced knn matches - {}ms'.format(
            self.formatTimeDiff(t4, t5)))

        if not matches:
            return False

        # store all the good matches as per Lowe's ratio test.
        good = []
        for pair in matches:
            if len(pair) < 2:
                continue  # knnMatch can return fewer than k neighbors
            m, n = pair
            if m.distance < 0.75 * n.distance:
                good.append(m)

        t6 = time.perf_counter()
        self.writeLog('Filtered good matches - {}ms'.format(
            self.formatTimeDiff(t5, t6)))

        if len(good) < 20:
            return False

        photo_pts = np.float32([kp_photo[m.queryIdx].pt
                                for m in good]).reshape(-1, 1, 2)  # pylint: disable=too-many-function-args
        screen_pts = np.float32([kp_screen[m.trainIdx].pt
                                 for m in good]).reshape(-1, 1, 2)  # pylint: disable=too-many-function-args

        M, _ = findHomography(photo_pts, screen_pts, RANSAC, 5.0)

        t7 = time.perf_counter()
        self.writeLog('Found Homography - {}ms'.format(
            self.formatTimeDiff(t6, t7)))

        if M is None or not M.any():
            return False

        h, w = photo.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)  # pylint: disable=too-many-function-args
        dst = perspectiveTransform(pts, M)

        t8 = time.perf_counter()
        self.writeLog('Perspective Transform - {}ms'.format(
            self.formatTimeDiff(t7, t8)))

        # Axis-aligned bounding box of the projected corners,
        # clipped to non-negative screen coordinates
        minX = max(int(dst[:, 0, 0].min()), 0)
        minY = max(int(dst[:, 0, 1].min()), 0)
        maxX = int(dst[:, 0, 0].max())
        maxY = int(dst[:, 0, 1].max())

        logging.info('minY {}'.format(int(minY)))
        logging.info('minX {}'.format(int(minX)))
        logging.info('maxY {}'.format(int(maxY)))
        logging.info('maxX {}'.format(int(maxX)))

        if maxX - minX <= 0:
            return False
        if maxY - minY <= 0:
            return False

        imwrite(self.match_dir + '/result.png',
                screen_colored[int(minY):int(maxY),
                               int(minX):int(maxX)])

        t9 = time.perf_counter()
        self.writeLog('Wrote Image - {}ms'.format(self.formatTimeDiff(t8, t9)))

        return True
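The ratio test above (m.distance < 0.75 * n.distance) rejects a match whenever the runner-up candidate is almost as close, since that ambiguity usually signals a false correspondence. The same filter as a standalone helper (the name ratio_test_filter is an assumption); note that crossCheck must stay off for knnMatch to return two candidates per query:

from cv2 import BFMatcher, NORM_HAMMING

def ratio_test_filter(des_query, des_train, ratio=0.75):
    """Keep a match only if it is clearly better than the second-best candidate."""
    matcher = BFMatcher(NORM_HAMMING)  # crossCheck off: knnMatch needs k=2 candidates
    good = []
    for pair in matcher.knnMatch(des_query, des_train, k=2):
        if len(pair) < 2:
            continue  # fewer than two candidates for this descriptor
        best, second = pair
        if best.distance < ratio * second.distance:
            good.append(best)
    return good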
Example #12
try:
	cam = VideoCapture(0)
except Exception:
	cam = "Failed"

intCount = 0
intPwdLength = 32

strPassword = ""
strFileName = "./Results/Result{0}.png".format(datetime.now())
strFileName = strFileName.replace(":", "")
strFileName = strFileName.replace(" ", "")
print(strFileName)

fast = FastFeatureDetector_create()
orb = ORB_create()
blob = SimpleBlobDetector_create()

arryPlotX = np.zeros(126)
arryPlotY = np.zeros(126)

arryFastStdDev = np.zeros(intPwdLength)
arryOrbStdDev = np.zeros(intPwdLength)
arryBlobStdDev = np.zeros(intPwdLength)

for i in range(0, 126):
	arryPlotY[i] = i

print(datetime.now())

for intCount in range(0, intPwdLength):
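	#The loop body is truncated in the original; Example #9 above shows the
	#same per-frame pipeline in full. A sketch of one iteration under that
	#assumption (channel means packed into a seed, mapped to printable ASCII):
	ret, frame = cam.read()
	if ret:
		intCh0 = np.mean(frame[:, :, 0])
		intCh1 = np.mean(frame[:, :, 1])
		intCh2 = np.mean(frame[:, :, 2])
		intSeed = int(intCh0) << 32 | int(intCh1) << 16 | int(intCh2) << 0
		strPassword += chr(32 + (intSeed % (126 - 32)))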
Example #13
    def test_ORB(self):
        from cv2 import ORB_create
        test_orb = ORB_create()
Example #14
class KPExtractor(object):
  def __init__(self,K,nfeatures=5000,norm=NORM_HAMMING,crosscheck=False):
    self.orb = ORB_create(nfeatures)
    self.bfm = BFMatcher(norm,crosscheck)
    self.orb_mask = None
    self.last = None
    self.K = K
    self.Kinv = inv(self.K)

    self.f_est_avg = []

  def normalize(self,pts):
    return dot(self.Kinv,self.add_ones(pts).T).T[:, 0:2]

  def denormalize(self,pt):
    ret = dot(self.K, array([pt[0],pt[1],1.0]).T)
    ret /= ret[2]
    return int(round(ret[0])),int(round(ret[1])),int(round(ret[2]))

  def build_orb_mask(self,frame):
    h,w= frame.shape
    c = 1
    self.orb_mask = zeros((h,w,c),dtype=uint8)
    rectangle(self.orb_mask,(0,0),(w,int(h*0.75)),(255,255,255),-1)
    # self.orb_mask = cvtColor(self.orb_mask,COLOR_BGR2GRAY)

  def extractRt(self,E):
    U,w,Vt = svd(E)
    assert det(U) > 0
    if det(Vt) < 0:
      Vt *= -1.0

    #Find R and T from Hartley and Zisserman
    W = mat([[0,-1,0],[1,0,0],[0,0,1]],dtype=float)
    R = dot(dot(U,W),Vt)
    if sum(R.diagonal()) < 0:
      R = dot(dot(U,W.T),Vt)
    t = U[:,2]
    Rt = concatenate([R,t.reshape(3,1)],axis=1)
    return Rt
      
  # [x,y] to [x,y,1]
  def add_ones(self,x):
    return concatenate([x, ones((x.shape[0],1))],axis=1)

  def extract(self,frame,maxCorners=5000,qualityLevel=.01,minDistance=3):
    #Detect
    feats = goodFeaturesToTrack(frame,maxCorners,qualityLevel,minDistance)#,mask=self.orb_mask)
    #Extract
    kps = [KeyPoint(x=f[0][0],y=f[0][1],_size=20) for f in feats]
    kps,des = self.orb.compute(frame,kps)

    
    #Match
    ret = []
    if self.last is not None:
      #the query image is the current frame;
      #the train image is the previous frame
      matches = self.bfm.knnMatch(des,self.last['des'],k=2)
      for pair in matches:
        if len(pair) < 2:
          continue  # knnMatch may return fewer than 2 neighbors
        m, n = pair
        if m.distance < 0.7 * n.distance:
          ret.append((kps[m.queryIdx].pt,self.last['kps'][m.trainIdx].pt))

    #filter
    Rt = None
    if len(ret) > 0:
      ret = array(ret)
      #Normalize
      ret[:,0,:] = self.normalize(ret[:,0,:])
      ret[:,1,:] = self.normalize(ret[:,1,:])

      try:
        model, inliers = ransac((ret[:, 0],ret[:, 1]),
                                  #FundamentalMatrixTransform,
                                  EssentialMatrixTransform,
                                  min_samples=8,
                                  residual_threshold=0.005,
                                  max_trials=200)

        ret = ret[inliers]

        Rt = self.extractRt(model.params)
      except Exception:
        pass  # RANSAC failed to find a stable essential matrix this frame

    self.last = {'kps': kps,'des':des}
    return ret,Rt


  #warp is pattern for now
  def homography(self,last_puzzle,puzzle,warp,maxCorners=5000,qualityLevel=.01,minDistance=3):
    #Detect (using the current puzzle frame)
    f_feats = goodFeaturesToTrack(puzzle,maxCorners,qualityLevel,minDistance)#,mask=self.orb_mask)
    #Extract
    f_kps = [KeyPoint(x=f[0][0],y=f[0][1],_size=20) for f in f_feats]
    f_kps,f_des = self.orb.compute(puzzle,f_kps)

    #Detect
    w_feats = goodFeaturesToTrack(warp,maxCorners,qualityLevel,minDistance)#,mask=self.orb_mask)
    #Extract
    w_kps = [KeyPoint(x=f[0][0],y=f[0][1],_size=20) for f in w_feats]
    w_kps,w_des = self.orb.compute(warp,w_kps)

    #Match
    ret = []
    
    #the query image is the warp/pattern;
    #the train image is the current frame
    matches = self.bfm.knnMatch(w_des,f_des,k=2)
    for pair in matches:
      if len(pair) < 2:
        continue
      m, n = pair
      # if m.distance <= 1 * n.distance:
      ret.append((w_kps[m.queryIdx].pt,f_kps[m.trainIdx].pt))

    #filter   
    Rt = None
    H, mask, warp_pts, orig_pts = None,None,None,None
    if len(ret) > 0:
        warp_pts = float32([r[0] for r in ret]).reshape(-1,1,2)
        orig_pts = float32([r[1] for r in ret]).reshape(-1,1,2)
        H,mask = findHomography(warp_pts,orig_pts,RANSAC,5.0)


    return H,mask,warp_pts,orig_pts

  def project_matrix(self,homography):
    homography *= (-1)
    rot_and_transl = dot(self.Kinv, homography)
    col_1 = rot_and_transl[:, 0]
    col_2 = rot_and_transl[:, 1]
    col_3 = rot_and_transl[:, 2]
    # normalise vectors
    l = sqrt(norm(col_1, 2) * norm(col_2, 2))
    rot_1 = col_1 / l
    rot_2 = col_2 / l
    translation = col_3 / l
    # compute the orthonormal basis
    c = rot_1 + rot_2
    p = cross(rot_1, rot_2)
    d = cross(c, p)
    rot_1 = dot(c / norm(c, 2) + d / norm(d, 2), 1 / sqrt(2))
    rot_2 = dot(c / norm(c, 2) - d / norm(d, 2), 1 / sqrt(2))
    rot_3 = cross(rot_1, rot_2)
    # finally, compute the 3D projection matrix from the model to the current frame
    projection = stack((rot_1, rot_2, rot_3, translation)).T
    return dot(self.K, projection)
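extractRt above is the textbook decomposition of an essential matrix into rotation and translation (Hartley and Zisserman, ch. 9): with E = U diag(1,1,0) V^T, the rotation is U W V^T or U W^T V^T and the translation is the last column of U, recovered only up to sign and scale. A self-contained numeric check under a known motion (all names here are made up for the sketch):

import numpy as np

def skew(t):
    """Cross-product matrix [t]_x, so that skew(t) @ v == np.cross(t, v)."""
    return np.array([[0, -t[2], t[1]],
                     [t[2], 0, -t[0]],
                     [-t[1], t[0], 0]])

# known motion: rotation about z plus a unit translation along x
theta = 0.1
R_true = np.array([[np.cos(theta), -np.sin(theta), 0],
                   [np.sin(theta), np.cos(theta), 0],
                   [0, 0, 1]])
t_true = np.array([1.0, 0.0, 0.0])
E = skew(t_true) @ R_true  # essential matrix E = [t]_x R

U, s, Vt = np.linalg.svd(E)
if np.linalg.det(U) < 0:   # same orientation fixes as extractRt above
    U *= -1.0
if np.linalg.det(Vt) < 0:
    Vt *= -1.0
W = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]], dtype=float)
R1, R2 = U @ W @ Vt, U @ W.T @ Vt  # the two rotation candidates
t = U[:, 2]                        # translation up to sign and scale
print(np.allclose(R1, R_true) or np.allclose(R2, R_true))  # True
print(np.allclose(np.abs(t), np.abs(t_true)))              # True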
Example #15
class ServoingDesignedFeaturesSimpleQuadPanda3dEnv(SimpleQuadPanda3dEnv,
                                                   ServoingEnv):
    def __init__(self,
                 action_space,
                 feature_type=None,
                 filter_features=None,
                 max_time_steps=100,
                 distance_threshold=4.0,
                 **kwargs):
        """
        filter_features indicates whether to filter out key points that are not
        on the object in the current image. Key points in the target image are
        always filtered out.
        """
        SimpleQuadPanda3dEnv.__init__(self, action_space, **kwargs)
        ServoingEnv.__init__(self,
                             env=self,
                             max_time_steps=max_time_steps,
                             distance_threshold=distance_threshold)

        lens = self.camera_node.node().getLens()
        self._observation_space.spaces['points'] = BoxSpace(
            np.array([-np.inf, lens.getNear(), -np.inf]),
            np.array([np.inf, lens.getFar(), np.inf]))
        film_size = tuple(int(s) for s in lens.getFilmSize())
        self.mask_camera_sensor = Panda3dMaskCameraSensor(
            self.app, (self.skybox_node, self.city_node),
            size=film_size,
            near_far=(lens.getNear(), lens.getFar()),
            hfov=lens.getFov())
        for cam in self.mask_camera_sensor.cam:
            cam.reparentTo(self.camera_sensor.cam)

        self.filter_features = True if filter_features is None else filter_features
        self._feature_type = feature_type or 'sift'
        if cv2.__version__.split('.')[0] == '3':
            from cv2.xfeatures2d import SIFT_create, SURF_create
            from cv2 import ORB_create
            if self.feature_type == 'orb':
                # https://github.com/opencv/opencv/issues/6081
                cv2.ocl.setUseOpenCL(False)
        else:
            SIFT_create = cv2.SIFT
            SURF_create = cv2.SURF
            ORB_create = cv2.ORB
        if self.feature_type == 'sift':
            self._feature_extractor = SIFT_create()
        elif self.feature_type == 'surf':
            self._feature_extractor = SURF_create()
        elif self.feature_type == 'orb':
            self._feature_extractor = ORB_create()
        else:
            raise ValueError("Unknown feature extractor %s" %
                             self.feature_type)
        if self.feature_type == 'orb':
            self._matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        else:
            self._matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
        self._target_key_points = None
        self._target_descriptors = None

    @property
    def feature_type(self):
        return self._feature_type

    @property
    def target_key_points(self):
        return self._target_key_points

    @property
    def target_descriptors(self):
        return self._target_descriptors

    def _step(self, action):
        obs, reward, done, info = SimpleQuadPanda3dEnv.step(self, action)
        done = done or obs.get('points') is None
        return obs, reward, done, info

    def step(self, action):
        return ServoingEnv.step(self, action)

    def reset(self, state=None):
        self._target_key_points = None
        self._target_descriptors = None
        self._target_obs = None
        self._target_pos = self.get_relative_target_position()
        self._t = 0
        return SimpleQuadPanda3dEnv.reset(self, state=state)

    def observe(self):
        obs = SimpleQuadPanda3dEnv.observe(self)
        assert isinstance(obs, dict)

        # mask is used to filter out key points
        mask, depth_image, _ = self.mask_camera_sensor.observe()
        if self._target_obs is None:
            self._target_obs = {'target_' + k: v for (k, v) in obs.items()}
            self._target_key_points, self._target_descriptors = \
                self._feature_extractor.detectAndCompute(self._target_obs['target_image'], mask)

        if self._target_key_points:
            key_points, descriptors = \
                self._feature_extractor.detectAndCompute(obs['image'], mask if self.filter_features else None)
            matches = self._matcher.match(descriptors,
                                          self._target_descriptors)
        else:
            matches = False

        if matches:
            key_points_xy = np.array(
                [key_points[match.queryIdx].pt for match in matches])
            target_key_points_xy = np.array([
                self._target_key_points[match.trainIdx].pt for match in matches
            ])
            key_points_XYZ = xy_depth_to_XYZ(self.camera_sensor.lens,
                                             key_points_xy, depth_image)
            target_key_points_XYZ = xy_depth_to_XYZ(self.camera_sensor.lens,
                                                    target_key_points_xy,
                                                    depth_image)
            obs['points'] = key_points_XYZ
            obs['target_points'] = target_key_points_XYZ
        else:
            obs['points'] = None
            obs['target_points'] = None

        obs.update(self._target_obs)
        return obs
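observe() turns matched pixel coordinates plus a depth image into 3D points through the project-specific xy_depth_to_XYZ helper. For a plain pinhole model the underlying operation is simple back-projection; a generic sketch (the intrinsics fx, fy, cx, cy and the function name are assumptions, not taken from the Panda3D lens):

import numpy as np

def backproject(xy, depth_image, fx, fy, cx, cy):
    """Back-project pixel coordinates with per-pixel depth to camera-frame XYZ.

    xy: (n, 2) array of pixel coordinates; depth_image: (h, w) array of depths
    along the optical axis. Pinhole model: X = (u - cx) * Z / fx, etc.
    """
    u, v = xy[:, 0], xy[:, 1]
    z = depth_image[v.astype(int), u.astype(int)]
    x = (u - cx) * z / fx
    y = (v - cy) * z / fy
    return np.stack([x, y, z], axis=1)

# toy usage: constant depth of 2.0 over a 480x640 image
depth = np.full((480, 640), 2.0)
pts = backproject(np.array([[320.0, 240.0], [0.0, 0.0]]), depth,
                  fx=500.0, fy=500.0, cx=320.0, cy=240.0)
print(pts)  # principal point maps to (0, 0, 2); corner to (-1.28, -0.96, 2)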
Example #16
class ROS_Video_Node():
    def __init__(self, name):
        """This initializes the object"""
        self.name = name
        self.last_frame = None
        self.current_frame = None
        self.match_frames = None

        #Create an ORB object for keypoint detection
        self.orb = ORB_create(nfeatures=100,
                              scaleFactor=2,
                              edgeThreshold=100,
                              fastThreshold=10)

        #Create a BF object for keypoint matching
        self.bf = BFMatcher(NORM_L1, crossCheck=True)

        #For keeping track of the drone's current velocity
        self.vel_x = None
        self.vel_y = None

        #For plotting the drone's current and past velocities
        self.times = []
        self.x_s = []
        self.y_s = []

        #Creating a publisher that will publish the calculated velocity to the ROS topic /quadrotor/ardrone/calc_vel
        self.pub = rospy.Publisher("/quadrotor/ardrone/calc_vel",
                                   Twist,
                                   queue_size=10)

    def detect_motion(self):
        """This function detects the motion between the current and last frame."""
        if (self.last_frame == self.current_frame).all():
            print(
                "Beep boop! I think my current frame is exactly the same as my last frame. \nEither I'm not moving, or something is very wrong.\n"
            )
            namedWindow(self.name)
            imshow(self.name, self.current_frame)
            waitKey(1)
            return None

        #This finds the keypoints and descriptors with ORB
        kp1, des1 = self.orb.detectAndCompute(self.last_frame, None)
        kp2, des2 = self.orb.detectAndCompute(self.current_frame, None)

        #Match descriptors
        matches = self.bf.match(des1, des2)

        #Sort them in the order of their distance
        matches = sorted(matches, key=lambda x: x.distance)

        #Distance-based filtering is currently disabled: every match is kept.
        #A threshold on match.distance could be reintroduced here to drop
        #unreliable matches before estimating motion.
        filtered_matches = matches

        if len(filtered_matches) < 5:
            print(
                "Beep boop! Not enough good matches were found. This data is unreliable. \n Try moving me above the building, please.\n"
            )
            namedWindow(self.name)
            imshow(self.name, self.current_frame)
            waitKey(1)
            return None

        #create arrays of the coordinates for keypoints in the two frames
        kp1_coords = asarray(
            [kp1[mat.queryIdx].pt for mat in filtered_matches])
        kp2_coords = asarray(
            [kp2[mat.trainIdx].pt for mat in filtered_matches])

        #calculate the translations needed to get from the first set of keypoints to the next set of keypoints
        translations = kp2_coords - kp1_coords

        #Find the single translation that best explains all matches: apply each
        #candidate to every keypoint and keep the one with the lowest RMS error
        least_error = 5
        best_translation = None
        for translation in translations:
            a = translation[0] * ones((translations.shape[0], 1))
            b = translation[1] * ones((translations.shape[0], 1))
            test_translation = concatenate((a, b), axis=1)
            test_coords = kp1_coords + test_translation
            error = sqrt(mean(square(kp2_coords - test_coords)))
            if error < least_error:
                least_error = error
                best_translation = translation

        if best_translation is not None:
            self.vel_x = best_translation[0]
            self.vel_y = best_translation[1]

        # Draw first matches.
        draw_params = dict(
            matchColor=(0, 255, 0),  # draw matches in green color
            singlePointColor=None,
            matchesMask=None,  # draw only inliers
            flags=2)
        self.match_frames = drawMatches(self.last_frame, kp1,
                                        self.current_frame, kp2, matches, None,
                                        **draw_params)

        namedWindow(self.name)
        imshow(self.name, self.match_frames)
        waitKey(1)

        if least_error < 2:
            print("This is a new match")
            print("X velocity: ", self.vel_x)
            print("Y velocity: ", self.vel_y)
            print("least_error: ", least_error)

            #Publish the velocities found
            calc_vel = Twist()
            calc_vel.linear.x = self.vel_x
            calc_vel.linear.y = self.vel_y

            self.pub.publish(calc_vel)

    def plot_current_vel(self):
        """This function live plots the quadrotor's measured velocity"""
        #save the current velocities
        self.times.append(time())
        self.x_s.append(self.vel_x)
        self.y_s.append(self.vel_y)

        #plot the velocities
        plt.title("Live Velocities")
        plt.ylabel("Velocity")
        plt.xlabel("Time")
        plt.ylim(-20, 20)
        #plot only the most recent 60 samples
        plt.plot(self.times[-60:],
                 self.x_s[-60:],
                 'r-',
                 label="X Velocity")
        plt.plot(self.times[-60:],
                 self.y_s[-60:],
                 'b-',
                 label="Y Velocity")
        plt.legend(loc="upper right")