Example #1
    def __init__(self, name):
        """This initializes the object"""
        self.name = name
        self.last_frame = None
        self.current_frame = None
        self.match_frames = None

        #Create an ORB object for keypoint detection
        self.orb = ORB_create(nfeatures=100,
                              scaleFactor=2,
                              edgeThreshold=100,
                              fastThreshold=10)

        #Create a BF object for keypoint matching
        self.bf = BFMatcher(NORM_L1, crossCheck=True)

        #For keeping track of the drone's current velocity
        self.vel_x = None
        self.vel_y = None

        #For plotting the drone's current and past velocities
        self.times = []
        self.x_s = []
        self.y_s = []

        #Publisher for the calculated velocity on the ROS topic /quadrotor/ardrone/calc_vel
        self.pub = rospy.Publisher("/quadrotor/ardrone/calc_vel",
                                   Twist,
                                   queue_size=10)
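The matcher above is built with NORM_L1; for ORB's binary descriptors the Hamming norm is the more usual pairing (as the comment in Example #4 notes). A minimal alternative construction, if that behaviour is wanted:

from cv2 import BFMatcher, NORM_HAMMING

# Hamming distance (bit-wise XOR + popcount) suits ORB's binary descriptors
bf = BFMatcher(NORM_HAMMING, crossCheck=True)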
Example #2
def image2descriptors(imlist, descriptor='ORB'):
    """
    For each image in imlist, extract keypoints and descriptors with the
    requested detector (ORB, BRISK and SIFT are supported).

    Return the lists: [keypoints], [descriptors]
    """
    from cv2 import imread
    desc = []
    key = []
    if descriptor == 'ORB':
        from cv2 import ORB_create
        for imname in imlist:
            im = imread(imname)
            k, d = ORB_create().detectAndCompute(im, None)
            desc.append(d)
            key.append(k)
    elif descriptor == 'BRISK':
        from cv2 import BRISK_create
        for imname in imlist:
            im = imread(imname)
            k, d = BRISK_create().detectAndCompute(im, None)
            desc.append(d)
            key.append(k)
    elif descriptor == 'SIFT':
        from cv2.xfeatures2d import SIFT_create
        for imname in imlist:
            im = imread(imname)
            k, d = SIFT_create().detectAndCompute(im, None)
            desc.append(d)
            key.append(k)
    else:
        print('descriptor must be one of ORB, BRISK or SIFT')
    return key, desc
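A minimal usage sketch for image2descriptors; the image filenames below are hypothetical placeholders:

# hypothetical list of image files on disk
images = ['frame_000.png', 'frame_001.png']

keypoints, descriptors = image2descriptors(images, descriptor='ORB')
# keypoints[i] and descriptors[i] correspond to images[i]
print(len(keypoints), len(descriptors))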
Example #3
    def __init__(self,
                 action_space,
                 feature_type=None,
                 filter_features=None,
                 max_time_steps=100,
                 distance_threshold=4.0,
                 **kwargs):
        """
        filter_features indicates whether to filter out key points that are not
        on the object in the current image. Key points in the target image are
        always filtered out.
        """
        SimpleQuadPanda3dEnv.__init__(self, action_space, **kwargs)
        ServoingEnv.__init__(self,
                             env=self,
                             max_time_steps=max_time_steps,
                             distance_threshold=distance_threshold)

        lens = self.camera_node.node().getLens()
        self._observation_space.spaces['points'] = BoxSpace(
            np.array([-np.inf, lens.getNear(), -np.inf]),
            np.array([np.inf, lens.getFar(), np.inf]))
        film_size = tuple(int(s) for s in lens.getFilmSize())
        self.mask_camera_sensor = Panda3dMaskCameraSensor(
            self.app, (self.skybox_node, self.city_node),
            size=film_size,
            near_far=(lens.getNear(), lens.getFar()),
            hfov=lens.getFov())
        for cam in self.mask_camera_sensor.cam:
            cam.reparentTo(self.camera_sensor.cam)

        self.filter_features = True if filter_features is None else filter_features
        self._feature_type = feature_type or 'sift'
        if cv2.__version__.split('.')[0] == '3':
            from cv2.xfeatures2d import SIFT_create, SURF_create
            from cv2 import ORB_create
            if self.feature_type == 'orb':
                # https://github.com/opencv/opencv/issues/6081
                cv2.ocl.setUseOpenCL(False)
        else:
            SIFT_create = cv2.SIFT
            SURF_create = cv2.SURF
            ORB_create = cv2.ORB
        if self.feature_type == 'sift':
            self._feature_extractor = SIFT_create()
        elif self.feature_type == 'surf':
            self._feature_extractor = SURF_create()
        elif self.feature_type == 'orb':
            self._feature_extractor = ORB_create()
        else:
            raise ValueError("Unknown feature extractor %s" %
                             self.feature_type)
        if self.feature_type == 'orb':
            self._matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        else:
            self._matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
        self._target_key_points = None
        self._target_descriptors = None
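The docstring above says key points that are not on the object get filtered out, and the Panda3dMaskCameraSensor suggests this is done against a binary object mask. A generic sketch of that kind of filtering, with a made-up helper name and not necessarily this environment's exact implementation:

def filter_key_points_by_mask(key_points, descriptors, mask):
    # keep only key points whose pixel falls inside the (nonzero) object mask
    keep = [i for i, kp in enumerate(key_points)
            if mask[int(round(kp.pt[1])), int(round(kp.pt[0]))]]
    key_points = [key_points[i] for i in keep]
    if descriptors is not None:
        descriptors = descriptors[keep]
    return key_points, descriptors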
Example #4
    def __init__(self, number_of_features: int = None):
        if number_of_features is None:
            number_of_features = self.DEFAULT_NUMBER_OF_FEATURES
        self.number_of_features = number_of_features

        self.features_extractor = ORB_create(
            nfeatures=self.number_of_features)

        # ORB uses binary descriptors -> use hamming norm (XOR between descriptors)
        self.features_matcher = BFMatcher(NORM_HAMMING, crossCheck=True)
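The binary-descriptor/Hamming pairing described in the comment above, exercised end to end; a small sketch with hypothetical image paths:

from cv2 import imread, IMREAD_GRAYSCALE, ORB_create, BFMatcher, NORM_HAMMING

# hypothetical input images
img_a = imread('a.png', IMREAD_GRAYSCALE)
img_b = imread('b.png', IMREAD_GRAYSCALE)

orb = ORB_create(nfeatures=500)
kp_a, des_a = orb.detectAndCompute(img_a, None)
kp_b, des_b = orb.detectAndCompute(img_b, None)

# Hamming distance compares the binary descriptors bit by bit
matches = sorted(BFMatcher(NORM_HAMMING, crossCheck=True).match(des_a, des_b),
                 key=lambda m: m.distance)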
Example #5
  def __init__(self, K, nfeatures=5000, norm=NORM_HAMMING, crosscheck=False):
    self.orb = ORB_create(nfeatures)
    self.bfm = BFMatcher(norm, crosscheck)
    self.orb_mask = None
    self.last = None
    self.K = K
    self.Kinv = inv(self.K)

    self.f_est_avg = []
Example #6
def AverageCrypt(intLength=32, intCamLoc=0):
    try:
        import numpy as np
        from datetime import datetime
        from cv2 import (FastFeatureDetector_create, ORB_create,
                         SimpleBlobDetector_create, VideoCapture, waitKey,
                         destroyAllWindows, CAP_DSHOW)
    except ImportError:
        print("Imports failed")
        raise

    #initialise camera
    try:
        cam = VideoCapture(intCamLoc, CAP_DSHOW)
    except:
        cam = "Failed"

    intCount = 0
    intPwdLength = intLength
    strPassword = ""

    fast = FastFeatureDetector_create()
    orb = ORB_create()
    blob = SimpleBlobDetector_create()

    print(datetime.now())

    for intCount in range(0, intPwdLength):
        #collect frame
        ret, frame = cam.read()
        #frame = cv.resize(frame, (320, 480))
        features = fast.detect(frame, None)

        #split channels
        Ch0 = frame[:, :, 0]
        Ch1 = frame[:, :, 1]
        Ch2 = frame[:, :, 2]

        #average frames
        intCh0 = np.mean(Ch0)
        intCh1 = np.mean(Ch1)
        intCh2 = np.mean(Ch2)

        #initialise seed: pack the three channel means (OpenCV channel order is B, G, R) into one integer
        intSeed = int(intCh0) << 32 | int(intCh1) << 16 | int(intCh2) << 0
        #map the seed onto an ASCII code (the expression simplifies to intSeed % 126)
        letter = 32 + ((intSeed % 126) - 32)

        #add letter to array
        strPassword += str(chr(letter))
        waitKey(1)

    cam.release()
    destroyAllWindows()

    return strPassword
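A usage sketch for the function above; the camera index and password length are arbitrary choices:

# 16-character string derived from camera frame averages (camera index 0 assumed present)
password = AverageCrypt(intLength=16, intCamLoc=0)
print(password)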
Example #7
def SeekDuelist():
    for times in range(4):
        info("SeekDuelist() getting screenshot")
        #get screen image
        Screenshot()
        #iterate through sprites
        info("SeekDuelist() iterating through duelists")
        for image in listdir("TEMPLATES\\characters\\"):
            #set sprite
            pic="TEMPLATES\\characters\\"+image
            #read sprite image
            img1= imread(pic,IMREAD_GRAYSCALE)
            #read screen image
            img2= imread("screen.png",IMREAD_GRAYSCALE)

            #set ORB object
            orb= ORB_create(100000)

            #feature detect sprite
            kp1, des1= orb.detectAndCompute(img1, None)
            #feature detect screen
            kp2, des2= orb.detectAndCompute(img2, None)

            #match features from sprite and screen
            bf = BFMatcher(NORM_HAMMING, crossCheck=True)
            matches = bf.match(des1, des2)
            #sort accuracy of matches
            matches = sorted(matches, key=lambda m: m.distance)

            #select accurate matches
            found=[]
            for m in matches:
                if m.distance<=20:
                    found.append(m.distance)

            #check if accurate matches were found
            if len(found)!=0:

                #set image info
                img2_idx = matches[0].trainIdx
                
                # Get the coordinates
                # x - left to right
                # y - top to bottom
                (x,y) = kp2[img2_idx].pt
                info("SeekDuelist() match found for "+image+" at ("+str(int(x))+", "+str(int(y))+")")
                return (int(x),int(y))
        #Scroll one down to change screens
        info("SeekDuelist() no matches found. scrolling")
        mouse_event(MOUSEEVENTF_WHEEL, 0, 0, -1, 0)
        sleep(2)
    info("SeekDuelist() no matches found. raising exception")
    raise Exception("SeekDuelist() found no duelist on screen")
Example #8
    def algorithm_ORB(self,
                      photo,
                      screen,
                      screen_colored,
                      nfeatures=4500,
                      descriptor_matcher_name='BruteForce-Hamming'):

        t1 = time.perf_counter()

        # Init algorithm
        orb = ORB_create(nfeatures)

        t2 = time.perf_counter()

        self.writeLog('Created ORB object - {}ms'.format(
            self.formatTimeDiff(t1, t2)))

        # Detect and compute
        kp_photo, des_photo = orb.detectAndCompute(photo, None)
        kp_screen, des_screen = orb.detectAndCompute(screen, None)

        t3 = time.perf_counter()
        self.writeLog('Detected keypoints - {}ms'.format(
            self.formatTimeDiff(t2, t3)))

        # Descriptor Matcher
        try:
            descriptor_matcher = DescriptorMatcher_create(
                descriptor_matcher_name)
        except:
            return False

        t4 = time.perf_counter()
        self.writeLog('Initialized Descriptor Matcher - {}ms'.format(
            self.formatTimeDiff(t3, t4)))

        # Calc knn Matches
        try:
            matches = descriptor_matcher.knnMatch(des_photo, des_screen, k=2)
        except:
            return False

        t5 = time.perf_counter()
        self.writeLog('Calced knn matches - {}ms'.format(
            self.formatTimeDiff(t4, t5)))

        if not matches or len(matches) == 0:
            return False

        # store all the good matches as per Lowe's ratio test.
        good = []
        for m, n in matches:
            if m.distance < 0.75 * n.distance:
                good.append(m)

        t6 = time.perf_counter()
        self.writeLog('Filtered good matches - {}ms'.format(
            self.formatTimeDiff(t5, t6)))

        if not good or len(good) < 20:
            return False

        photo_pts = np.float32([kp_photo[m.queryIdx].pt
                                for m in good]).reshape(-1, 1, 2)  # pylint: disable=too-many-function-args
        screen_pts = np.float32([kp_screen[m.trainIdx].pt
                                 for m in good]).reshape(-1, 1, 2)  # pylint: disable=too-many-function-args

        M, _ = findHomography(photo_pts, screen_pts, RANSAC, 5.0)

        t7 = time.perf_counter()
        self.writeLog('Found Homography - {}ms'.format(
            self.formatTimeDiff(t6, t7)))

        if M is None or not M.any() or len(M) == 0:
            return False

        h, w = photo.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)  # pylint: disable=too-many-function-args
        dst = perspectiveTransform(pts, M)

        t8 = time.perf_counter()
        self.writeLog('Perspective Transform - {}ms'.format(
            self.formatTimeDiff(t7, t8)))

        minX = dst[0][0][0]
        minY = dst[0][0][1]
        maxX = dst[0][0][0]
        maxY = dst[0][0][1]

        for i in range(4):
            if dst[i][0][0] < minX:
                minX = dst[i][0][0]
            if dst[i][0][0] > maxX:
                maxX = dst[i][0][0]
            if dst[i][0][1] < minY:
                minY = dst[i][0][1]
            if dst[i][0][1] > maxY:
                maxY = dst[i][0][1]

        minX = int(minX)
        minY = int(minY)
        maxX = int(maxX)
        maxY = int(maxY)

        if minX < 0:
            minX = 0
        if minY < 0:
            minY = 0

        logging.info('minY {}'.format(int(minY)))
        logging.info('minX {}'.format(int(minX)))
        logging.info('maxY {}'.format(int(maxY)))
        logging.info('maxX {}'.format(int(maxX)))

        if maxX - minX <= 0:
            return False
        if maxY - minY <= 0:
            return False

        imwrite(self.match_dir + '/result.png',
                screen_colored[int(minY):int(maxY),
                               int(minX):int(maxX)])

        t9 = time.perf_counter()
        self.writeLog('Wrote Image - {}ms'.format(self.formatTimeDiff(t8, t9)))

        return True
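The detect, knnMatch, Lowe's ratio test and homography steps used above can also be run as a small standalone script; a sketch with hypothetical template and scene images:

import numpy as np
from cv2 import (imread, IMREAD_GRAYSCALE, ORB_create, BFMatcher, NORM_HAMMING,
                 findHomography, perspectiveTransform, RANSAC)

# hypothetical inputs
photo = imread('template.png', IMREAD_GRAYSCALE)
screen = imread('scene.png', IMREAD_GRAYSCALE)

orb = ORB_create(nfeatures=4500)
kp_photo, des_photo = orb.detectAndCompute(photo, None)
kp_screen, des_screen = orb.detectAndCompute(screen, None)

# k=2 so Lowe's ratio test can compare the best and second-best candidate
matches = BFMatcher(NORM_HAMMING).knnMatch(des_photo, des_screen, k=2)
good = [m for m, n in (p for p in matches if len(p) == 2)
        if m.distance < 0.75 * n.distance]

if len(good) >= 20:
    src = np.float32([kp_photo[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst = np.float32([kp_screen[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    M, _ = findHomography(src, dst, RANSAC, 5.0)
    if M is not None:
        h, w = photo.shape
        corners = np.float32([[0, 0], [0, h - 1],
                              [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        projected = perspectiveTransform(corners, M)  # template corners in scene coordinates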
Example #9
# imports required by this module-level snippet
import numpy as np
from datetime import datetime
from cv2 import (FastFeatureDetector_create, ORB_create,
                 SimpleBlobDetector_create, VideoCapture)

try:
	cam = VideoCapture(0)
except:
	cam = "Failed"
	
intCount = 0
intPwdLength = 32

strPassword = ""
strFileName = "./Results/Result{0}.png".format(datetime.now())
strFileName = strFileName.replace(":", "")
strFileName = strFileName.replace(" ", "")
print(strFileName)

fast = FastFeatureDetector_create()
orb = ORB_create()
blob = SimpleBlobDetector_create()

arryPlotX = np.zeros(126)
arryPlotY = np.zeros(126)

arryFastStdDev = np.zeros(intPwdLength)
arryOrbStdDev = np.zeros(intPwdLength)
arryBlobStdDev = np.zeros(intPwdLength)

for i in range(0, 126):
	arryPlotY[i] = i

print(datetime.now())

for intCount in range(0, intPwdLength):
Example #10
    def test_ORB(self):
        from cv2 import ORB_create
        test_orb = ORB_create()

    def __init__(self, num_features=2000, scale_factor=1.2, num_levels=8):
        print('Using Orb Feature 2D')
        self.orb_extractor = ORB_create(num_features, scale_factor, num_levels)
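For reference, the three positional arguments in the last constructor map onto ORB_create's nfeatures, scaleFactor and nlevels parameters; the same configuration spelled out with keyword arguments:

from cv2 import ORB_create

orb_extractor = ORB_create(nfeatures=2000, scaleFactor=1.2, nlevels=8)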