Example #1
def SeekDuelist():
    for times in range(4):
        info("SeekDuelist() getting screenshot")
        #get screen image
        Screenshot()
        #iterate through sprites
        info("SeekDuelist() iterating through duelists")
        for image in listdir("TEMPLATES\\characters\\"):
            #set sprite
            pic="TEMPLATES\\characters\\"+image
            #read sprite image
            img1= imread(pic,IMREAD_GRAYSCALE)
            #read screen image
            img2= imread("screen.png",IMREAD_GRAYSCALE)

            #set ORB object
            orb= ORB_create(100000)

            #feature detect sprite
            kp1, des1= orb.detectAndCompute(img1, None)
            #feature detect screen
            kp2, des2= orb.detectAndCompute(img2, None)

            #match features from sprite and screen
            bf = BFMatcher(NORM_HAMMING, crossCheck=True)
            matches = bf.match(des1, des2)
            #sort accuracy of matches
            matches = sorted(matches, key=lambda m: m.distance)

            #select accurate matches
            found=[]
            for m in matches:
                if m.distance<=20:
                    found.append(m.distance)

            #check if accurate matches were found
            if len(found)!=0:

                #set image info
                img2_idx = matches[0].trainIdx
                
                # Get the coordinates
                # x - left to right
                # y - top to bottom
                (x,y) = kp2[img2_idx].pt
                info("SeekDuelist() match found for "+image+" at ("+str(int(x))+", "+str(int(y))+")")
                return (int(x),int(y))
        #Scroll one down to change screens
        info("SeekDuelist() no matches found. scrolling")
        mouse_event(MOUSEEVENTF_WHEEL, 0, 0, -1, 0)
        sleep(2)
    info("SeekDuelist() no matches found. raising exception")
    raise Exception("SeekDuelist() could not find any duelist sprite on screen")
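
A hypothetical caller for the function above (not part of the original script): it assumes the same Windows/pywin32 setup used for the wheel scroll, and simply moves the cursor to the returned coordinates and clicks.

from time import sleep
from win32api import SetCursorPos, mouse_event
from win32con import MOUSEEVENTF_LEFTDOWN, MOUSEEVENTF_LEFTUP

try:
    # SeekDuelist() returns the (x, y) screen coordinates of the best sprite match
    x, y = SeekDuelist()
except Exception:
    print("no duelist sprite found on any screen")
else:
    SetCursorPos((x, y))                           # move the cursor onto the match
    mouse_event(MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)  # press...
    mouse_event(MOUSEEVENTF_LEFTUP, x, y, 0, 0)    # ...and release the left button
    sleep(1)
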
Example #2
class OrbFeaturesHandler(FeaturesHandlerAbstractBase):
    """
    This class implements an ORB feature extractor
    """
    features_extractor: ORB
    features_matcher: BFMatcher

    DEFAULT_DISTANCE_THRESHOLD_FOR_SUCCESSFULL_FEATURE_MATCH: int = 10

    DEFAULT_NUMBER_OF_FEATURES = 1000
    number_of_features: int

    def __init__(self, number_of_features: int = None):
        if number_of_features is None:
            number_of_features = self.DEFAULT_NUMBER_OF_FEATURES
        self.number_of_features = number_of_features

        self.features_extractor = ORB_create(
            nfeatures=self.number_of_features)

        # ORB uses binary descriptors -> use hamming norm (XOR between descriptors)
        self.features_matcher = BFMatcher(NORM_HAMMING, crossCheck=True)

    def extract_features(self, frame: np.ndarray) -> ProcessedFrameData:
        """
        This method extracts ORB features
        """
        list_of_keypoints, descriptors = self.features_extractor.detectAndCompute(
            image=frame, mask=None)
        return ProcessedFrameData.build(frame=frame,
                                        list_of_keypoints=list_of_keypoints,
                                        descriptors=descriptors)

    def match_features(self, frame_1: ProcessedFrameData,
                       frame_2: ProcessedFrameData):
        """
        This method matches ORB features
        based on https://docs.opencv.org/master/dc/dc3/tutorial_py_matcher.html
        """
        list_of_matches: List[DMatch] = self.features_matcher.match(
            queryDescriptors=frame_1.descriptors,
            trainDescriptors=frame_2.descriptors)
        # Sort the matches in the order of their distance.
        return sorted(list_of_matches, key=lambda x: x.distance)

    def is_handler_capable(self, frame: np.ndarray) -> bool:
        """
        This method implements ORB handler capability test
        """
        extracted_features: ProcessedFrameData = self.extract_features(
            frame=frame)
        return len(extracted_features.list_of_keypoints) >= int(
            0.9 * self.number_of_features)
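
A hypothetical wiring of OrbFeaturesHandler on two frames (the image paths and the use of the distance threshold are illustrative, not from the original module):

import cv2

handler = OrbFeaturesHandler()

frame_a = cv2.imread("frame_a.png", cv2.IMREAD_GRAYSCALE)
frame_b = cv2.imread("frame_b.png", cv2.IMREAD_GRAYSCALE)

processed_a = handler.extract_features(frame=frame_a)
processed_b = handler.extract_features(frame=frame_b)

# matches come back sorted by ascending Hamming distance
matches = handler.match_features(frame_1=processed_a, frame_2=processed_b)
good = [m for m in matches
        if m.distance <= handler.DEFAULT_DISTANCE_THRESHOLD_FOR_SUCCESSFULL_FEATURE_MATCH]
print(len(good), "matches under the distance threshold")
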
class OrbFeature2D:
    def __init__(self, num_features=2000, scale_factor=1.2, num_levels=8):
        print('Using Orb Feature 2D')
        self.orb_extractor = ORB_create(num_features, scale_factor, num_levels)

    # extract keypoints
    def detect(self, img):
        # cv2.ORB returns cv2.KeyPoint objects directly, so no conversion is needed
        kps = self.orb_extractor.detect(img)
        return list(kps)

    def detectAndCompute(self, img):
        #detect and compute
        kps, des = self.orb_extractor.detectAndCompute(img, None)
        return kps, des
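
A hypothetical use of the OrbFeature2D wrapper above (the image path is a placeholder):

import cv2

feature2d = OrbFeature2D(num_features=2000, scale_factor=1.2, num_levels=8)
img = cv2.imread("frame.png", cv2.IMREAD_GRAYSCALE)

kps, des = feature2d.detectAndCompute(img)
if des is not None:
    print(len(kps), "keypoints,", des.shape[0], "binary descriptors")
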
Example #4
    def algorithm_ORB(self,
                      photo,
                      screen,
                      screen_colored,
                      nfeatures=4500,
                      descriptor_matcher_name='BruteForce-Hamming'):

        t1 = time.perf_counter()

        # Init algorithm
        orb = ORB_create(nfeatures)

        t2 = time.perf_counter()

        self.writeLog('Created ORB object - {}ms'.format(
            self.formatTimeDiff(t1, t2)))

        # Detect and compute
        kp_photo, des_photo = orb.detectAndCompute(photo, None)
        kp_screen, des_screen = orb.detectAndCompute(screen, None)

        t3 = time.perf_counter()
        self.writeLog('Detected keypoints - {}ms'.format(
            self.formatTimeDiff(t2, t3)))

        # Descriptor Matcher
        try:
            descriptor_matcher = DescriptorMatcher_create(
                descriptor_matcher_name)
        except Exception:
            return False

        t4 = time.perf_counter()
        self.writeLog('Initialized Descriptor Matcher - {}ms'.format(
            self.formatTimeDiff(t3, t4)))

        # Calc knn Matches
        try:
            matches = descriptor_matcher.knnMatch(des_photo, des_screen, k=2)
        except Exception:
            return False

        t5 = time.perf_counter()
        self.writeLog('Calced knn matches - {}ms'.format(
            self.formatTimeDiff(t4, t5)))

        if not matches:
            return False

        # store all the good matches as per Lowe's ratio test.
        # knnMatch can return fewer than two neighbours for a descriptor, so guard the unpack.
        good = []
        for pair in matches:
            if len(pair) < 2:
                continue
            m, n = pair
            if m.distance < 0.75 * n.distance:
                good.append(m)

        t6 = time.perf_counter()
        self.writeLog('Filtered good matches - {}ms'.format(
            self.formatTimeDiff(t5, t6)))

        if not good or len(good) < 20:
            return False

        photo_pts = np.float32([kp_photo[m.queryIdx].pt
                                for m in good]).reshape(-1, 1, 2)  # pylint: disable=too-many-function-args
        screen_pts = np.float32([kp_screen[m.trainIdx].pt
                                 for m in good]).reshape(-1, 1, 2)  # pylint: disable=too-many-function-args

        M, _ = findHomography(photo_pts, screen_pts, RANSAC, 5.0)

        t7 = time.perf_counter()
        self.writeLog('Found Homography - {}ms'.format(
            self.formatTimeDiff(t6, t7)))

        if M is None or not M.any() or len(M) == 0:
            return False

        h, w = photo.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)  # pylint: disable=too-many-function-args
        dst = perspectiveTransform(pts, M)

        t8 = time.perf_counter()
        self.writeLog('Perspective Transform - {}ms'.format(
            self.formatTimeDiff(t7, t8)))

        # bounding box of the projected photo corners
        xs = dst[:, 0, 0]
        ys = dst[:, 0, 1]
        minX, maxX = int(xs.min()), int(xs.max())
        minY, maxY = int(ys.min()), int(ys.max())

        if minX < 0:
            minX = 0
        if minY < 0:
            minY = 0

        logging.info('minY {}'.format(int(minY)))
        logging.info('minX {}'.format(int(minX)))
        logging.info('maxY {}'.format(int(maxY)))
        logging.info('maxX {}'.format(int(maxX)))

        if maxX - minX <= 0:
            return False
        if maxY - minY <= 0:
            return False

        imwrite(self.match_dir + '/result.png',
                screen_colored[int(minY):int(maxY),
                               int(minX):int(maxX)])

        t9 = time.perf_counter()
        self.writeLog('Wrote Image - {}ms'.format(self.formatTimeDiff(t8, t9)))

        return True
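
A hypothetical driver for algorithm_ORB (the owning class, here called PhotoMatcher, and the file names are assumptions; the class that actually defines writeLog, formatTimeDiff and match_dir is not shown in the snippet):

import cv2

matcher = PhotoMatcher()  # hypothetical owner of algorithm_ORB / writeLog / match_dir
photo = cv2.imread("photo.png", cv2.IMREAD_GRAYSCALE)
screen = cv2.imread("screen.png", cv2.IMREAD_GRAYSCALE)
screen_colored = cv2.imread("screen.png", cv2.IMREAD_COLOR)

if matcher.algorithm_ORB(photo, screen, screen_colored):
    print("photo located on screen; crop written to", matcher.match_dir + "/result.png")
else:
    print("no reliable homography between photo and screen")
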
Example #5
class ROS_Video_Node():
    def __init__(self, name):
        """This initializes the object"""
        self.name = name
        self.last_frame = None
        self.current_frame = None
        self.match_frames = None

        #Create an ORB object for keypoint detection
        self.orb = ORB_create(nfeatures=100,
                              scaleFactor=2,
                              edgeThreshold=100,
                              fastThreshold=10)

        #Create a BF object for keypoint matching
        self.bf = BFMatcher(NORM_L1, crossCheck=True)

        #For keeping track of the drone's current velocity
        self.vel_x = None
        self.vel_y = None

        #For plotting the drone's current and past velocities
        self.times = []
        self.x_s = []
        self.y_s = []

        #Creating a publisher that will publish the calculated velocity to the ROS topic /quadrotor/ardrone/calc_vel
        self.pub = rospy.Publisher("/quadrotor/ardrone/calc_vel",
                                   Twist,
                                   queue_size=10)

    def detect_motion(self):
        """This function detects the motion between the current and last frame."""
        if (self.last_frame == self.current_frame).all():
            print(
                "Beep boop! I think my current frame is exactly the same as my last frame. \nEither I'm not moving, or something is very wrong.\n"
            )
            namedWindow(self.name)
            imshow(self.name, self.current_frame)
            waitKey(1)
            return None

        #This finds the keypoints and descriptors with ORB
        kp1, des1 = self.orb.detectAndCompute(self.last_frame, None)
        kp2, des2 = self.orb.detectAndCompute(self.current_frame, None)

        #Match descriptors
        matches = self.bf.match(des1, des2)

        #Sort them in the order of their distance
        matches = sorted(matches, key=lambda x: x.distance)

        #Optionally filter matches by distance here; for now every match is kept
        filtered_matches = matches

        if len(filtered_matches) < 5:
            print(
                "Beep boop! Not enough good matches were found. This data is unreliable. \n Try moving me above the building, please.\n"
            )
            namedWindow(self.name)
            imshow(self.name, self.current_frame)
            waitKey(1)
            return None

        #create arrays of the coordinates for keypoints in the two frames
        kp1_coords = asarray(
            [kp1[mat.queryIdx].pt for mat in filtered_matches])
        kp2_coords = asarray(
            [kp2[mat.trainIdx].pt for mat in filtered_matches])

        #calculate the translations needed to get from the first set of keypoints to the next set of keypoints
        translations = kp2_coords - kp1_coords

        #find the single translation that best explains the motion of all keypoints
        least_error = 5
        best_translation = translations[0]
        for translation in translations:
            a = translation[0] * ones((translations.shape[0], 1))
            b = translation[1] * ones((translations.shape[0], 1))
            test_translation = concatenate((a, b), axis=1)
            test_coords = kp1_coords + test_translation
            error = sqrt(mean(square(kp2_coords - test_coords)))
            if error < least_error:
                least_error = error
                best_translation = translation

        self.vel_x = best_translation[0]
        self.vel_y = best_translation[1]

        # Draw first matches.
        draw_params = dict(
            matchColor=(0, 255, 0),  # draw matches in green color
            singlePointColor=None,
            matchesMask=None,  # draw all matches
            flags=2)
        self.match_frames = drawMatches(self.last_frame, kp1,
                                        self.current_frame, kp2, matches, None,
                                        **draw_params)

        namedWindow(self.name)
        imshow(self.name, self.match_frames)
        waitKey(1)

        if least_error < 2:
            print("This is a new match")
            print("X velocity: ", self.vel_x)
            print("Y velocity: ", self.vel_y)
            print("least_error: ", least_error)

            #Publish the velocities found
            calc_vel = Twist()
            calc_vel.linear.x = self.vel_x
            calc_vel.linear.y = self.vel_y

            self.pub.publish(calc_vel)

    def plot_current_vel(self):
        """This function live plots the quadrotor's measured velocity"""
        #save the current velocities
        self.times.append(time())
        self.x_s.append(self.vel_x)
        self.y_s.append(self.vel_y)

        #plot the velocities
        plt.title("Live Velocities")
        plt.ylabel("Velocity")
        plt.xlabel("Time")
        plt.ylim(-20, 20)
        try:
            plt.plot(self.times[-60:],
                     self.x_s[-60:],
                     'r-',
                     label="X Velocity")
            plt.plot(self.times[-60:],
                     self.y_s[-60:],
                     'b-',
                     label="Y Velocity")
        except:
            plt.plot(self.times, self.x_s, 'r-', label="X Velocity")
            plt.plot(self.times, self.y_s, 'b-', label="Y Velocity")
        plt.legend(loc="upper right")
class ServoingDesignedFeaturesSimpleQuadPanda3dEnv(SimpleQuadPanda3dEnv,
                                                   ServoingEnv):
    def __init__(self,
                 action_space,
                 feature_type=None,
                 filter_features=None,
                 max_time_steps=100,
                 distance_threshold=4.0,
                 **kwargs):
        """
        filter_features indicates whether to filter out key points that are not
        on the object in the current image. Key points in the target image are
        always filtered out.
        """
        SimpleQuadPanda3dEnv.__init__(self, action_space, **kwargs)
        ServoingEnv.__init__(self,
                             env=self,
                             max_time_steps=max_time_steps,
                             distance_threshold=distance_threshold)

        lens = self.camera_node.node().getLens()
        self._observation_space.spaces['points'] = BoxSpace(
            np.array([-np.inf, lens.getNear(), -np.inf]),
            np.array([np.inf, lens.getFar(), np.inf]))
        film_size = tuple(int(s) for s in lens.getFilmSize())
        self.mask_camera_sensor = Panda3dMaskCameraSensor(
            self.app, (self.skybox_node, self.city_node),
            size=film_size,
            near_far=(lens.getNear(), lens.getFar()),
            hfov=lens.getFov())
        for cam in self.mask_camera_sensor.cam:
            cam.reparentTo(self.camera_sensor.cam)

        self.filter_features = True if filter_features is None else bool(filter_features)
        self._feature_type = feature_type or 'sift'
        if cv2.__version__.split('.')[0] == '3':
            from cv2.xfeatures2d import SIFT_create, SURF_create
            from cv2 import ORB_create
            if self.feature_type == 'orb':
                # https://github.com/opencv/opencv/issues/6081
                cv2.ocl.setUseOpenCL(False)
        else:
            SIFT_create = cv2.SIFT
            SURF_create = cv2.SURF
            ORB_create = cv2.ORB
        if self.feature_type == 'sift':
            self._feature_extractor = SIFT_create()
        elif self.feature_type == 'surf':
            self._feature_extractor = SURF_create()
        elif self.feature_type == 'orb':
            self._feature_extractor = ORB_create()
        else:
            raise ValueError("Unknown feature extractor %s" %
                             self.feature_type)
        if self.feature_type == 'orb':
            self._matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        else:
            self._matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
        self._target_key_points = None
        self._target_descriptors = None

    @property
    def feature_type(self):
        return self._feature_type

    @property
    def target_key_points(self):
        return self._target_key_points

    @property
    def target_descriptors(self):
        return self._target_descriptors

    def _step(self, action):
        obs, reward, done, info = SimpleQuadPanda3dEnv.step(self, action)
        done = done or obs.get('points') is None
        return obs, reward, done, info

    def step(self, action):
        return ServoingEnv.step(self, action)

    def reset(self, state=None):
        self._target_key_points = None
        self._target_descriptors = None
        self._target_obs = None
        self._target_pos = self.get_relative_target_position()
        self._t = 0
        return SimpleQuadPanda3dEnv.reset(self, state=state)

    def observe(self):
        obs = SimpleQuadPanda3dEnv.observe(self)
        assert isinstance(obs, dict)

        mask, depth_image, _ = self.mask_camera_sensor.observe(
        )  # used to filter out key points
        if self._target_obs is None:
            self._target_obs = {'target_' + k: v for (k, v) in obs.items()}
            self._target_key_points, self._target_descriptors = \
                self._feature_extractor.detectAndCompute(self._target_obs['target_image'], mask)

        if self._target_key_points:
            key_points, descriptors = \
                self._feature_extractor.detectAndCompute(obs['image'], mask if self.filter_features else None)
            matches = self._matcher.match(descriptors,
                                          self._target_descriptors)
        else:
            matches = False

        if matches:
            key_points_xy = np.array(
                [key_points[match.queryIdx].pt for match in matches])
            target_key_points_xy = np.array([
                self._target_key_points[match.trainIdx].pt for match in matches
            ])
            key_points_XYZ = xy_depth_to_XYZ(self.camera_sensor.lens,
                                             key_points_xy, depth_image)
            target_key_points_XYZ = xy_depth_to_XYZ(self.camera_sensor.lens,
                                                    target_key_points_xy,
                                                    depth_image)
            obs['points'] = key_points_XYZ
            obs['target_points'] = target_key_points_XYZ
        else:
            obs['points'] = None
            obs['target_points'] = None

        obs.update(self._target_obs)
        return obs