def sift_features(images, thresh_1, thresh_2):
    sift_vectors = {}
    descriptor_list = []
    #sift = cv2.SIFT_create(contrastThreshold=threshold)
    for key, value in images.items():
        features = []

        if key in ['5']:
            sift = cv2.SIFT_create(contrastThreshold=thresh_1)
        elif key in ['1']:
            sift = cv2.SIFT_create(contrastThreshold=thresh_2)
        elif key in ['3']:
            sift = cv2.SIFT_create(contrastThreshold=0.005, edgeThreshold=14)
        elif key in ['2', '4']:
            sift = cv2.SIFT_create(contrastThreshold=0.06, edgeThreshold=8)
        else:
            sift = cv2.SIFT_create()  # fall back to OpenCV defaults for unlisted keys

        for img in value:
            kp, des = sift.detectAndCompute(img, None)

            descriptor_list.append(des)
            features.append(des)
        sift_vectors[key] = features
        print(features[0].shape)

    return descriptor_list, sift_vectors
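
# Hypothetical usage sketch (not part of the original snippet): sift_features
# expects a mapping from class label to a list of grayscale images, e.g.
#
# images = {'1': [cv2.imread(p, cv2.IMREAD_GRAYSCALE) for p in class1_paths],
#           '5': [cv2.imread(p, cv2.IMREAD_GRAYSCALE) for p in class5_paths]}
# descriptor_list, sift_vectors = sift_features(images, thresh_1=0.04, thresh_2=0.03)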
Example #2
def sift_features(images, thresh_1, thresh_2):
    sift_vectors = {}
    descriptor_list = []
    #sift = cv2.SIFT_create(contrastThreshold=threshold)
    for key, value in images.items():
        features = []

        if key in ['1']:
            sift = cv2.SIFT_create(contrastThreshold=thresh_1)
        elif key in ['3']:
            sift = cv2.SIFT_create(contrastThreshold=thresh_2)
        elif key in ['4', '5']:
            sift = cv2.SIFT_create(contrastThreshold=0.05, edgeThreshold=8)
        elif key in ['2']:
            sift = cv2.SIFT_create(contrastThreshold=0.07, edgeThreshold=6)
        else:
            sift = cv2.SIFT_create()  # fall back to OpenCV defaults for unlisted keys
        for img in value:
            kp = []
            des = []
            for i in range(3):
                channel = img[:,:,i]
                kp_c, des_c = sift.detectAndCompute(channel, None)
                kp = [*kp, *kp_c]
                if des_c is not None:
                    des = [*des, *des_c]
                else:
                    print(f'desc none, len kp {len(kp)}')
                    print(key)
            des_stacked = np.vstack(des)
            des_stacked = np.reshape(des_stacked, (-1, 128))
            descriptor_list.append(des_stacked)
            features.append(des_stacked)
        sift_vectors[key] = features
        print(features[0].shape)

    return descriptor_list, sift_vectors
Example #3
    def get_kps(self):
        kp_list = []
        des_list = []
        for img in self.input_frames:
            t0 = time.time()
            if self.method == 'sift':
                sift = cv2.SIFT_create()
                kp = sift.detect(img, None)
                time_taken = (time.time() - t0) / len(kp)
                kp, des = sift.compute(img, kp)
            elif self.method == 'orb':
                orb = cv2.ORB_create()
                kp = orb.detect(img, None)
                time_taken = (time.time() - t0) / len(kp)
                kp, des = orb.compute(img, kp)
            elif self.method == 'shi-tomasi':
                points = cv2.goodFeaturesToTrack(img,
                                                 maxCorners=100,
                                                 qualityLevel=0.5,
                                                 minDistance=10)
                time_taken = (time.time() - t0) / len(points)
                # pass x, y, size positionally: newer OpenCV bindings renamed
                # the `_size` keyword to `size`, positional args work in both
                kp = [
                    cv2.KeyPoint(float(f[0][0]), float(f[0][1]), 20)
                    for f in points
                ]
                sift = cv2.SIFT_create()
                kp, des = sift.compute(img, kp)
            else:
                raise ValueError(f'unsupported method: {self.method}')

            kp_list.append(kp)
            des_list.append(des)
        return time_taken, kp_list, des_list
Example #4
def extract_features_sift(
        image: np.ndarray, config: Dict[str, Any],
        features_count: int) -> Tuple[np.ndarray, np.ndarray]:
    sift_edge_threshold = config["sift_edge_threshold"]
    sift_peak_threshold = float(config["sift_peak_threshold"])
    # SIFT support is in cv2 main from version 4.4.0
    if context.OPENCV44 or context.OPENCV5:
        # OpenCV versions concerned /** 3.4.11, >= 4.4.0 **/  ==> Sift became free since March 2020
        detector = cv2.SIFT_create(edgeThreshold=sift_edge_threshold,
                                   contrastThreshold=sift_peak_threshold)
        descriptor = detector
    elif context.OPENCV3 or context.OPENCV4:
        try:
            # OpenCV versions concerned /** 3.2.x, 3.3.x, 3.4.0, 3.4.1, 3.4.2, 3.4.10, 4.3.0, 4.4.0 **/
            detector = cv2.xfeatures2d.SIFT_create(
                edgeThreshold=sift_edge_threshold,
                contrastThreshold=sift_peak_threshold)
        except AttributeError as ae:
            # OpenCV versions concerned /** 3.4.3, 3.4.4, 3.4.5, 3.4.6, 3.4.7, 3.4.8, 3.4.9, 4.0.x, 4.1.x, 4.2.x **/
            if "no attribute 'xfeatures2d'" in str(ae):
                logger.error(
                    "OpenCV Contrib modules are required to extract SIFT features"
                )
            raise
        descriptor = detector
    else:
        detector = cv2.FeatureDetector_create("SIFT")
        descriptor = cv2.DescriptorExtractor_create("SIFT")
        detector.setDouble("edgeThreshold", sift_edge_threshold)
    while True:
        logger.debug(
            "Computing sift with threshold {0}".format(sift_peak_threshold))
        t = time.time()
        # SIFT support is in cv2 main from version 4.4.0
        if context.OPENCV44 or context.OPENCV5:
            detector = cv2.SIFT_create(edgeThreshold=sift_edge_threshold,
                                       contrastThreshold=sift_peak_threshold)
        elif context.OPENCV3 or context.OPENCV4:
            detector = cv2.xfeatures2d.SIFT_create(
                edgeThreshold=sift_edge_threshold,
                contrastThreshold=sift_peak_threshold)
        else:
            detector.setDouble("contrastThreshold", sift_peak_threshold)
        points = detector.detect(image)
        logger.debug("Found {0} points in {1}s".format(len(points),
                                                       time.time() - t))
        if len(points) < features_count and sift_peak_threshold > 0.0001:
            sift_peak_threshold = (sift_peak_threshold * 2) / 3
            logger.debug("reducing threshold")
        else:
            logger.debug("done")
            break
    points, desc = descriptor.compute(image, points)
    if config["feature_root"]:
        desc = root_feature(desc)
    points = np.array([(i.pt[0], i.pt[1], i.size, i.angle) for i in points])
    return points, desc
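
# root_feature is not defined in this snippet; a minimal sketch of the usual
# RootSIFT transform (Arandjelovic & Zisserman 2012), consistent with how it
# is called here and in Example #5, would be:
def root_feature(desc):
    # L1-normalize each descriptor, then take the elementwise square root
    s = np.sum(desc, axis=1, keepdims=True)
    s[s == 0] = 1.0  # guard against all-zero descriptors
    return np.sqrt(desc / s)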
Example #5
def extract_features_sift(image, config):
    sift_edge_threshold = config['sift_edge_threshold']
    sift_peak_threshold = float(config['sift_peak_threshold'])
    # SIFT support is in cv2 main from version 4.4.0
    if context.OPENCV44 or context.OPENCV5:
        detector = cv2.SIFT_create(edgeThreshold=sift_edge_threshold,
                                   contrastThreshold=sift_peak_threshold)
        descriptor = detector  # without this, descriptor.compute below raises NameError
    elif context.OPENCV3:
        try:
            detector = cv2.xfeatures2d.SIFT_create(
                edgeThreshold=sift_edge_threshold,
                contrastThreshold=sift_peak_threshold)
        except AttributeError as ae:
            if "no attribute 'xfeatures2d'" in str(ae):
                logger.error(
                    'OpenCV Contrib modules are required to extract SIFT features'
                )
            raise
        descriptor = detector
    else:
        detector = cv2.FeatureDetector_create('SIFT')
        descriptor = cv2.DescriptorExtractor_create('SIFT')
        detector.setDouble('edgeThreshold', sift_edge_threshold)
    while True:
        logger.debug(
            'Computing sift with threshold {0}'.format(sift_peak_threshold))
        t = time.time()
        # SIFT support is in cv2 main from version 4.4.0
        if context.OPENCV44 or context.OPENCV5:
            detector = cv2.SIFT_create(edgeThreshold=sift_edge_threshold,
                                       contrastThreshold=sift_peak_threshold)
        elif context.OPENCV3:
            detector = cv2.xfeatures2d.SIFT_create(
                edgeThreshold=sift_edge_threshold,
                contrastThreshold=sift_peak_threshold)
        else:
            detector.setDouble("contrastThreshold", sift_peak_threshold)
        points = detector.detect(image)
        logger.debug('Found {0} points in {1}s'.format(len(points),
                                                       time.time() - t))
        if len(points) < config[
                'feature_min_frames'] and sift_peak_threshold > 0.0001:
            sift_peak_threshold = (sift_peak_threshold * 2) / 3
            logger.debug('reducing threshold')
        else:
            logger.debug('done')
            break
    points, desc = descriptor.compute(image, points)
    if config['feature_root']:
        desc = root_feature(desc)
    points = np.array([(i.pt[0], i.pt[1], i.size, i.angle) for i in points])
    return points, desc
Example #6
def sift_detect(image, mask, options):
    """
     Extract keypoints and descriptors with Scale Invariant Features Transform
     Args:
        image (ndarray): (H x W) 2D array of type np.uint8 containing a grayscale image.
        mask: mask to be applied to the image [1 = yes, 0 = no]
        options: Optional arguments to adjust the sift option
     Returns:
        descriptors (ndarray): 2D array of type np.float32 and shape (#keypoints x 128)
        containing local descriptors for the keypoints."""

    grayscale_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    grayscale_image = cv2.resize(grayscale_image, (256, 256), interpolation=cv2.INTER_AREA)

    if mask is not None:
        mask = cv2.resize(mask, (256, 256), interpolation=cv2.INTER_AREA)
        mask = (mask == 0).astype(np.uint8) * 255

    sift = cv2.SIFT_create(nfeatures=options.sift_features,
                           nOctaveLayers=options.sift_octlayer,
                           contrastThreshold=options.sift_thresh,
                           edgeThreshold=options.sift_edgethresh,
                           sigma=options.sift_sigma)
    keypoints = sift.detect(grayscale_image, mask)

    #drawed_image = cv2.drawKeypoints(z, keypoints, z, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    return keypoints
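
# The options argument is only read through its sift_* attributes; a stand-in
# for testing could be built like this (attribute names come from the snippet
# above, values are OpenCV's documented SIFT defaults):
from types import SimpleNamespace
options = SimpleNamespace(sift_features=0,       # 0 = keep all keypoints
                          sift_octlayer=3,
                          sift_thresh=0.04,
                          sift_edgethresh=10,
                          sift_sigma=1.6)
# keypoints = sift_detect(image, None, options)   # image: any BGR np.uint8 array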
Example #7
    def sift(self,
             template_path: str):  # -> Tuple[Tuple[int, int], Tuple[int, int]]
        sift = cv2.SIFT_create()
        template = cv2.imread(template_path)
        template_height, template_width = template.shape[:2]
        keypoint1, descriptor1 = sift.detectAndCompute(template, None)
        t0 = time.time()  # time the following statement
        keypoint2, descriptor2 = sift.detectAndCompute(self.img, None)
        print(time.time() - t0)

        bf = cv2.BFMatcher()
        matches = bf.knnMatch(descriptor1, descriptor2, k=2)
        good = [[m] for m, n in matches if m.distance < 0.75 * n.distance]
        # findHomography needs at least 4 good matches; return early when
        # there are too few (6 is used here as a stricter margin).
        if len(good) < 6:
            return (0, 0), (0, 0)
        list_kp1 = np.reshape([keypoint1[m.queryIdx].pt for [m] in good],
                              (-1, 1, 2))
        list_kp2 = np.reshape([keypoint2[m.trainIdx].pt for [m] in good],
                              (-1, 1, 2))
        quad = cv2.perspectiveTransform(
            np.float32([[0, 0], [0, template_height - 1],
                        [template_width - 1, template_height - 1],
                        [template_width - 1, 0]]).reshape(-1, 1, 2),
            cv2.findHomography(list_kp1, list_kp2, cv2.RANSAC, 5.0)[0])

        #cv2.namedWindow('BFmatch', cv2.WINDOW_NORMAL)
        #cv2.resizeWindow('BFmatch', self.height, self.width)
        #cv2.imshow('BFmatch', cv2.drawMatchesKnn(template, keypoint1, self.img, keypoint2, good, None, flags=2))
        #cv2.waitKey(0)

        return (int(min(quad[:, 0, 0])),
                int(min(quad[:, 0, 1]))), (int(max(quad[:, 0, 0])),
                                           int(max(quad[:, 0, 1])))
Example #8
def sift_descriptor():
    img = cv.imread('lion.jpg')
    gray= cv.cvtColor(img,cv.COLOR_BGR2GRAY)
    sift = cv.SIFT_create()
    kp = sift.detect(gray,None)
    img=cv.drawKeypoints(gray,kp,img)
    cv.imwrite('sift_keypoints.jpg',img)
Example #9
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv.SIFT_create()
        norm = cv.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv.ORB_create(400)
        norm = cv.NORM_HAMMING
    elif chunks[0] == 'akaze':
        detector = cv.AKAZE_create()
        norm = cv.NORM_HAMMING
    elif chunks[0] == 'brisk':
        detector = cv.BRISK_create()
        norm = cv.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv.NORM_L2:
            flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        else:
            flann_params = dict(
                algorithm=FLANN_INDEX_LSH,
                table_number=6,  # 12
                key_size=12,  # 20
                multi_probe_level=1)  # 2
        matcher = cv.FlannBasedMatcher(
            flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv.BFMatcher(norm)
    return detector, matcher
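
# FLANN_INDEX_KDTREE and FLANN_INDEX_LSH are module-level constants in the
# OpenCV sample this snippet comes from; the conventional values are:
FLANN_INDEX_KDTREE = 1  # flann enums are missing from the cv2 bindings
FLANN_INDEX_LSH = 6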
Example #10
def get_sift_correspondences(img1, img2):
    '''
    Input:
        img1: numpy array of the first image
        img2: numpy array of the second image

    Return:
        points1: numpy array [N, 2], N is the number of correspondences
        points2: numpy array [N, 2], N is the number of correspondences
    '''
    #sift = cv.xfeatures2d.SIFT_create()# opencv-python and opencv-contrib-python version == 3.4.2.16 or enable nonfree
    sift = cv.SIFT_create()             # opencv-python==4.5.1.48
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    matcher = cv.BFMatcher()
    matches = matcher.knnMatch(des1, des2, k=2)
    good_matches = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good_matches.append(m)

    good_matches = sorted(good_matches, key=lambda x: x.distance)
    points1 = np.array([kp1[m.queryIdx].pt for m in good_matches])
    points2 = np.array([kp2[m.trainIdx].pt for m in good_matches])
    
    img_draw_match = cv.drawMatches(img1, kp1, img2, kp2, good_matches, None, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
    cv.imshow('match', img_draw_match)
    cv.waitKey(0)
    return points1, points2
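
# A typical follow-up (a sketch, assuming at least four correspondences
# survive the ratio test): estimate a homography from the returned points
# and warp the first image onto the second.
# points1, points2 = get_sift_correspondences(img1, img2)
# H, inlier_mask = cv.findHomography(points1, points2, cv.RANSAC, 5.0)
# warped = cv.warpPerspective(img1, H, (img2.shape[1], img2.shape[0]))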
Example #11
def extract_sift_features(X):
    image_descriptors = []
    sift = cv2.SIFT_create()
    for i in range(len(X)):
        kp, des = sift.detectAndCompute(X[i], None)
        image_descriptors.append(des)
    return image_descriptors
Example #12
def find_frame(url, video_path, id):

    #load the thumbnail image from URL (Python 3: urlopen lives in urllib.request)
    resp = urllib.request.urlopen(url)
    image = np.asarray(bytearray(resp.read()), dtype="uint8")
    gray = cv.imdecode(image, cv.IMREAD_GRAYSCALE)

    bf = cv.BFMatcher()  #feature matching object
    sift = cv.SIFT_create()  #sift detection object
    kp, des = sift.detectAndCompute(gray, None)  #get keypoints of thumbnail

    #get videocapture from video URL
    cap = cv.VideoCapture(video_path)  #get video
    if (not cap.isOpened()):
        cap = cv.VideoCapture(findVideo(id))

    sample_rate_exact = cap.get(cv.CAP_PROP_FPS)  #find frame rate
    sample_rate = round(
        sample_rate_exact)  #rounded frame rate (for while loop)
    success = cap.grab()  # get the next frame
    fno = 0  #frame number
    best_match = 0  #maximum number of matches
    best_fno = 0  #frame number of closes match

    while success:
        if fno % (sample_rate) == 0:  #sample rate of about 1 frame per second
            _, img = cap.retrieve()
            cmp = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
            kp1, des1 = sift.detectAndCompute(cmp, None)

            try:
                matches = bf.knnMatch(des, des1, k=2)
            except cv.error:  # e.g. a frame with too few descriptors
                matches = []

            good = 0
            for match in matches:  #ratio test
                if len((match)) < 2:
                    continue
                m, n = match
                if m.distance < 0.75 * n.distance:
                    good += 1

            if good > best_match:
                best_match = good
                #best_img = img
                best_fno = fno

        fno += 1
        success = cap.grab()

    cap.release()

    if (best_match / len(kp) < 0.1):  #no match condition
        print("NO MATCH")

    minutes = int((best_fno / sample_rate_exact) // 60)
    seconds = round((best_fno / sample_rate_exact) % 60)

    return ("%d:%02d" % (minutes, seconds))  #time stamp
Example #13
def recognise_picture(img, template):

    sift = cv.SIFT_create()

    kp1, des1 = sift.detectAndCompute(template, None)
    kp2, des2 = sift.detectAndCompute(img, None)

    #ex_im = cv.drawKeypoints (resized_image, kp1, None)
    #ex_im_cv = cv.drawKeypoints (cv_image, kp2, None)
    #cv.imwrite('res.jpg', ex_im)
    #cv.imwrite('res_cv.jpg', ex_im_cv)

    bf = cv.BFMatcher()
    matches = bf.knnMatch(des2, des1, k=2)

    good = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good.append([m])

    print(str(len(good)) + "/10")
    if len(good) >= 5:
        # collect the matched points as an (N, 2) array and box them
        src_pts = np.float32([kp2[m[0].queryIdx].pt
                              for m in good]).reshape(-1, 2)
        min_x, min_y = np.int32(src_pts.min(axis=0))
        max_x, max_y = np.int32(src_pts.max(axis=0))
        cv.rectangle(img, (min_x, min_y), (max_x, max_y), 255, 2)

        return img
Example #14
def main():
    """
    The main function of the file.
    """
    parser = argparse.ArgumentParser(description="Runs sift for an image over a video with matching.")
    parser.add_argument("targetImage", nargs=1, type=str, default=None,
        help="The image to search for in the video.")
    parser.add_argument("inputVideo", nargs=1, type=str, default=None,
        help="The video to search inside of.")
    parser.add_argument("--fps", nargs=1, type=int, default=[30], required=False,
        help="The frames per second to display the video at.")
    parser.add_argument("--ratioThresh", nargs=1, type=float, default=[RATIO_THRESHOLD], required=False,
        help="The threshold to use for the ratio test during feature matching.")
    args = parser.parse_args()

    # Load the target image, detect features
    targetImage = cv2.imread(args.targetImage[0], cv2.IMREAD_GRAYSCALE)
    if targetImage is None:
        print("Could not open image at \"" + args.targetImage[0] + "\"")
        exit()
    sift = cv2.SIFT_create()
    targetPoints, targetDescriptions = sift.detectAndCompute(targetImage, None)

    # Create the matching system and match features for each frame
    matcher = cv2.BFMatcher()
    writtenImage = None
    capture = cv2.VideoCapture(args.inputVideo[0])
    if not capture.isOpened():
        print("Could not open the video at \"" + args.inputVideo[0] + "\"")
        exit()
    while capture.isOpened():
        startTime = time.time()
        ret, videoImg = capture.read()
        if not ret: break
        keypoints, descriptions = sift.detectAndCompute(videoImg, None)
        
        # Run the matching system
        matches = matcher.knnMatch(targetDescriptions, descriptions, k=2)

        # Apply ratio test
        good = []
        for match, other in matches:
            if match.distance < args.ratioThresh[0]*other.distance:
                good.append([match])

        # Draw, display
        writtenImage = cv2.drawMatchesKnn(targetImage, targetPoints, videoImg, keypoints,
            good, writtenImage, singlePointColor=(255, 0, 255), flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
        cv2.imshow(WINDOW_NAME, writtenImage)
        if cv2.waitKey(1) & 0xFF == ord('q'): break
        if args.fps is not None:
            sleepTime = (1 / args.fps[0]) - (time.time() - startTime)
            if sleepTime > 0: time.sleep(sleepTime)
    capture.release()

    cv2.imshow(WINDOW_NAME, writtenImage)
    cv2.waitKey(0)
    cv2.destroyWindow(WINDOW_NAME)

    exit()
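
# RATIO_THRESHOLD and WINDOW_NAME are module-level constants not shown above;
# plausible definitions consistent with their usage (values are assumptions):
RATIO_THRESHOLD = 0.75          # Lowe's ratio-test threshold
WINDOW_NAME = "SIFT matching"   # hypothetical window title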
Example #15
def find_kp_and_matrix(images):
    (left, right) = images
    # gray1 = cv2.cvtColor(right, cv2.COLOR_BGR2GRAY)
    # gray2 = cv2.cvtColor(left, cv2.COLOR_BGR2GRAY)
    sift = cv2.SIFT_create()
    kp1, des1 = sift.detectAndCompute(left, None)
    kp2, des2 = sift.detectAndCompute(right, None)
    # cv2.imshow('keypoints left image:', cv2.drawKeypoints(left, kp2, None))
    # cv2.imshow('keypoints right image:', cv2.drawKeypoints(right, kp1, None))
    # cv2.waitKey(0)
    print('number of keypoints:', len(des1), len(des2))
    match = cv2.BFMatcher()
    matches = match.knnMatch(des1, des2, k=2)
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)
    print('number of matches:', len(good))
    MIN_MATCH_COUNT = 10
    if len(good) <= MIN_MATCH_COUNT:
        return None
    dst_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    src_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    return M
Example #16
def get_sift_descriptors(img, keypoint_step_size=settings.KEYPOINTS_STEP_SIZE):
    """
    Gets the SIFT descriptors using the specified keypoint_step_size and returns them

    Args:
        img         (np.ndarray): image loaded using numpy
        keypoint_step_size (int): step size & keypoint diameter

    Returns:
        np.array with shape (num of key points, 128-SIFT descriptors)
    """
    assert isinstance(img, np.ndarray)
    assert min(img.shape[:2]) >= keypoint_step_size, \
        "keypoint_step_size must be less than or equal to the smaller image dimension"

    sift = cv.SIFT_create()
    keypoints = [
        cv.KeyPoint(x, y, keypoint_step_size)
        for y in range(0, img.shape[0], keypoint_step_size)
        for x in range(0, img.shape[1], keypoint_step_size)
    ]
    kp, des = sift.compute(img, keypoints)
    # kp, des = sift.detectAndCompute(img, None)
    # kp_img = cv.drawKeypoints(img, kp, img, flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    # plt.imshow(kp_img)
    # plt.show()

    if des is not None:
        return des

    return np.empty([0, 128], dtype=np.float32)
Example #17
    def __init__(self, args, rcnn_model):
        self.args = args
        self.device = torch.device(
            'cuda') if torch.cuda.is_available() else torch.device('cpu')
        self.model = rcnn_model
        self.CLASS_NAMES = ["__background__", "corn_stem"]
        self.torch_trans = Transforms.Compose([Transforms.ToTensor()])
        self.sift = cv2.SIFT_create()
        self.bf_matcher = cv2.BFMatcher()
        self.frame_buffer = []

        # compute rays in advance
        self.K = np.array([[615.311279296875, 0.0, 430.1778869628906],
                           [0.0, 615.4699096679688, 240.68307495117188],
                           [0.0, 0.0, 1.0]])
        self.im_w = 848
        self.im_h = 480
        x, y = np.arange(self.im_w), np.arange(self.im_h)
        xx, yy = np.meshgrid(x, y)
        points = np.stack([xx, yy], axis=2).reshape(-1, 2)
        self.rays = np.dot(np.insert(points, 2, 1, axis=1),
                           np.linalg.inv(self.K).T).reshape(
                               self.im_h, self.im_w, 3)
        # intrinsic matrix for realsense d435 480 x 848

        self.d_plane = -0.5
Example #18
def detect_matches_coordinates(tubes_examples: List[np.array],
                               image: np.array,
                               tolerance=0.75):
    # Initiate SIFT detector
    sift = cv2.SIFT_create()
    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(tubes_examples[0], None)
    for tube in tubes_examples[1:]:
        kp12, des12 = sift.detectAndCompute(tube, None)
        kp1 = kp1 + kp12
        des1 = np.concatenate((des1, des12))

    kp2, des2 = sift.detectAndCompute(image, None)
    # BFMatcher with default params
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    # Apply ratio test
    good_matches = []
    for m, n in matches:
        if m.distance < tolerance * n.distance:
            good_matches.append(m)

    coordinates = []
    for match in good_matches:
        idx = match.trainIdx
        keypoint = kp2[idx]
        coordinates.append((int(keypoint.pt[0]), int(keypoint.pt[1])))

    return coordinates
Example #19
def prepare_rank_matcher_data():
    print("Preparing rank matcher...", end='', flush=True)
    for rank in range(1, 36):
        rank_template = cv2.cvtColor(cv2.imread(f"rank_images/{rank}.png"), cv2.COLOR_BGR2GRAY)
        sift = cv2.SIFT_create()
        rank_matcher_data[rank] = sift.detectAndCompute(rank_template, None)
    print("Done")
Example #20
def demo_sift_bfmatcher_knn():
    img1 = cv2.imread(
        '/media/admini/lavie/dataset/birdview_dataset/00/submap_1.png', 0)
    img2 = cv2.imread(
        '/media/admini/lavie/dataset/birdview_dataset/00/submap_3.png', 0)

    # Initiate SIFT detector
    sift = cv2.SIFT_create(nfeatures=200,
                           contrastThreshold=0.002,
                           edgeThreshold=15,
                           sigma=1.2)

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    # BFMatcher with default params
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)

    # Apply ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.95 * n.distance:
            good.append([m])

    # cv2.drawMatchesKnn expects list of lists as matches.
    img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)

    cv2.imshow('img3', img3)
    cv2.waitKey(0)
Example #21
def rank_detection(rank_card):
    # https://docs.opencv.org/master/dc/dc3/tutorial_py_matcher.html
    rank_card = cv2.cvtColor(rank_card, cv2.COLOR_BGR2GRAY)

    highest_match = (0, 0)
    sift = cv2.SIFT_create()
    kp1, des1 = sift.detectAndCompute(rank_card, None)  # compute once, reuse for every rank

    for rank in range(1, 36):
        kp2, des2 = rank_matcher_data[rank]

        flann_index_kdtree = 1
        index_params = dict(algorithm=flann_index_kdtree, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)

        good_matches = 0
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good_matches += 1

        if good_matches > highest_match[1]:
            highest_match = (rank, good_matches)
    return highest_match[0]
Example #22
def draw_key_points(image, detailed=True, savefile=None):
    '''
    @brief  Draws the found keypoints on top of the image

    @param image        The original image
    @param detailed     Whether or not to draw keypoints based on relative 
                        scale
    @param savefile     Save location for resulting images. Does not save if 
                        None
    @return             The newly generated image containing the drawn keypoints
    '''
    sift = cv2.SIFT_create()

    if len(image.shape) > 2:
        im_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        im_gray = image

    kp = sift.detect(im_gray, None)  # detect on the grayscale version

    if detailed:
        im_kp = cv2.drawKeypoints(im_gray, kp, image, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    else:
        im_kp = cv2.drawKeypoints(im_gray, kp, image)

    if savefile:
        cv2.imwrite(savefile, im_kp)
    
    return im_kp
Example #23
def SIFT(src, dst):
    # SIFT
    sift = cv2.SIFT_create()

    kp1_SIFT, desc1_SIFT = sift.detectAndCompute(src, None)
    kp2_SIFT, desc2_SIFT = sift.detectAndCompute(dst, None)

    # which keypoints/descriptor to use?
    kp1 = kp1_SIFT
    kp2 = kp2_SIFT
    desc1 = desc1_SIFT
    desc2 = desc2_SIFT

    # (brute force) matching of descriptors
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(desc1, desc2, k=2)

    # Apply ratio test
    good_matches = []
    good_matches_without_list = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good_matches.append([m])
            good_matches_without_list.append(m)
    # plt.imshow(img3),plt.show()
    src_pts = np.float32([
        kp1[m.queryIdx].pt for m in good_matches_without_list
    ]).reshape(-1, 1, 2)
    dst_pts = np.float32([
        kp2[m.trainIdx].pt for m in good_matches_without_list
    ]).reshape(-1, 1, 2)
    return src_pts, dst_pts
Example #24
    def initialize(self, device):
        if self.type == "sift":
            self.model = cv.SIFT_create(nfeatures=self.n_keypoints)
        elif self.type == "orb":
            self.model = cv.ORB_create(nfeatures=self.n_keypoints)
        elif self.type == "brisk":
            self.model = cv.BRISK_create()
Example #25
def demo_sift_bfmatcher():
    img1 = cv2.imread(
        '/media/admini/lavie/dataset/birdview_dataset/00/submap_1.png',
        0)  # queryImage
    img2 = cv2.imread(
        '/media/admini/lavie/dataset/birdview_dataset/00/submap_3.png',
        0)  # trainImage

    # Initiate SIFT detector
    sift = cv2.SIFT_create(nfeatures=200,
                           contrastThreshold=0.002,
                           edgeThreshold=15,
                           sigma=1.2)

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    # create BFMatcher object; SIFT descriptors are float vectors, so use
    # NORM_L2 (NORM_HAMMING is only valid for binary descriptors like ORB)
    bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)

    # Match descriptors.
    matches = bf.match(des1, des2)

    # Sort them in the order of their distance.
    matches = sorted(matches, key=lambda x: x.distance)

    img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches, None, flags=2)

    cv2.imshow('img3', img3)
    cv2.waitKey(0)
Example #26
def sift(filename):
    img = cv.imread(filename)
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    sift = cv.SIFT_create()
    kp = sift.detect(gray, None)
    img = cv.drawKeypoints(gray, kp, img)
    cv.imwrite('sift-' + filename, img)
Example #27
def sift_compare(sample_path, query_path):
    # Create the SIFT feature extractor
    comparisonImageList = []  # collects (comparison image, match ratio) results
    sift = cv2.SIFT_create()
    # Create the FLANN matcher
    FLANN_INDEX_KDTREE = 1  # kd-tree index (the original used 0, which selects linear search)
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)

    sample_image = cv2.imread(sample_path, 0)
    kp1, des1 = sift.detectAndCompute(sample_image, None)  # features of the sample image
    files = glob.glob(query_path + '/*.png')

    for p in files:

        queryImage = cv2.imread(p, 0)
        kp2, des2 = sift.detectAndCompute(queryImage, None)  # features of the query image
        # k=2 returns the two best matches per sample feature, so weak
        # matches can be filtered out with the ratio test
        matches = flann.knnMatch(des1, des2, k=2)
        (matchNum, matchesMask) = getMatchNum(matches, 0.85)  # match quality via the ratio test
        matchRatio = matchNum * 100 / len(matches)
        drawParams = dict(matchColor=(0, 255, 0),
                          singlePointColor=(255, 0, 0),
                          matchesMask=matchesMask,
                          flags=0)
        comparisonImage = cv2.drawMatchesKnn(sample_image, kp1, queryImage,
                                             kp2, matches, None, **drawParams)
        comparisonImageList.append((comparisonImage, matchRatio))  # record the result
    comparisonImageList.sort(key=lambda x: x[1], reverse=True)  # sort once, by match ratio
    return comparisonImageList
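
# getMatchNum is not defined in the snippet; a minimal sketch consistent with
# its call site (a ratio-test match count plus a matchesMask suitable for
# cv2.drawMatchesKnn) might be:
def getMatchNum(matches, ratio):
    matchesMask = [[0, 0] for _ in matches]
    matchNum = 0
    for i, pair in enumerate(matches):
        if len(pair) == 2 and pair[0].distance < ratio * pair[1].distance:
            matchesMask[i] = [1, 0]  # draw only the best match of the pair
            matchNum += 1
    return matchNum, matchesMask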
Example #28
    def __init__(self, K):
        self.K = K
        self.match_ratio = 0.75
        self.max_features = 500
        self.kp_list_last = []
        self.des_list_last = []

        if False:
            # ORB (much faster)
            self.detector = cv2.ORB_create(self.max_features)
            self.extractor = self.detector
            norm = cv2.NORM_HAMMING
            self.matcher = cv2.BFMatcher(norm)
        else:
            # SIFT (in sparse cases can find better features, but a
            # lot slower)
            self.detector = cv2.SIFT_create(nfeatures=self.max_features,
                                            nOctaveLayers=5)
            self.extractor = self.detector
            norm = cv2.NORM_L2
            FLANN_INDEX_KDTREE = 1  # bug: flann enums are missing
            FLANN_INDEX_LSH = 6
            flann_params = {'algorithm': FLANN_INDEX_KDTREE, 'trees': 5}
            self.matcher = cv2.FlannBasedMatcher(
                flann_params, {})  # bug : need to pass empty dict (#1329)
Example #29
def run():
    cat_amount = 200
    car_amount = 200
    data_size = cat_amount + car_amount

    # cat 0 car 1
    ml_data = get_images(cat_amount, 'cat') + get_images(car_amount, 'car')
    ml_target = [0] * cat_amount + [1] * car_amount

    sift = cv2.SIFT_create()
    des_list = []
    for data in ml_data:
        data = cv2.resize(data, (300, 300))
        kpts = sift.detect(data)
        _, des = sift.compute(data, kpts)
        des_list.append(des)

    descriptors = np.vstack(des_list)  # stack all image descriptors into one array

    k_means = 20
    voc, variance = kmeans(descriptors, k_means, 1)

    im_features = np.zeros((data_size, k_means), 'float32')
    for i in range(data_size):
        words, distance = vq(des_list[i], voc)
        for word in words:
            im_features[i][word] += 1

    print('data_size =', data_size)

    result = train_test_split(im_features, ml_target, test_size=0.2, random_state=0)
    print('Accuracy =', get_clf_result(*result))
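
# get_images and get_clf_result are project helpers not shown here.
# get_clf_result receives the unpacked output of train_test_split, so a
# plausible sketch (a linear SVM is an assumption) is:
from sklearn.svm import LinearSVC

def get_clf_result(x_train, x_test, y_train, y_test):
    clf = LinearSVC(random_state=0)
    clf.fit(x_train, y_train)
    return clf.score(x_test, y_test)  # accuracy on the held-out split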
Example #30
def sift_ssim(image1, image2, reduction_factor=0.1, window_size=27):
    sift = cv2.SIFT_create()
    kp1, descriptor1 = sift.detectAndCompute(image1, None)
    kp2, descriptor2 = sift.detectAndCompute(image2, None)
    kp1 = kp_to_array(kp1)
    kp2 = kp_to_array(kp2)
    descriptor1, kp1 = calculate_k_descriptors(descriptor1, kp1,
                                               reduction_factor)
    descriptor2, kp2 = calculate_k_descriptors(descriptor2, kp2,
                                               reduction_factor)
    matches = find_match_points(descriptor1, descriptor2)
    ssims = np.array([])

    for match in matches:
        p1 = kp1[match.queryIdx]
        p2 = kp2[match.trainIdx]
        window1 = get_window(image1, p1, window_size)
        window2 = get_window(image2, p2, window_size)
        if window1.shape == (window_size,
                             window_size) and window2.shape == (window_size,
                                                                window_size):
            ssims = np.append(ssims, ssim(window1, window2))

    score = np.sum(ssims) / len(descriptor1)
    Q_K = len(matches) / len(descriptor1)

    return {'sift_ssim': np.around(score, 5), 'Q/K': np.around(Q_K, 4)}
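
# kp_to_array and get_window are not shown; sketches consistent with their
# usage above might look like this (find_match_points, presumably a
# BFMatcher wrapper, is omitted):
def kp_to_array(keypoints):
    # keep only the (x, y) coordinates of each keypoint
    return np.array([kp.pt for kp in keypoints])

def get_window(image, point, window_size):
    # crop a window_size x window_size patch centered on point (may come out
    # smaller at the image border, which the shape check above guards against)
    x, y = int(point[0]), int(point[1])
    half = window_size // 2
    return image[y - half:y + half + 1, x - half:x + half + 1]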