Example #1
def findMatchPair_ORB(tar_img,
                      ref_img,
                      ptsNum=10000,
                      save=False,
                      fileName='ORB_matching_pair.npy',
                      RANSAC=True,
                      projError=50):
    mask = None
    H = None
    # Initiate ORB detector
    orb = cv2.ORB_create(ptsNum)
    # find the keypoints and descriptors with ORB
    ref_kp, ref_des = orb.detectAndCompute(ref_img, None)
    tar_kp, tar_des = orb.detectAndCompute(tar_img, None)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(ref_des, tar_des)
    src_pts = np.array([tar_kp[match.trainIdx].pt for match in matches])
    dst_pts = np.array([ref_kp[match.queryIdx].pt for match in matches])
    if RANSAC:
        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, projError)
        src_pts = np.array([
            tar_kp[match.trainIdx].pt for idx, match in enumerate(matches)
            if mask[idx] == 1
        ])
        dst_pts = np.array([
            ref_kp[match.queryIdx].pt for idx, match in enumerate(matches)
            if mask[idx] == 1
        ])

    if save:
        with open(fileName, 'wb') as f:
            np.save(f, src_pts)
            np.save(f, dst_pts)
        print('ORB matching pairs saved')

    return src_pts, dst_pts, tar_kp, ref_kp, matches, mask
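A minimal usage sketch for findMatchPair_ORB; the file names are placeholders and cv2/numpy are assumed to be imported as in the snippet.
tar = cv2.imread('target.jpg', cv2.IMREAD_GRAYSCALE)      # placeholder file names
ref = cv2.imread('reference.jpg', cv2.IMREAD_GRAYSCALE)
src_pts, dst_pts, tar_kp, ref_kp, matches, mask = findMatchPair_ORB(tar, ref, ptsNum=5000)
print(f'{len(src_pts)} RANSAC inliers out of {len(matches)} raw matches')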
def compareKeyPoints(comparisons, image):
    """[Compares an input image to the comparisons list which contains all the predefined key points for the symbols]
    
    Arguments:
        comparisons {[List]} -- [the list of predetermined key points and descriptors for the symbols]
        image {[Object]} -- [the input image]
    
    Returns:
        [String] -- [the classified symbol label]
    """
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    shape = image.shape
    cropped = im.cropImage(image, [
        int(shape[1] * 0.20),
        int(shape[0] * 0.05),
        int(shape[1] * 0.80),
        int(shape[0] * 0.46)
    ])
    imageFeatures = getKeyPoints(cropped)
    if imageFeatures[0]:
        bestMatch = [], []
        bestValue = np.inf
        for comparitor in comparisons:
            matches = bf.match(comparitor[2], imageFeatures[1])
            sort_match = sorted(matches, key=lambda x: x.distance)
            firstFive = sort_match[:5]
            average = np.mean([x.distance for x in firstFive])
            if average < bestValue:
                bestMatch = (comparitor, matches)
                bestValue = average
        returnVal = bestMatch[0][3].split(".")[0]
    else:
        returnVal = "(none)"
    return returnVal
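The snippet relies on im.cropImage and a getKeyPoints helper that are not shown; a plausible sketch of getKeyPoints, assuming it returns an ORB (keypoints, descriptors) pair so that imageFeatures[0] is the keypoint list and imageFeatures[1] the descriptors:
def getKeyPoints(image, ptsNum=500):
    # Assumed helper: detect ORB keypoints and binary descriptors on the cropped image.
    orb = cv2.ORB_create(ptsNum)
    kp, des = orb.detectAndCompute(image, None)
    return kp, des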
Example #3
def q3(image, sift1):
    center = get_img_center(image)
    # a) Rotate the given image clockwise by 60 degrees.
    rot = imutils.rotate_bound(image, 60)
    #rot = rotate(image, center[0], center[1], 60)
    plt.imshow(rot), plt.show()
    # b) Extract the SIFT features and show the keypoints on the rotated image using the same
    # parameter setting as for Task 1 (for the reduced number of keypoints).
    keyPoints1, des1 = sift1.detector.detectAndCompute(image, None)
    keyPoints2, des2 = sift1.detector.detectAndCompute(rot, None)

    bf = cv2.BFMatcher()
    # c) The keypoints in both images are similar, which shows that they share the same common features.

    # d) Match the SIFT descriptors of the keypoints of the rotated image with those of the original
    #image using the nearest-neighbour distance ratio method
    matches = bf.match(des1, des2)
    # We sort them in ascending order of their distances so that best matches (with low distance) come to front
    matches = sorted(matches, key=lambda x: x.distance)

    # Show the keypoints of the 5 best-matching descriptors on both the original and the scaled image.
    img_q3 = cv2.drawMatches(
        image, keyPoints1, rot, keyPoints2, matches[:7], None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
    plt.imshow(img_q3), plt.show()
    img3 = cv2.drawKeypoints(image, keyPoints1, image)
    cv2.imwrite('b3.jpg', img3)
    cv2.imwrite('d3.jpg', img_q3)
Example #4
def ORB_matching(img1, img2):
    # Initiate ORB detector
    orb = cv.ORB_create(nfeatures = 120000) #specifying maximum nr of keypoints to locate

    image1 = cv.cvtColor(img1, cv.COLOR_BGR2RGB)
    image2 = cv.cvtColor(img2, cv.COLOR_BGR2RGB)
    image1_gray = cv.cvtColor(image1, cv.COLOR_RGB2GRAY)
    image2_gray = cv.cvtColor(image2, cv.COLOR_RGB2GRAY)

    # find the keypoints and descriptors with ORB
    kp1, des1 = orb.detectAndCompute(image1_gray, None)
    kp2, des2 = orb.detectAndCompute(image2_gray, None)

    # create BFMatcher object
    bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)

    # Match descriptors.
    matches = bf.match(des1, des2)

    # Sort them in the order of their distance.
    matches = sorted(matches, key = lambda x:x.distance)

    # Need to draw only good matches, using 4500 best
    p1 = np.array([kp1[m.queryIdx].pt for m in matches[:4500]])
    p2 = np.array([kp2[m.trainIdx].pt for m in matches[:4500]])

    des = des1[[m.queryIdx for m in matches[:4500]], :]

    print(f"Found {len(matches)} matches. Using {len(p1)} matches with shortest distance.")

    return p1, p2, des
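A usage sketch, assuming two overlapping colour images on disk (file names are placeholders) and the cv alias used in the snippet.
import cv2 as cv

img1 = cv.imread('left.jpg')    # placeholder file names
img2 = cv.imread('right.jpg')
p1, p2, des = ORB_matching(img1, img2)
# p1 and p2 are (N, 2) arrays of matched pixel coordinates; des holds the corresponding img1 descriptors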
def keypointsMatcher(queryImage, testFolder="./test", modelFolder="./model"):
    sift = cv2.xfeatures2d.SIFT_create()
    img1 = cv2.imread(os.path.join(testFolder, queryImage), 0)  # queryImage

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)

    # read through the model folder
    listOfModel = os.listdir(modelFolder)
    print("Number of models " + str(len(listOfModel)))

    for model in listOfModel:

        keyAndDescriptor = readDescriptorFileAndDrawKp(model)
        # print(keyAndDescriptor)

        kp2 = keyAndDescriptor[0]
        des2 = keyAndDescriptor[1]

        # BFMatcher with default params
        bf = cv2.BFMatcher()
        matches = bf.knnMatch(des1, des2, k=2)
        print(len(np.array(matches).shape))
        print(np.array(matches).shape[1])
        # Apply ratio test
        good = []
        # good_without_list = []

        if len(np.array(matches).shape) == 2 and np.array(
                matches).shape[1] == 2:
            print(model)
            for m, n in matches:
                if m.distance < 0.9 * n.distance:
                    good.append([m])
Example #6
    def bag_of_words(self, training_paths):
        if os.path.isfile(BOW_DICTIONARY_FILENAME):
            print('Reading BOW from file')
            with open(BOW_DICTIONARY_FILENAME, 'rb') as f:
                dictionary = np.genfromtxt(f, delimiter=",", dtype=np.float32)
        else:
            print('Adding SIFT descriptors to BOW')
            sift_bar = Bar('SIFT', max=len(training_paths))
            for p in training_paths:
                image = cv2.imread(p)
                gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                kp, dsc = self.sift.detectAndCompute(gray, None)
                self.BOW.add(dsc)
                sift_bar.next()
            sift_bar.finish()
            print(datetime.datetime.now())
            print('Building BOW cluster')
            dictionary = self.BOW.cluster()
            print('Saving BOW dictionary to file')
            with open(BOW_DICTIONARY_FILENAME, 'wb') as f:
                np.savetxt(f, dictionary, delimiter=",")

        print(datetime.datetime.now())
        print('Building BOW dictionary')
        self.bowDiction = cv2.BOWImgDescriptorExtractor(
            self.sift2, cv2.BFMatcher(cv2.NORM_L2))
        self.bowDiction.setVocabulary(dictionary)
        print("BOW dictionary: ", np.shape(dictionary))
Example #7
def calculate_ave_dist(desa, desb):
    bf = cv.BFMatcher()
    matches = bf.match(desa, desb)
    matches = sorted(matches, key=lambda x: x.distance)
    nn = max(1, int(len(matches) / 100))  # use the best 1% of matches, at least one
    dists = [m.distance for m in matches[0:nn]]
    aved = sum(dists) / len(dists)

    return aved
Example #8
def get_matches(train_desc, new_desc):
    '''
        Match descriptors and sort them in the order of their distance
    '''
    #cv2.NORM_HAMMING2
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(train_desc, new_desc)
    dmatches = sorted(matches, key=lambda x: x.distance)

    return dmatches
def count_matched_point(desa, desb):
    bf = cv.BFMatcher()
    matches = bf.knnMatch(desa, desb, k=2)
    # list of good matches (ratio test)
    ratio = 0.8
    matched = []
    for match1, match2 in matches:
        if match1.distance < ratio * match2.distance:
            matched.append([match1])

    return len(matched)
Example #10
    def get_bf_matches(self):
        bf = cv2.BFMatcher()
        matches = bf.knnMatch(self.query_descs, self.train_descs, k=2)

        good = []

        for m, n in matches:
            if m.distance < DISTANCE_CORRECT * n.distance:
                good.append(m)

        matches = sorted(good, key=lambda x: x.distance)
        return matches
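DISTANCE_CORRECT is a module-level ratio-test threshold defined elsewhere in the project; a typical value (an assumption, not taken from the source) would be:
DISTANCE_CORRECT = 0.75  # assumed Lowe ratio-test threshold; the original project may use a different value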
Example #11
def orb_stitcher(imgs):
    # find the keypoints with ORB
    orb1 = cv2.ORB_create(1000, 1.1, 13)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)

    kp_master, des_master = orb1.detectAndCompute(imgs[0], None)
    kp_secondary, des_secondary = orb1.detectAndCompute(imgs[1], None)

    matches = bf.match(des_secondary, des_master)
    # Sort them in the order of their distance.
    matches = sorted(matches, key=lambda x: x.distance)

    selected = []
    for m in matches:
        if m.distance < 40:
            selected.append(m)

    out_img = cv2.drawMatches(imgs[1], kp_secondary, imgs[0], kp_master,
                              selected, None)
    cv2.namedWindow('www', cv2.WINDOW_NORMAL)
    cv2.imshow('www', out_img)
    # cv2.imwrite('matches.jpg',out_img)
    cv2.waitKey(0)

    warped = None
    if len(selected) > 10:

        dst_pts = np.float32([kp_master[m.trainIdx].pt
                              for m in selected]).reshape(-1, 1, 2)
        src_pts = np.float32([kp_secondary[m.queryIdx].pt
                              for m in selected]).reshape(-1, 1, 2)

        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

        h, w = imgs[0].shape[0:2]
        pts = np.float32([[0, 0], [w, 0], [w, h], [0, h],
                          [0, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        max_extent = np.max(dst, axis=0)[0].astype(int)[::-1]
        sz_out = (max(max_extent[1],
                      imgs[0].shape[1]), max(max_extent[0], imgs[0].shape[0]))

        # img2 = cv2.polylines(imgs[0], [np.int32(dst)], True, [0,255,0], 3, cv2.LINE_AA)

        cv2.namedWindow('w', cv2.WINDOW_NORMAL)

        warped = cv2.warpPerspective(imgs[1], M, dsize=sz_out)
        img_for_show = warped.copy()
        img_for_show[0:imgs[0].shape[0], 0:imgs[0].shape[1], 1] = imgs[0][:, :,
                                                                          1]
        cv2.imshow('w', img_for_show)
        cv2.waitKey(0)
    return warped
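A usage sketch for orb_stitcher, assuming two overlapping photos on disk (file names are placeholders):
imgs = [cv2.imread('master.jpg'), cv2.imread('secondary.jpg')]  # placeholder file names
warped = orb_stitcher(imgs)
if warped is not None:
    cv2.imwrite('stitched.jpg', warped)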
Example #12
def isImageSimilar(des1, des2):
    # Creates BFMatcher object
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    # Matches descriptors.
    matches = bf.match(des1, des2)
    numberOfMatches = len(matches)
    if numberOfMatches == 0:
        return None, None
    # Calculate the average match distance
    avgDistance = 0
    for match in matches:
        avgDistance = avgDistance + match.distance
    avgDistance = avgDistance / numberOfMatches
    return numberOfMatches, avgDistance
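A usage sketch; the descriptors are assumed to come from a binary-descriptor detector such as ORB, and the file names are placeholders:
orb = cv2.ORB_create(1000)
_, des1 = orb.detectAndCompute(cv2.imread('a.jpg', 0), None)   # placeholder file names
_, des2 = orb.detectAndCompute(cv2.imread('b.jpg', 0), None)
count, avg_dist = isImageSimilar(des1, des2)
if count is not None:
    print(f'{count} matches, average Hamming distance {avg_dist:.1f}')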
Example #13
    def matchAKAZE(self, projErr=5, crossCheck=True):
        matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=crossCheck)
        matches = matcher.match(self.tar_des, self.ref_des)
        src_pts = np.array([self.tar_kps[match.queryIdx].pt for match in matches])
        dst_pts = np.array([self.ref_kps[match.trainIdx].pt for match in matches])
        _, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, projErr)
        src_pts = np.array([self.tar_kps[match.queryIdx].pt for idx,
                            match in enumerate(matches) if mask[idx] == 1])
        dst_pts = np.array([self.ref_kps[match.trainIdx].pt for idx,
                            match in enumerate(matches) if mask[idx] == 1])
        self.mask = mask
        self.matches = matches
        self.matchNum = len(mask[mask == 1])  # count the RANSAC inliers
        self.src_pts = src_pts
        self.dst_pts = dst_pts
Example #14
def bf_matcher(des1, des2):
    # Note: kp1 and kp2 are assumed to be module-level keypoint lists computed elsewhere.
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)  # match the descriptors
    draw_good = []
    prin_good = []
    p1 = []
    p2 = []
    for m, n in matches:
        if m.distance < 0.88 * n.distance:  # tune the ratio threshold as needed
            draw_good.append([m])
            prin_good.append(m)
    for match in prin_good:
        p1.append(kp1[match.queryIdx].pt)
        p2.append(kp2[match.trainIdx].pt)
    # img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, draw_good, None, flags=2)  # draw the match lines
    return p1
Example #15
def match(test, exemplar_descs):
    """using ORB decriptors compute match between test and exemplar_descs.
    
    :param test: image to test
    :type test: cv2 image
    :param exemplar: exemplar descriptions
    :type exemplar: list of orb descriptors
    :return: distance sorted matches
    :rtype: list
    """
    # create an ORB object
    orb = cv2.ORB_create()

    # convert to GRAYSCALE
    test_g = convert.bgr_to_gray(test)

    # Find the keypoints and descriptors with ORB
    _, descs = orb.detectAndCompute(test_g, None)

    # create a brute force matcher object
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

    # init to hold the best prediction
    prediction = ('(none)', np.inf)

    # match with each exemplar
    for d in exemplar_descs:
        # match the descriptors
        matches = bf.match(d[1], descs)

        # sort in order of distance, lowest first
        sorted_matches = sorted(matches, key=lambda x: x.distance)

        # extract just the distances
        distances = [m.distance for m in sorted_matches]

        # calculate a score
        score = sum(distances[:4])

        # no matches
        if score == 0:
            score = np.inf
        # update prediction because this match is closer
        if score < prediction[1]:
            prediction = (d[0], score)

    return prediction[0]
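A setup sketch for the exemplar list; the labels and file names are placeholders, exemplar_descs is assumed to be a list of (label, descriptors) pairs as the loop above implies, and convert.bgr_to_gray is assumed to be a thin cv2.cvtColor wrapper from the original project:
orb = cv2.ORB_create()
exemplar_descs = []
for label, path in [('circle', 'circle.png'), ('square', 'square.png')]:   # placeholders
    gray = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
    _, des = orb.detectAndCompute(gray, None)
    exemplar_descs.append((label, des))
print(match(cv2.imread('unknown.png'), exemplar_descs))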
Example #16
def viewImage():
    img_ = right()
    # img_ = cv2.resize(img_, (0,0), fx=1, fy=1)
    img1 = cv2.cvtColor(img_, cv2.COLOR_BGR2GRAY)
    img = rear()

    # img = cv2.resize(img, (0,0), fx=1, fy=1)
    img2 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    sift = cv2.xfeatures2d.SIFT_create()
    # find the key points and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    match = cv2.BFMatcher()
    matches = match.knnMatch(des1, des2, k=2)
    good = []
    for m, n in matches:
        if m.distance < 0.8 * n.distance:
            good.append(m)

    draw_params = dict(
        matchColor=(0, 255, 0),  # draw matches in green color
        singlePointColor=None,
        flags=2)
    img3 = cv2.drawMatches(img_, kp1, img, kp2, good, None, **draw_params)
    cv2.imshow("original_image_drawMatches.jpg", img3)

    MIN_MATCH_COUNT = 10
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        h, w = img1.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
        cv2.imshow("original_image_overlapping.jpg", img2)

    # dst = np.concatenate((rightImg, rearImg), 1)
    # cv2.imshow("Image", )
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #17
def getPoints_SIFT(im1, im2):
    p1 = []
    p2 = []
    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(im1, None)
    kp2, des2 = sift.detectAndCompute(im2, None)

    # BFMatcher with default params
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)

    # Apply ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.4 * n.distance:
            good.append([m])
            img1_idx = m.queryIdx
            img2_idx = m.trainIdx
            (x1, y1) = kp1[img1_idx].pt
            (x2, y2) = kp2[img2_idx].pt
            p1.append((x1, y1))
            p2.append((x2, y2))

    # cv2.drawMatchesKnn expects list of lists as matches.
    img_match = np.zeros(
        (im1.shape[0] + im2.shape[0], im1.shape[1] + im2.shape[1]))
    img_match = cv2.drawMatchesKnn(im1,
                                   kp1,
                                   im2,
                                   kp2,
                                   good,
                                   img_match,
                                   flags=2)
    # print(len(p1))
    plt.imshow(img_match), plt.show()
    p1 = np.asarray(p1).T
    p2 = np.asarray(p2).T
    # p1, p2 = get_coordinate_from_sift(matches, kp1, kp2)
    return p1, p2
Example #18
def findMatchPair_GMS(tar_img,
                      ref_img,
                      ptsNum=10000,
                      save=False,
                      fileName='GMS_matching_pair.npy',
                      RANSAC=True,
                      projError=50):
    mask = None
    # Initiate ORB detector
    orb = cv2.ORB_create(ptsNum, fastThreshold=0)
    # find the keypoints and descriptors with ORB
    ref_kp, ref_des = orb.detectAndCompute(ref_img, None)
    tar_kp, tar_des = orb.detectAndCompute(tar_img, None)

    # bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches = bf.match(ref_des, tar_des)
    matches_GMS = cv2.xfeatures2d.matchGMS(ref_img.shape[0:2],
                                           tar_img.shape[0:2],
                                           ref_kp,
                                           tar_kp,
                                           matches,
                                           withRotation=True)
    src_pts = np.array([tar_kp[match.trainIdx].pt for match in matches_GMS])
    dst_pts = np.array([ref_kp[match.queryIdx].pt for match in matches_GMS])
    if RANSAC:
        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, projError)
        src_pts = np.array([
            tar_kp[match.trainIdx].pt for idx, match in enumerate(matches_GMS)
            if mask[idx] == 1
        ])
        dst_pts = np.array([
            ref_kp[match.queryIdx].pt for idx, match in enumerate(matches_GMS)
            if mask[idx] == 1
        ])

    if save:
        with open(fileName, 'wb') as f:
            np.save(f, src_pts)
            np.save(f, dst_pts)

        print('GMS matching pairs saved')
    return src_pts, dst_pts, tar_kp, ref_kp, matches_GMS, mask
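A usage sketch; note that cv2.xfeatures2d.matchGMS requires the opencv-contrib-python build, and the file names below are placeholders:
tar = cv2.imread('target.jpg', cv2.IMREAD_GRAYSCALE)       # placeholder file names
ref = cv2.imread('reference.jpg', cv2.IMREAD_GRAYSCALE)
src, dst, tar_kp, ref_kp, gms_matches, mask = findMatchPair_GMS(tar, ref, save=True)
# the two point arrays can be reloaded from the same file in the order they were saved
with open('GMS_matching_pair.npy', 'rb') as f:
    src_loaded = np.load(f)
    dst_loaded = np.load(f)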
Example #19
def findId(img, desList, thres=15):
    kp2, des2 = orb.detectAndCompute(img, None)
    bf = cv2.BFMatcher()
    matchList = []
    finalVal = -1
    try:
        for des in desList:
            matches = bf.knnMatch(des, des2, k=2)
            good = []
            for m, n in matches:
                if m.distance < 0.75 * n.distance:
                    good.append([m])
            matchList.append(len(good))
    except:
        pass
    # print(matchList)
    if len(matchList) != 0:
        if max(matchList) > thres:
            finalVal = matchList.index(max(matchList))
    return finalVal
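The snippet relies on a module-level orb detector and a desList of training descriptors built beforehand; a minimal setup sketch (file names are placeholders):
orb = cv2.ORB_create(1000)
desList = []
for path in ['card1.png', 'card2.png']:      # placeholder training images
    train_img = cv2.imread(path, 0)
    _, des = orb.detectAndCompute(train_img, None)
    desList.append(des)
classId = findId(cv2.imread('query.png', 0), desList)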
Example #20
    def KNNmatchAKAZE(self, projErr=5):
        matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
        matches = matcher.knnMatch(self.tar_des, self.ref_des, k=2)
        good_matches = []
        for m, n in matches:
            if m.distance < 0.75 * n.distance:
                good_matches.append(m)
        src_pts = np.array([self.tar_kps[match.queryIdx].pt for match in good_matches])
        dst_pts = np.array([self.ref_kps[match.trainIdx].pt for match in good_matches])

        _, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, projErr)
        src_pts = np.array([self.tar_kps[match.queryIdx].pt for idx,
                            match in enumerate(good_matches) if mask[idx] == 1])
        dst_pts = np.array([self.ref_kps[match.trainIdx].pt for idx,
                            match in enumerate(good_matches) if mask[idx] == 1])
        self.mask = mask
        self.matches = good_matches
        self.matchNum = len(mask[mask == 1])
        self.src_pts = src_pts
        self.dst_pts = dst_pts
Example #21
def match_image_to_model(X, model_des, query_img, threshold = 0.75):


    orb = cv.ORB_create(nfeatures = 120000) #specifying maximum nr of keypoints to locate

    img_gray = cv.cvtColor(query_img, cv.COLOR_BGR2GRAY)

    # find the keypoints and descriptors with ORB
    query_kp, query_des = orb.detectAndCompute(img_gray, None)

    # create BFMatcher object
    bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)

    # Match descriptors.
    matches = bf.match(model_des, query_des)

    # Need to draw only good matches
    matches = sorted(matches, key = lambda x:x.distance)
    
    matched_2D_points = np.array([query_kp[m.trainIdx].pt for m in matches[:4000]])
    matched_3D_points = X[:,[m.queryIdx for m in matches[:4000]]]

    return matched_2D_points, matched_3D_points
Example #22
def q2(image, sift):
    # a)Enlarge the given image by a scale percentage of 115.
    scale = 115
    scale = scale/100

    width = int(image.shape[1] * scale)
    height = int(image.shape[0] * scale)
    new_dim = (width, height)
    resized = cv2.resize(image, new_dim)
    

    # b) Extract the SIFT features and show the keypoints on the scaled image using the same
    # parameter setting as for Task 1 (for the reduced number of keypoints).
    ## find the keypoints and descriptors using sift detector
    keyPoints1, des1 = sift.detector.detectAndCompute(image, None)
    # Compute the keypoints and descriptors for the scaled image in the same way
    keyPoints2, des2 = sift.detector.detectAndCompute(resized, None)
    img2 = cv2.drawKeypoints(image, keyPoints1, image)
    # Hint: Brute-force matching is available in OpenCV for feature matching.
    bf_matcher = cv2.BFMatcher()
    #use Matcher.match() method to get the best matches in two images
    matches = bf_matcher.match(des1, des2)
    #matches = bf_matcher.knnMatch(des1, des2, k=2)
    # c) The keypoints in both images are similar, which shows that they share the same common features.

    # d) Match the SIFT descriptors of the keypoints of the scaled image with those of the original image 
    # using the nearest-neighbour distance ratio method
    # We sort them in ascending order of their distances so that best matches (with low distance) come to front
    matches = sorted(matches, key=lambda x: x.distance)

    # Show the keypoints of the 5 best-matching descriptors on both the original and the scaled image.
    img_q2=cv2.drawMatches(image, keyPoints1, resized, keyPoints2,
                           matches[:6], None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
    plt.imshow(img_q2), plt.show()
    cv2.imwrite('d2.jpg', img_q2)
    cv2.imwrite('b2.jpg', img2)
Example #23
def init_feature(name):
    chunks = name.split('-')

    if chunks[0] == 'sift':
        detector = cv2.SIFT_create()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.xfeatures2d.SURF_create(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB_create(400)
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'akaze':
        detector = cv2.AKAZE_create()
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'brisk':
        detector = cv2.BRISK_create()
        norm = cv2.NORM_HAMMING
    else:
        return None, None  # Return None if unknown detector name

    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        else:
            flann_params = dict(
                algorithm=FLANN_INDEX_LSH,
                table_number=6,  # 12
                key_size=12,  # 20
                multi_probe_level=1)  # 2

        matcher = cv2.FlannBasedMatcher(flann_params)
    else:
        matcher = cv2.BFMatcher(norm)

    return detector, matcher
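FLANN_INDEX_KDTREE and FLANN_INDEX_LSH are assumed to be module-level constants; a usage sketch with the values used in the OpenCV samples:
FLANN_INDEX_KDTREE = 1   # FLANN algorithm ids as used in the OpenCV samples
FLANN_INDEX_LSH = 6

detector, matcher = init_feature('orb-flann')
if detector is None:
    raise ValueError('unknown feature type')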
Example #24
def rectify_pair(image_left, image_right, viz=False):
    # Feature point matching
    # 1. Detect keypoints with SURF
    grayL = cv2.cvtColor(image_left, cv2.COLOR_BGR2GRAY)
    grayR = cv2.cvtColor(image_right, cv2.COLOR_BGR2GRAY)
    surf = cv2.xfeatures2d.SURF_create()

    # find the keypoints and descriptors with SURF
    kp1, des1 = surf.detectAndCompute(grayL, None)
    kp2, des2 = surf.detectAndCompute(grayR, None)
    img = cv2.drawKeypoints(grayL, kp1, image_left)
    cv2.imshow("keyPointsOfLeft", img)

    # 2. Match the keypoints with BFMatcher
    bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=False)
    # match the feature descriptors
    matches = bf.match(des1, des2)
    points1 = []
    points2 = []
    for match in matches:
        points1.append(kp1[match.queryIdx].pt)
        points2.append(kp2[match.trainIdx].pt)
    #matches=sorted(matches,key=lambda x:x.distance)
    # print(len(matches))
    img3 = cv2.drawMatches(grayL, kp1, grayR, kp2, matches[:20], None, flags=2)
    cv2.imshow('matches', img3)

    # find the fundamental matrix
    F, mask = cv2.findFundamentalMat(np.array(points1), np.array(points2),
                                     cv2.RANSAC, 3, 0.99)

    # rectify the images, produce the homographies: H_left and H_right
    retval, H_left, H_right = cv2.stereoRectifyUncalibrated(
        np.array(points1), np.array(points2), F,
        (image_left.shape[1], image_left.shape[0]))  # imgSize is (width, height)

    return F, H_left, H_right
# OpenCV feature matching with BFMatcher (brute force)
import cv2
from matplotlib import pyplot as plt
# Read the two images to be matched, as grayscale.
template = cv2.imread("template_adjust.jpg", 0)
target = cv2.imread("target1.jpg", 0)
orb = cv2.ORB_create()  # create the ORB feature detector
kp1, des1 = orb.detectAndCompute(template, None)  # keypoints and descriptors of the template
kp2, des2 = orb.detectAndCompute(target, None)  # keypoints and descriptors of the target
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)  # create the matcher
matches = bf.match(des1, des2)  # match the descriptors
matches = sorted(matches, key=lambda x: x.distance)  # sort by distance
result = cv2.drawMatches(template,
                         kp1,
                         target,
                         kp2,
                         matches[:40],
                         None,
                         flags=2)  # draw the matches
plt.imshow(result), plt.show()  # display with matplotlib
Example #26
import cv2
import numpy as np

img1 = cv2.imread(r'Image Test/tiger.jpg', 0)
img2 = cv2.imread(r'Image Train/tiger.jpg', 0)

orb = cv2.ORB_create(nfeatures=1000)
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)
good = []
for m, n in matches:
    if m.distance < 0.75 * n.distance:
        good.append([m])
print(len(good))
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
cv2.imshow('img1', img1)
cv2.imshow('img2', img2)
cv2.imshow('img3', img3)
cv2.waitKey(0)
Example #27
def tracking_orb():

    cap = cv2.VideoCapture('find_chocolate.mp4')

    ret, frm = cap.read()
    img_chocolate = cv2.imread('marker.jpg')

    frm_count = 0
    key = None

    # Setting video format. Google for "fourcc"
    fourcc = cv2.VideoWriter_fourcc(*'XVID')

    # Setting up new video writer
    image_size = (frm.shape[1], frm.shape[0])
    # writer = cv2.VideoWriter('sample_tracking_orb.avi', fourcc, frames_per_second, image_size)
    out = cv2.VideoWriter('sample_tracking_orb.mp4', fourcc, 30.0, image_size)

    while ret:

        ## Create ORB object and BF object(using HAMMING)
        orb = cv2.ORB_create()

        gray2 = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)
        gray1 = cv2.cvtColor(img_chocolate, cv2.COLOR_BGR2GRAY)

        # gray2 = cv2.equalizeHist(gray2)
        # gray1 = cv2.equalizeHist(gray1)

        ## Find the keypoints and descriptors with ORB
        kpts1, descs1 = orb.detectAndCompute(gray1, None)
        kpts2, descs2 = orb.detectAndCompute(gray2, None)

        # create BFMatcher object
        ## match descriptors and sort them in the order of their distance
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)  # NORM_HAMMING suits ORB's default WTA_K=2

        # Match descriptors.
        matches = bf.match(descs1, descs2)

        # Sort them in the order of their distance.
        dmatches = sorted(matches, key=lambda x: x.distance)

        ## extract the matched keypoints
        src_pts = np.float32([kpts1[m.queryIdx].pt
                              for m in dmatches]).reshape(-1, 1, 2)
        dst_pts = np.float32([kpts2[m.trainIdx].pt
                              for m in dmatches]).reshape(-1, 1, 2)

        ## find homography matrix and do perspective transform
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        h, w = img_chocolate.shape[:2]
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)

        ## draw found regions
        frm = cv2.polylines(frm, [np.int32(dst)], True, (0, 0, 255), 1,
                            cv2.LINE_AA)

        ## draw match lines
        res = cv2.drawMatches(img_chocolate,
                              kpts1,
                              frm,
                              kpts2,
                              dmatches[:8],
                              None,
                              flags=2)

        # writer.write(res)
        cv2.namedWindow('orb_match', cv2.WINDOW_NORMAL)
        # cv2.imshow("orb_match", frm)
        out.write(frm)
        cv2.imshow("orb_match", res)

        # Pause on pressing of space.
        if key == ord(' '):
            wait_period = 0
        else:
            wait_period = 30

        key = cv2.waitKey(wait_period)
        ret, frm = cap.read()
        frm_count += 1

    cv2.destroyAllWindows()
    cap.release()
    out.release()

    return 0
Example #28
def tracking_lucas_kanade():

    cap = cv2.VideoCapture('find_chocolate.mp4')

    img_chocolate = cv2.imread('marker.jpg')
    gray_chocolate = cv2.cvtColor(img_chocolate, cv2.COLOR_BGR2GRAY)

    # params for ShiTomasi corner detection
    feature_params = dict(maxCorners=1000,
                          qualityLevel=0.2,
                          minDistance=7,
                          blockSize=7)

    # Parameters for lucas kanade optical flow
    lk_params = dict(winSize=(35, 35),
                     maxLevel=4,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))

    # Create some random colors
    color = np.random.randint(0, 255, (1000, 3))

    # Take first frame and find corners in it

    ret, old_frame = cap.read()

    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)

    orb = cv2.ORB_create(1000, 1.1, 13)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
    kpts1, descs1 = orb.detectAndCompute(gray_chocolate, None)

    # Setting video format. Google for "fourcc"
    fourcc = cv2.VideoWriter_fourcc(*'XVID')

    # Setting up new video writer
    image_size = (old_frame.shape[1], old_frame.shape[0])
    # writer = cv2.VideoWriter('sample_tracking_orb.avi', fourcc, frames_per_second, image_size)
    out = cv2.VideoWriter('sample_tracking_lucas_kanade.avi', fourcc, 30.0,
                          image_size)

    frno = 0
    restart = False
    while (1):
        frno += 1
        ret, frame = cap.read()
        if ret:

            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            if restart:
                orb = cv2.ORB_create(1000, 1.1, 13)
                kpts2, descs2 = orb.detectAndCompute(frame_gray, None)
                restart = False

            kpts2, descs2 = orb.detectAndCompute(frame_gray, None)

            matches = bf.match(descs1, descs2)
            # Sort them in the order of their distance.
            dmatches = sorted(matches, key=lambda x: x.distance)

            ## extract the matched keypoints
            src_pts = np.float32([kpts1[m.queryIdx].pt
                                  for m in dmatches]).reshape(-1, 1, 2)
            dst_pts = np.float32([kpts2[m.trainIdx].pt
                                  for m in dmatches]).reshape(-1, 1, 2)

            ## find homography matrix and do perspective transform
            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
            h, w = img_chocolate.shape[:2]
            pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                              [w - 1, 0]]).reshape(-1, 1, 2)
            dst = cv2.perspectiveTransform(pts, M)

            ## draw found regions
            frm = cv2.polylines(frame, [np.int32(dst)], True, (0, 0, 255), 1,
                                cv2.LINE_AA)

            # ## draw match lines
            # res = cv2.drawMatches(img_chocolate, kpts1, frm, kpts2, dmatches[:8], None, flags=2)

            p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray,
                                                   dst_pts, None, **lk_params)
            successful = (st == 1)
            if np.sum(successful) == 0:
                restart = True
            # Select good points
            good_new = p1[successful]
            good_old = dst_pts[successful]

            # draw the tracks on the frame
            count_of_moved = 0
            for i, (new, old) in enumerate(zip(good_new, good_old)):
                a, b = new.ravel()
                c, d = old.ravel()
                velocity = np.sqrt((a - c)**2 + (b - d)**2)
                if velocity > 1:
                    # the homography inlier mask is not an image, so draw on the frame itself
                    frame = cv2.line(frame, (int(a), int(b)), (int(c), int(d)), color[i].tolist(), 2)
                    frame = cv2.circle(frame, (int(a), int(b)), 4, color[i].tolist(), -1)
                    count_of_moved += 1

            # res = cv2.drawMatches(img_chocolate, kpts1, frm, kpts2, dmatches, None, flags=2) #[:8]
            out.write(frame)

            cv2.namedWindow('orb_match', cv2.WINDOW_NORMAL)

            cv2.imshow('orb_match', frame)

            k = cv2.waitKey(30) & 0xff
            if k == 27:
                break

            # Now update the previous frame and previous points
            old_gray = frame_gray.copy()
            p0 = good_new.reshape(-1, 1, 2)

        else:
            break

    cv2.destroyAllWindows()
    cap.release()
    out.release()
# Feature extraction and matching between images
# Features: ORB
import cv2 as cv

# Load the image files
img1 = cv.imread('./imagedata/image6.jpg')
img1 = cv.resize(img1, (600, 400))
img2 = cv.imread('./imagedata/image2.jpg')
img2 = cv.resize(img2, (600, 400))
# ORB feature detector
detector = cv.ORB_create()
# Extract features (keypoints and descriptors)
kp1, des1 = detector.detectAndCompute(img1, None)
kp2, des2 = detector.detectAndCompute(img2, None)
bf = cv.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)
# list of good matches (ratio test)
matched = []
for match1, match2 in matches:
    ratio = match1.distance / match2.distance
    if ratio < 0.8:
        matched.append([match1])
# Display the matching result
imgmatches = cv.drawMatchesKnn(img1, kp1, img2, kp2, matched, None, flags=2)
cv.imshow("image matching", imgmatches)

cv.waitKey(0)
cv.destroyAllWindows()
Example #30
def main():
    homography = None
    # matrix of camera parameters (made up but works quite well for me)
    camera_parameters = np.array([[800, 0, 320], [0, 800, 240], [0, 0, 1]])

    # create ORB - Oriented FAST and Rotated BRIEF - keypoint detector
    orb = cv2.ORB_create(nfeatures=1000)  # retain max 1000 features

    # create BFMatcher object
    bf = cv2.BFMatcher()

    # image target
    model = cv2.imread('target_img.jpg')

    # calculate key point and description
    kp_model, des_model = orb.detectAndCompute(
        model, None)  # kp: key point, des: description

    # obj file
    obj = OBJ('wolf.obj', swapyz=True)

    # Webcam
    webcam = cv2.VideoCapture(0)

    while True:
        success, imgwebcam = webcam.read()
        # find and draw the keypoints of the frame
        kp_webcam, des_webcam = orb.detectAndCompute(imgwebcam, None)

        # finding match between 2 img
        matches = bf.knnMatch(des_model, des_webcam, k=2)
        # Taking good keypoints
        good = []
        for m, n in matches:
            if m.distance < 0.75 * n.distance:
                good.append(m)

        # compute Homography if enough matches are found
        if len(good) > 15:
            # differentiate between source points and destination points
            srcpts = np.float32([kp_model[m.queryIdx].pt
                                 for m in good]).reshape(-1, 1, 2)
            dstpts = np.float32([kp_webcam[m.trainIdx].pt
                                 for m in good]).reshape(-1, 1, 2)

            # compute Homography
            homography, mask = cv2.findHomography(srcpts, dstpts, cv2.RANSAC,
                                                  5)

            #find boundary around model
            h, w, channel = model.shape
            pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                              [w - 1, 0]]).reshape(-1, 1, 2)
            # project corners into frame
            dst = cv2.perspectiveTransform(pts, homography)
            # connect them with lines
            #imgwebcam = cv2.polylines(imgwebcam,[np.int32(dst)], True, 255, 3, cv2.LINE_AA)

            # if a valid homography matrix was found render object on model plane
            if homography is not None:
                # obtain 3D projection matrix from homography matrix and camera parameters
                projection = projection_matrix(camera_parameters, homography)
                # render object
                imgwebcam = render(imgwebcam, obj, projection, model)
                #imgwebcam = render(imgwebcam, model, projection)

        cv2.imshow('result', imgwebcam)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    webcam.release()
    cv2.destroyAllWindows()
    return 0