Code example #1
File: detectlid.py Project: CnrLwlss/Colonyzer
def checkMatches(fname,posfeats,negfeats,frects,orb,pmatchdist=20,nmatchdist=20,draw=False):
    '''Check whether image in fname is more similar to features in posfeats or features in negfeats'''
    posmatches=getMatches(fname,posfeats,frects,orb,draw)
    negmatches=getMatches(fname,negfeats,frects,orb,draw)
    testmatches=testMatches(posmatches,negmatches,pmatchdist,nmatchdist)
    if draw:
        for i,gm,feat in zip(range(0,len(posfeats)),posmatches,posfeats):
            plate=gm["plate"]
            kp_plate=gm["kp_plate"]
            feature=feat["plate"]
            kp_feature=feat["kp"]
            fmatches=testmatches["posMatches"][i]
            print("Number of matches: "+str(len(fmatches)))
            plt.figure(figsize=(20,20))
            img3 = cv2.drawMatches(feature,kp_feature,plate,kp_plate,fmatches, flags=2,outImg=None)
            plt.imshow(img3),plt.savefig(os.path.basename(fname)[0:-4]+"_PosMatches_{:03}.png".format(i),bbox_inches='tight', pad_inches=0)
        for i,gm,feat in zip(range(0,len(negfeats)),negmatches,negfeats):
            plate=gm["plate"]
            kp_plate=gm["kp_plate"]
            feature=feat["plate"]
            kp_feature=feat["kp"]
            fmatches=testmatches["negMatches"][i]
            print("Number of matches: "+str(len(fmatches)))
            plt.figure(figsize=(20,20))
            img3 = cv2.drawMatches(feature,kp_feature,plate,kp_plate,fmatches, flags=2,outImg=None)
            plt.imshow(img3),plt.savefig(os.path.basename(fname)[0:-4]+"_NegMatches_{:03}.png".format(i),bbox_inches='tight', pad_inches=0)
    return(testmatches["hit"])
Code example #2
File: fuzzy-sansa.py Project: chapinb/fuzzy-sansa
def img_match(f1, f2):
    from matplotlib import pyplot as plt

    img1 = cv2.imread(f1, 0) # Query Image
    img2 = cv2.imread(f2, 0) # Training Image

    # Initiate ORB detector
    orb = cv2.ORB_create()

    # find the keypoints and descriptors with ORB
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)

    # create BFMatcher object
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

    # Match descriptors.
    matches = bf.match(des1, des2)

    # Sort them in the order of their distance.
    matches = sorted(matches, key=lambda x: x.distance)

    # Draw first 10 matches.
    img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:10], None, flags=2)

    plt.imshow(img3), plt.show()
Code example #3
File: mono_avoid.py Project: simama/RealSense
def find_homography_matrix():
    kp1, des1, img1, kp2, des2, img2 = find_features()
    flann = cv2.FlannBasedMatcher(index_params,search_params)
    matches = flann.knnMatch(des1,des2,k=2)
    # Store all good matches as per Lowe's ratio test
    good = []
    for match in matches:
        if len(match) == 2:
            m,n = match
            if m.distance < 0.7 * n.distance:
                good.append(m)
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1,1,2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1,1,2)

        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()
        print(M)
        h,w = img1.shape
        pts = np.float32([[0,0],[0,h-1],[w-1,h-1],[w-1,0]]).reshape(-1,1,2)
        dst = cv2.perspectiveTransform(pts, M)

        img2 = cv2.polylines(img2,[np.int32(dst)],True,(0,0,255),3,cv2.LINE_AA)
    else:
        print ("Not enough matches are found - %d/%d") %(len(good),MIN_MATCH_COUNT)
        matchesMask = None

    draw_params = dict(matchColor = (0,0,255),
                        singlePointColor = None,
                        matchesMask = matchesMask,
                        flags = 2)
    img3 = cv2.drawMatches(img1, kp1, img2, kp2,good,None,**draw_params)
    return img3, M
Code example #4
File: TP3.py Project: rcatajar/vision
    def bf_matcher(source1, source2):
        """
        Question 1.3

        Draws the feature correspondences between two images using the "Brute Force" method

        Parameters
        ----------
        source1: the source of the left image
        source2: the source of the right image
        """
        # Initiate ORB detector
        orb = cv2.ORB_create()
        cv2.ocl.setUseOpenCL(False)

        # find the keypoints and descriptors with ORB
        kp1, des1 = orb.detectAndCompute(source1, None)
        kp2, des2 = orb.detectAndCompute(source2, None)

        # create BFMatcher object
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

        # Match descriptors.
        matches = bf.match(des1, des2)

        # Sort them in the order of their distance.
        matches = sorted(matches, key=lambda x: x.distance)

        # Draw first 10 matches.
        img3 = cv2.drawMatches(source1, kp1, source2, kp2, matches[:10], None, flags=2)

        plt.imshow(img3), plt.show()
Code example #5
def ORB_BF_Matching():
    img1 = cv2.imread('box.png',0)          # queryImage
    img2 = cv2.imread('box_in_scene.png',0) # trainImage

    # Initiate ORB detector
    orb = cv2.ORB_create()

    # find the keypoints and descriptors with ORB
    # cv2.ORB.detectAndCompute(image, mask[, descriptors[, useProvidedKeypoints]]) → keypoints, descriptors
    kp1, des1 = orb.detectAndCompute(img1,None)
    kp2, des2 = orb.detectAndCompute(img2,None)

    # create BFMatcher object
    # First parameter normType: the distance measure to use, cv2.NORM_L2 by default. For ORB, BRIEF and BRISK, use cv2.NORM_HAMMING
    # crossCheck defaults to False; if set to True a match must agree in both directions, otherwise one-way matching is enough
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

    # Match descriptors.
    matches = bf.match(des1,des2)

    # Sort them in the order of their distance.
    # The result of matches = bf.match(des1,des2) line is a list of DMatch objects. This DMatch object has following attributes:
    #   DMatch.distance - Distance between descriptors. The lower, the better it is
    #   DMatch.trainIdx - Index of the descriptor in train descriptors
    #   DMatch.queryIdx - Index of the descriptor in query descriptors
    #   DMatch.imgIdx - Index of the train image
    matches = sorted(matches, key = lambda x:x.distance)

    # Draw first 10 matches.
    # cv2.drawMatches(img1, keypoints1, img2, keypoints2, matches1to2, outImg[, matchColor[, singlePointColor[, matchesMask[, flags]]]]]) → outImg
    img3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:10], None,flags=2)

    plt.imshow(img3),plt.show()
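The comment block above lists the attributes carried by each DMatch object. The short sketch below is an illustration only (the 'box.png' / 'box_in_scene.png' paths are reused from the example as placeholders); it prints those fields for the best match, which helps when picking a distance threshold.

import cv2

img1 = cv2.imread('box.png', 0)           # placeholder query image
img2 = cv2.imread('box_in_scene.png', 0)  # placeholder train image

orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)

best = matches[0]
print("distance:", best.distance)   # lower is better
print("queryIdx:", best.queryIdx)   # index into kp1/des1
print("trainIdx:", best.trainIdx)   # index into kp2/des2
print("imgIdx:", best.imgIdx)       # index of the train image (0 here)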
Code example #6
File: util.py Project: jonasagx/nephos
def createVisualMatch(im1, im2, detector, matcher):
	(kp1, des1) = detector.detectAndCompute(im1, None)
	(kp2, des2) = detector.detectAndCompute(im2, None)

	matches = matcher.match(des1,des2)
	im3 = cv.drawMatches(im1, kp1, im2, kp2, matches[:10], None, flags=2)
	return im3
Code example #7
File: Matcher.py Project: zhangxingshuo/py-mcl
    def SIFTMatch(self, imagePath, display_results=False):
        '''
        Performs a match using Scale-Invariant Feature Transform algorithm.
        Matching is done with Fast Library for Approximate Nearest Neighbors.
        Lowe's ratio test is applied.
        '''
        sift = cv2.xfeatures2d.SIFT_create()

        kp1, des1 = sift.detectAndCompute(self.image, None)
        kp2, des2 = self.index[imagePath]

        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)

        flann = cv2.FlannBasedMatcher(index_params, search_params)

        matches = flann.knnMatch(des1, des2, k=2)
        filtered = list(filter(lambda x:x[0].distance < 0.7*x[1].distance, matches))
        good = list(map(lambda x: x[0], filtered))

        if display_results:
            draw_params = dict(matchColor=(0,255,0), 
                singlePointColor=None, 
                flags=2)

            result = cv2.drawMatches(self.image, kp1, training, kp2, good, None, **draw_params)
            plt.imshow(result), plt.show()

        return len(good)
Code example #8
def SURF(img2, debug):
    # Initiate SURF detector
    surf = cv2.xfeatures2d.SURF_create()
    
    # find the keypoints and descriptors with SURF
    kp1, des1 = surf.detectAndCompute(img1,None)
    kp2, des2 = surf.detectAndCompute(img2,None)
    
    #draw the keypoints
    cv2.drawKeypoints(img1,kp1,None,(255,0,0),4)
    # BFMatcher (Brute Force Matcher) Iniitialize with default params
    bf = cv2.BFMatcher()
    #do brute force matching with k nearest neighbors
    matches = bf.knnMatch(des1,des2, k=2)
    
    # Apply distance ratio test
    good = []
    for m,n in matches:
        if m.distance < 0.7*n.distance:
            good.append(m)
    
    if len(good)>10:
        src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
        dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
    
    img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,flags=2)
    if img3 is not None and debug:
        plt.imshow(img3, 'gray'),plt.show()
        print("Number of Features: ", len(good))
    return good
Code example #9
def drawEpilinesAndMatches(imgL, imgR, pts1, pts2, kp1, kp2, good):

    # Find epilines corresponding to points in right image (second image) and
    # drawing its lines on left image
    lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1,1,2), 2,F)
    lines1 = lines1.reshape(-1,3)
    img3_L = imgL
    img3_R = imgR
    img3_L, img3_R = drawlines(img3_L,img3_R,lines1,pts1,pts2)
    
    fig = plt.figure()    
    plt.subplot(121)
    plt.imshow(imgL), plt.title('Input L (no lines should be written on these variables?)')
    plt.subplot(122)
    plt.imshow(imgR), plt.title('Input R (no lines should be written on these variables?)')
    plt.show()    
    
    # Find epilines corresponding to points in left image (first image) and
    # drawing its lines on right image
    lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1,1,2), 1,F)
    lines2 = lines2.reshape(-1,3)
    img4_L = imgL
    img4_R = imgR
    img4_L,img4_R = drawlines(img4_L,img4_R,lines2,pts2,pts1)

    
    # cv2.drawMatchesKnn expects list of lists as matches.
    # http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_feature2d/py_matcher/py_matcher.html
    imgDummy = np.zeros((1,1))
    img5 = cv2.drawMatches(imgL,kp1,imgR,kp2,good[:10],imgDummy)
    
    return img3_L, img3_R, img4_L, img4_R, img5, lines1, lines2
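The comment above points out that cv2.drawMatchesKnn expects a list of lists rather than a flat list of DMatch objects. A minimal, self-contained sketch of that difference (ORB and a brute-force matcher are assumed here purely for illustration; 'left.png' / 'right.png' are placeholder paths):

import cv2

imgL = cv2.imread('left.png', 0)    # placeholder images
imgR = cv2.imread('right.png', 0)

orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(imgL, None)
kp2, des2 = orb.detectAndCompute(imgR, None)

bf = cv2.BFMatcher(cv2.NORM_HAMMING)
knn = bf.knnMatch(des1, des2, k=2)

# Ratio test, keeping each surviving match wrapped in its own list ...
good_knn = []
for pair in knn:
    if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:
        good_knn.append([pair[0]])

# ... because drawMatchesKnn wants a list of lists of DMatch
vis_knn = cv2.drawMatchesKnn(imgL, kp1, imgR, kp2, good_knn, None, flags=2)

# drawMatches, by contrast, takes a flat list of DMatch
good_flat = [pair[0] for pair in good_knn]
vis_flat = cv2.drawMatches(imgL, kp1, imgR, kp2, good_flat, None, flags=2)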
Code example #10
File: mycv.py Project: opqopq/BoardGameMaker
 def draw_matches(self, other, num=10):
     kp1, des1 = self.orb_keypoints()
     kp2, des2 = other.orb_keypoints()
     bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck = True)
     matches = bf.match(des1, des2)
     matches = sorted(matches, key = lambda x: x.distance)
     return cv2.drawMatches(self._ocvimg, kp1, imgunwrap(other), kp2, matches[:num], None, flags=2)
Code example #11
File: testORB.py Project: FoxRobotLab/ImageMatching
def tryToMatchFeatures(orb, img1, pointInfo, img2):
    kp2, des2 = pointInfo

    # find the keypoints and descriptors with ORB
    kp1, des1 = orb.detectAndCompute(img1,None)

    if des1 is None or des2 is None:
        return [], None, None
    elif len(des1) == 0 or len(des2) == 0:
        print("it thinks the key descriptions are empty")
        return [], None, None

    # BFMatcher with default params
    bf = cv2.BFMatcher()
    matches = bf.match(des1,des2)

    sortedMatches = sorted(matches, key = lambda x: x.distance)
    #for mat in matches:
    #    print mat.distance
    goodMatches = [mat for mat in matches if mat.distance < 300]
    print "Good match number:", len(goodMatches)
    
    matchImage = cv2.drawMatches(img1, kp1, img2, kp2, goodMatches,  
        None, matchColor = (255, 255, 0), singlePointColor=(0, 0, 255))
    cv2.imshow("Match Image", matchImage)
    cv2.waitKey(0)
    
    return goodMatches, kp1, kp2
Code example #12
File: Matcher.py Project: zhangxingshuo/py-mcl
    def ORBMatch(self, imagePath, display_results=False):
        '''
        Matches query against specified image using the Oriented FAST and Rotated BRIEF algorithm.
        Matching is done through Brute-Force.
        '''

        orb = cv2.ORB_create()

        kp1, des1 = orb.detectAndCompute(self.image, None)
        kp2, des2 = self.index[imagePath]

        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

        matches = bf.match(des1, des2)

        matches = sorted(matches, key=lambda x: x.distance)

        if display_results:
            draw_params = dict(matchColor=(0,255,0), 
                singlePointColor=None, 
                flags=2)

            image = cv2.drawMatches(self.image, kp1, training, kp2, matches, None, **draw_params)
            plt.imshow(image), plt.show()

        return len(matches)
Code example #13
def drawMatches(image1, image2, matches, filename):
    '''
    Takes two images and matches and saves an image of those matches
    '''
    matchImage = cv2.drawMatches(image1.img, image1.kps, image2.img, image2.kps, matches, image1.img, flags=2)
    img = PILImage.fromarray(matchImage, 'RGB')
    img.save(filename)
Code example #14
def drawmatches(matchesMask,img1,img2,kp1,kp2,good):
    draw_params = dict(matchColor = (0,255,0), # draw matches in green color
                   singlePointColor = (255,0,0),
                   matchesMask = matchesMask, # draw only inliers
                   flags = 2)

    img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params)
    cv2.imwrite('diffrrrsift1817.png',img3)
Code example #15
File: aligner.py Project: volzotan/compressor
    def calculate_translation_values(self, image, warp_matrix):

        source_file = os.path.join(self.INPUT_DIR, image)

        if self.RESET_MATRIX_EVERY_LOOP:
            warp_matrix = self._create_warp_matrix() # reset

        im2 = self._read_image_and_crop(source_file, read_as_8bit=True) 

        # proceed with downsized version
        if self.DOWNSIZE:
            im2_downsized = cv2.resize(im2, (0,0), fx=1.0/self.DOWNSIZE_FACTOR, fy=1.0/self.DOWNSIZE_FACTOR)
        else:
            im2_downsized = im2

        im2_gray = cv2.cvtColor(im2_downsized, cv2.COLOR_BGR2GRAY)
        if self.USE_SOBEL:
            im2_gray = self._get_gradient(im2_gray)

        if self.ALGORITHM == "ECC":
            try:
                (cc, warp_matrix) = cv2.findTransformECC(self.reference_image_gray, im2_gray, warp_matrix, self.WARP_MODE, self.CRITERIA)
            except Exception as e:
                raise e

        if self.ALGORITHM == "ORB":
            orb = cv2.ORB_create(self.MAX_FEATURES)
            im2_gray = np.uint8(im2_gray)
            keypoints2, descriptors2 = orb.detectAndCompute(im2_gray, None)

            # Match features.
            matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
            matches = matcher.match(self.orb_descriptors1, descriptors2, None)
               
            # Sort matches by score
            matches.sort(key=lambda x: x.distance, reverse=False)
             
            # Remove not so good matches
            numGoodMatches = int(len(matches) * self.GOOD_MATCH_PERCENT)
            matches = matches[:numGoodMatches]
             
            # Draw top matches
            imMatches = cv2.drawMatches(self.reference_image_gray, self.orb_keypoints1, im2_gray, keypoints2, matches, None)
            cv2.imwrite(image + "_match.jpg", imMatches)
               
            # Extract location of good matches
            points1 = np.zeros((len(matches), 2), dtype=np.float32)
            points2 = np.zeros((len(matches), 2), dtype=np.float32)
             
            for i, match in enumerate(matches):
                points1[i, :] = self.orb_keypoints1[match.queryIdx].pt
                points2[i, :] = keypoints2[match.trainIdx].pt
               
            # Find homography
            h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
            warp_matrix = h

        return (im2, warp_matrix)
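calculate_translation_values only estimates the warp; applying the returned matrix is left to the caller. The sketch below is an assumption-laden illustration (synthetic image, pure-translation homography) of the usual follow-up: a 3x3 matrix from the ORB branch goes through cv2.warpPerspective, while a 2x3 matrix from findTransformECC would go through cv2.warpAffine. Since findHomography(points1, points2) above maps the reference into im2, warping im2 back onto the reference frame uses the inverse map.

import cv2
import numpy as np

# Placeholders standing in for the (im2, warp_matrix) pair returned above.
im2 = np.zeros((240, 320, 3), dtype=np.uint8)
warp_matrix = np.array([[1, 0, 15],
                        [0, 1, -8],
                        [0, 0, 1]], dtype=np.float32)
h, w = im2.shape[:2]

if warp_matrix.shape == (3, 3):
    aligned = cv2.warpPerspective(im2, warp_matrix, (w, h),
                                  flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
else:
    # A 2x3 warp from findTransformECC goes through warpAffine instead.
    aligned = cv2.warpAffine(im2, warp_matrix, (w, h),
                             flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)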
Code example #16
File: ImageMatcher.py Project: ChiyenLee/py-image
    def SURFmatch(self, imageName):
        '''
        Performs a SURF image match of a query image against the specified image from 
        the dataset. 
        '''

        print("SURF matching: " + imageName + "...")
        MIN_MATCH_COUNT = 200
        FLANN_INDEX_KDTREE = 0

        query = cv2.imread(self.image)
        training = cv2.imread(self.dataset + "/" + imageName)
        surf = cv2.xfeatures2d.SURF_create()
        kp1, des1 = surf.detectAndCompute(query, None)
        kp2, des2 = surf.detectAndCompute(training, None)

        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)

        flann = cv2.FlannBasedMatcher(index_params, search_params)

        matches = flann.knnMatch(des1, des2, k=2)
        filtered = list(filter(lambda x:x[0].distance < 0.7*x[1].distance, matches))
        good = list(map(lambda x: x[0], filtered))

        if len(good) > MIN_MATCH_COUNT:
            print("\tFound %s matches" % (len(good)))
            src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
            dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)

            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
            matchesMask = mask.ravel().tolist()

            h,w = query.shape[:2]
            pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
            dst = cv2.perspectiveTransform(pts,M)

            width = cv2.imread(self.image).shape[1]
            offset = 0.5 * (0.5 * (dst[0][0] + dst[1][0]) + 0.5 * (dst[2][0] - width + dst[3][0] - width))

            self.estimates.append((offset[0] * 60/width + int(imageName[5:8]), len(good)))

            training = cv2.polylines(training,[np.int32(dst)],True,255,3,cv2.LINE_AA)

        else:
            print("\tNot enough matches found - %d/%d" % (len(good), MIN_MATCH_COUNT))
            matchesMask = None

        if self.seeSURFMatchesFlag:
            draw_params = dict(matchColor=(0,255,0), 
                singlePointColor=None, 
                matchesMask=matchesMask, 
                flags=2)

            result = cv2.drawMatches(query,kp1,training,kp2,good,None,**draw_params)
            plt.imshow(result, 'gray'), plt.show()

        return len(good)
Code example #17
def siftFeature(prev, curr, mask):
    sift = cv2.xfeatures2d.SIFT_create(nOctaveLayers=3, contrastThreshold=0.05, edgeThreshold=10)
    #surf = cv2.SURF(1000)
    kp1, des1 = sift.detectAndCompute(prev, mask)
    kp2, des2 = sift.detectAndCompute(curr, None)

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(des1, des2, k=2)

    # store all the good matches as per Lowe's ratio test.
    good = []

    for m, n in matches:
        if m.distance < 0.6 * n.distance:
            good.append(m)
    print(len(kp1), len(kp2), len(matches), len(good))
    if len(good) > 10:
        src_pts = np.float32(
            [kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32(
            [kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 3.0)
        if M is None:
            return None, None
        matchesMask = mask.ravel().tolist()
        img = curr.copy()
        for idx, m in enumerate(matchesMask):
            x1, y1 = map(int, src_pts[idx][0])
            x2, y2 = map(int, dst_pts[idx][0])
            if m == 1:

                cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 1)
                cv2.circle(img, (x1, y1), 2, (0, 0, 255), -1)
                cv2.circle(img, (x2, y2), 1, (0, 0, 255), -1)
        draw_params = dict(matchColor=(0, 255, 0),
                           singlePointColor=None,
                           matchesMask=matchesMask,
                           flags=2)
        img3 = cv2.drawMatches(prev,
                               kp1, curr, kp2, good, None, **draw_params)

        cv2.imshow('sift', img3)
    else:
        print "Not enough matches are found - %d/%d" % (len(good), 50)
        matchesMask = None
        M = None
        img = curr.copy()

    return img, M
Code example #18
File: parse_hex.py Project: ebensh/hoplite_ai
def main():
  # This scene contains a Sword Monster.
  scene = cv2.imread('hex_images/hex_-1_-1_2.png', cv2.IMREAD_COLOR)
  mon_sword = cv2.imread('hoplite_assets/mon_sword_1.png', cv2.IMREAD_COLOR)
  mon_bow = cv2.imread('hoplite_assets/mon_bow_1.png', cv2.IMREAD_COLOR)
  mon_wizard = cv2.imread('hoplite_assets/mon_wizard_1.png',
                          cv2.IMREAD_COLOR)
  
  sift = cv2.xfeatures2d.SURF_create()
  # find the keypoints and descriptors with SURF
  keypoints_scene, descriptors_scene = sift.detectAndCompute(scene, None)
  for mon in [mon_sword, mon_bow, mon_wizard]:
    keypoints_mon, descriptors_mon = sift.detectAndCompute(mon, None)
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks = 50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(descriptors_scene, descriptors_mon, k=2)
    # store all the good matches as per Lowe's ratio test.
    good = []
    for m, n in matches:
      if m.distance < 0.7 * n.distance:
        good.append(m)

    if len(good) > MIN_MATCH_COUNT:
      src_pts = np.float32([keypoints_scene[m.queryIdx].pt for m in good]).reshape(-1,1,2)
      dst_pts = np.float32([keypoints_mon[m.trainIdx].pt for m in good]).reshape(-1,1,2)

      M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
      matchesMask = mask.ravel().tolist()

      h,w = scene.shape[:2]
      pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
      dst = cv2.perspectiveTransform(pts,M)

      mon = cv2.polylines(mon,[np.int32(dst)], True, 255, 3, cv2.LINE_AA)

    else:
      print "Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT)
      matchesMask = None

    draw_params = dict(matchColor = (0,255,0), # draw matches in green color
                       singlePointColor = None,
                       matchesMask = matchesMask, # draw only inliers
                       flags = 2)

    result = cv2.drawMatches(scene, keypoints_scene, mon, keypoints_mon,
                             good,None,**draw_params)

    cv2.namedWindow('result', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('result', result)
    cv2.waitKey(0)
  cv2.destroyAllWindows()  
Code example #19
def do_it(im1, im2):
    orb = cv2.ORB_create()
    m = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    kp1, desc1 = orb.detectAndCompute(im1, None)
    kp2, desc2 = orb.detectAndCompute(im2, None)
    matches = m.match(desc1, desc2)
    matches = sorted(matches, key=lambda x: x.distance)
    res = cv2.drawMatches(im1, kp1, im2, kp2, matches[:20], None, flags=2)
    cv2.imshow("res", res)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Code example #20
File: face.py Project: souryapoddar290990/sourya
def function6():
    img1 = cv2.imread('face1.jpg',0)
    img2 = cv2.imread('face2.jpg',0)
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(img1,None)
    kp2, des2 = orb.detectAndCompute(img2,None)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(des1,des2)
    matches = sorted(matches, key = lambda x:x.distance)
    img3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:10], None, flags=2)
    plt.imshow(img3)
    plt.show()
Code example #21
File: CamView.py Project: eporcell/CamView
    def matchImage(self):
        img1 = cv2.imread("/home/edgardo/Pictures/CamView/4-processed/canny_1_(7,7):1-90:200.png")
        img2 = cv2.imread("/home/edgardo/Pictures/CamView/4-processed/canny_2_(7,7):1-90:200.png")

        orb = cv2.ORB_create()
        kp1, des1 = orb.detectAndCompute(img1, None)
        kp2, des2 = orb.detectAndCompute(img2, None)
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        matches = bf.match(des1, des2)
        matches = sorted(matches, key = lambda x:x.distance)
        img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:40], img2, flags = 2)
        plt.imshow(img3),plt.show()
Code example #22
File: Demo1.py Project: JamesPei/PythonProjects
def Homography():
    MIN_MATCH_COUNT = 10

    img1 = cv2.imread('benzene4.jpg',0)          # queryImage
    img2 = cv2.imread('chemistry1.jpg',0)          # trainImage

    # Initiate ORB detector
    orb = cv2.ORB_create(nfeatures=500, edgeThreshold=5)

    # find the keypoints and descriptors with ORB
    kp1, des1 = orb.detectAndCompute(img1,None)
    kp2, des2 = orb.detectAndCompute(img2,None)
    print('kp1_length:', len(kp1))
    print('kp2_length:', len(kp2))

    # FLANN parameters
    FLANN_INDEX_LSH = 0
    index_params= dict(algorithm = FLANN_INDEX_LSH, table_number = 6, key_size = 12,multi_probe_level = 1)
    search_params = dict(checks=50)   # or pass empty dictionary

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(np.asarray(des1,np.float32),np.asarray(des2,np.float32), k=2)
    print('Number of matches:', len(matches))
    good = []
    for m,n in matches:
        if m.distance < 0.8*n.distance:
            good.append(m)

    if len(good)>MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1,1,2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1,1,2)

        # cv2.findHomography(srcPoints, dstPoints[, method[, ransacReprojThreshold[, mask]]]) → retval, mask
        # ransacReprojThreshold –Maximum allowed reprojection error to treat a point pair as an inlier (used in the RANSAC method only)
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()

        h,w = img1.shape
        pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
        dst = cv2.perspectiveTransform(pts,M)
        # cv2.polylines(img, pts, isClosed, color[, thickness[, lineType[, shift]]]) → img
        img2 = cv2.polylines(img2,[np.int32(dst)],True, 127, 3, cv2.LINE_AA)
    else:
        print "Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT)
        matchesMask = None

    draw_params = dict( matchColor = (0,255,0), # draw matches in green color
                        singlePointColor = None,
                        matchesMask = matchesMask, # draw only inliers
                        flags = 2)
    img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params)
    plt.imshow(img3, 'gray'),plt.show()
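The snippet above casts the binary ORB descriptors to float32 so they can be pushed through a KD-tree style FLANN index. A commonly used alternative, sketched below as an assumption rather than a drop-in change to this project, is FLANN's LSH index (algorithm code 6, with the same table_number/key_size/multi_probe_level values the snippet already defines), which matches the uint8 descriptors directly:

import cv2

img1 = cv2.imread('benzene4.jpg', 0)      # placeholder paths from the example
img2 = cv2.imread('chemistry1.jpg', 0)

orb = cv2.ORB_create(nfeatures=500, edgeThreshold=5)
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

FLANN_INDEX_LSH = 6                       # LSH index, suited to binary descriptors
index_params = dict(algorithm=FLANN_INDEX_LSH,
                    table_number=6, key_size=12, multi_probe_level=1)
search_params = dict(checks=50)

flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)  # no float32 cast needed

good = []
for pair in matches:
    if len(pair) == 2 and pair[0].distance < 0.8 * pair[1].distance:
        good.append(pair[0])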
Code example #23
File: CVQT.py Project: tkubic/CVQT
    def templateSURF(self):
        global activeImage
        if 'surf' not in globals():
            global surf
            print("creating surf component")
            surf = cv2.xfeatures2d.SURF_create(400)
        # find the keypoints and descriptors with SURF
        kpT, desT = surf.detectAndCompute(templateImage, None)
        kpA, desA = surf.detectAndCompute(activeImage, None)

        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)

        flann = cv2.FlannBasedMatcher(index_params, search_params)

        matches = flann.knnMatch(desT, desA, k=2)

        # store all the good matches as per Lowe's ratio test
        good = []
        for m, n in matches:
            if m.distance < 0.7*n.distance:
                good.append(m)

        MIN_MATCH_COUNT = self.ui.sbFeatureMinCnt.value()
        print(len(good))
        if len(good)>MIN_MATCH_COUNT:
            src_pts = np.float32([ kpT[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
            dst_pts = np.float32([ kpA[m.trainIdx].pt for m in good ]).reshape(-1,1,2)

            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
            matchesMask = mask.ravel().tolist()

            h, w = activeImage.shape
            pts = np.float32([ [0, 0], [0,h-1], [w-1, h-1], [w-1, 0] ]).reshape(-1,1,2)
            dst = cv2.perspectiveTransform(pts, M)

            activeImage = activeImage.copy()
            activeImage = cv2.polylines(activeImage, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)

        else:
            print "Not enough matches are found -%d/%d" % (len(good), MIN_MATCH_COUNT)
            matchesMask = None

        draw_params = dict(matchColor = (0,255, 0),     # draw matches in green color
                           singlePointColor = None,
                           matchesMask = matchesMask,   # draw only inliers
                           flags = 2)

        matchedImage = cv2.drawMatches(templateImage, kpT, activeImage, kpA, good, None, **draw_params)

        cv2.imshow("matched image", matchedImage)
Code example #24
File: match.py Project: cookiebus/graduation_project
def match(img1, img2, MIN_MATCH_COUNT = 10):
    
    surf = cv2.xfeatures2d.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = surf.detectAndCompute(img1, None)
    kp2, des2 = surf.detectAndCompute(img2, None)

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks = 50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    # bf  = cv2.BFMatcher()
    matches = flann.knnMatch(des1, des2, k=2)
    # matches = bf.knnMatch(des1, des2, k=2)

    # store all the good matches as per Lowe's ratio test.
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)
    print "Good Matched Point:", len(good)

    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1, 1, 2)
        dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()

        h, w, _ = img1.shape
        pts = np.float32([ [0, 0], [0, h-1], [w-1, h-1], [w-1, 0] ]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)

        img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
    else:
        print "Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT)
        matchesMask = None
        return False, ''

    draw_params = dict(matchColor = (0, 255, 0), # draw matches in green color
                       singlePointColor = None, 
                       matchesMask = matchesMask, # draw only inliers
                       flags = 2)

    img3 = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)
    plt.imshow(img3, 'gray'), plt.show()
    dst_img = cv2.warpPerspective(img1, M, (h * 2, w))
    # dst_img = cv2.resize(dst_img, None, fx=0.25, fy=0.25, interpolation = cv2.INTER_CUBIC)
    cv2.imwrite('/Users/snake/Documents/images/result.jpg',dst_img)
    return True, dst_img
Code example #25
def draw_matches(reference_features, unknown_features, mask, good_pts):
    fig = plt.figure()
    draw_params = dict(matchColor = (0,255,0), # draw matches in green color
                       singlePointColor = (255,0,0),
                       matchesMask = mask,
                       flags = 2)

    img3 = cv2.drawMatches(reference_features["image"],
                           reference_features["kps"],
                           unknown_features["image"],
                           unknown_features["kps"],
                           good_pts,None,**draw_params)
    plt.imshow(img3)
    return fig
Code example #26
def harris_match(g_first,g_second):
    global curr_img
    global mpc_pos

    h_first = harris_only(g_first)
    h_second = harris_only(g_second)
    
    #Match the identical corners in the two given input images
    ofrb_var = cv2.ORB_create()
    (f_key,f_dscrpt) = ofrb_var.detectAndCompute(h_first, None)
    (s_key,s_dscrpt) = ofrb_var.detectAndCompute(h_second, None)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    mtchs = bf.match(f_dscrpt,s_dscrpt)
    mtchs = sorted(mtchs, key = lambda x:x.distance)
    curr_img = cv2.drawMatches(h_first,f_key,h_second,s_key,mtchs[:mpc_pos],None,flags=2)
Code example #27
def draw_kps_match(img1, img2):

    # Initiate SURF detector
    sift = cv2.xfeatures2d.SURF_create()

    # find the key points and descriptors with SURF
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=True)
    matches = bf.match(des1, des2)
    matches_sorted = sorted(matches, key=lambda x: x.distance)

    img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches1to2=matches_sorted[:200], outImg=np.ndarray(img1.shape))

    return img3
Code example #28
 def match(self, img1, img2, draw):
     kp1, des1 = self.corner(img1)
     kp2, des2 = self.corner(img2)
     # if ver2 == 'FLANN':
     # 	# FLANN parameters
     # 	FLANN_INDEX_KDTREE = 0
     # 	index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
     # 	search_params = dict(checks=50)  # or pass empty dictionary
     #
     # 	flann = cv2.FlannBasedMatcher(index_params, search_params)
     #
     # 	matches = flann.knnMatch(des1, des2, k=2)
     #
     # 	# Need to draw only good matches, so create a mask
     # 	matchesMask = [[0, 0] for i in xrange(len(matches))]
     #
     # 	# ratio test as per Lowe's paper
     # 	for i, (m, n) in enumerate(matches):
     # 		if m.distance < 0.7 * n.distance:
     # 			matchesMask[i] = [1, 0]
     #
     # 	draw_params = dict(matchColor=(0, 255, 0),
     # 					   singlePointColor=(255, 0, 0),
     # 					   matchesMask=matchesMask,
     # 					   flags=0)
     #
     # 	return cv2.drawMatchesKnn(img1, kp1, img2, kp2, matches[:self.matchPoints], None, **draw_params)
     matches = self.bf.knnMatch(des1, des2, k=2)
     # matches = sorted(matches, key = lambda x:x.distance)
     good = []
     for m, n in matches:
         if m.distance < 0.6 * n.distance:
             good.append(m)
     matches = good
     list_kp1 = []
     list_kp2 = []
     for m in good:
         img1_idx = m.queryIdx
         img2_idx = m.trainIdx
         (x1, y1) = kp1[img1_idx].pt
         (x2, y2) = kp2[img2_idx].pt
         list_kp1.append((x1, y1))
         list_kp2.append((x2, y2))
     if draw:
         return cv2.drawMatches(img1, kp1, img2, kp2, good[:self.matchPoints], None, flags=2)
     else:
         return list_kp1, list_kp2
Code example #29
def findPatternORB(frame, orb, kp, des, template, preview):
   #find keypoints for new frame
   kp2, des2 = orb.detectAndCompute(frame, None)
   found = False
   x, y = 0, 0

   #use brute force matching
   bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
   matches = bf.match(des, des2)
   matches = sorted(matches, key = lambda x:x.distance)

   #There is a match if there is at least 10 matches and the distance of the 
   # furthest one is not too far
   if len(matches) > 6 and matches[6].distance < 50:
      found = True
      frame = cv2.drawMatches(template, kp, frame, kp2, matches[:6], outImg=None, flags=2)
   return found, (x, y), frame
Code example #30
File: sift_test.py Project: sarathkites/myworks
def sift_compute(file1,file2,result):
	global cnt
	global selfile
	global selcategory

	img1 = cv2.imread(file1)
	img2 = cv2.imread(file2)
	#show_rgb_img(img1);
	#plt.show()

	img1_gray = to_gray(img1)
	img2_gray = to_gray(img2)

	#plt.imshow(img1_gray, cmap='gray'),plt.show();
	# generate SIFT keypoints and descriptors
	img1_kp, img1_desc = gen_sift_features(img1_gray)
	img2_kp, img2_desc = gen_sift_features(img2_gray)

	#print ('Here are what our SIFT features look like for the   image:')
	#show_sift_features(img1_gray, img1_front, img1_kp);
	#plt.show()

	# create a BFMatcher object which will match up the SIFT features
	bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
	matches = bf.match(img1_desc, img2_desc)
	# Sort the matches in the order of their distance.
	matches = sorted(matches, key = lambda x:x.distance)
	file = os.path.basename(file2)
	parent=os.path.basename(os.path.dirname(file2))
	if(len(matches)>= cnt):
		cnt = len(matches)
		selfile = file
		selcategory = parent

	print("{}<><><>{}<><><>{}".format(file2,len(matches),parent))
    
	# draw the top N matches
	N_MATCHES = 100

	match_img = cv2.drawMatches( img1, img1_kp, img2, img2_kp, matches[:N_MATCHES], img2.copy(), flags=0)

	#plt.figure(figsize=(12,6))
	#plt.imshow(match_img);
	#plt.show()
	cv2.imwrite(result,match_img)
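sift_compute relies on helpers defined elsewhere in sift_test.py (to_gray, gen_sift_features, and the show_* utilities). A minimal sketch of what the two helpers used above presumably look like, written here as an assumption rather than the project's actual code:

import cv2

def to_gray(color_img):
    # Assumed helper: BGR image -> single-channel grayscale
    return cv2.cvtColor(color_img, cv2.COLOR_BGR2GRAY)

def gen_sift_features(gray_img):
    # Assumed helper: (keypoints, descriptors) for a grayscale image
    sift = cv2.xfeatures2d.SIFT_create()
    return sift.detectAndCompute(gray_img, None)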
Code example #31
def Matches(image, Match_image, angle, box, point, CenterX, CenterY):
    x = 0
    y = 0
    while y < len(Match_image):
        # cv.imshow("box", Match_image[y])
        # cv.imshow("image", image)

        # Create a SIFT feature detector
        sift = cv.xfeatures2d.SIFT_create()
        kp1, des1 = sift.detectAndCompute(Match_image[y], None)
        kp2, des2 = sift.detectAndCompute(image, None)

        # Matching
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)
        goodMatches = []

        # Select the good descriptors
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                goodMatches.append(m)
        # print('good',len(goodMatches)

        # Store the good point locations for obj and scene separately
        obj_pts = []
        scene_pts = []
        if len(goodMatches) > MIN_MATCH_COUNT:
            # Get the keypoint coordinates
            obj_pts = np.float32([kp1[m.queryIdx].pt
                                  for m in goodMatches]).reshape(-1, 1, 2)
            scene_pts = np.float32([kp2[m.trainIdx].pt
                                    for m in goodMatches]).reshape(-1, 1, 2)
            # Compute the transformation matrix and mask
            M, mask = cv.findHomography(obj_pts, scene_pts, cv.RANSAC, 5.0)
            matchesMask = mask.ravel().tolist()
            h, w = Match_image[y].shape

            # pts are the four corner coordinates of the box image
            pts = np.float32([[0, 0], [0, h], [w, h], [w,
                                                       0]]).reshape(-1, 1, 2)
            # cv.circle(box, (0,0), 10, (1, 227, 254), -1)     # top-left
            # cv.circle(box, (0,440), 10, (1, 227, 254), -1)   # bottom-left
            # cv.circle(box, (236,440), 10, (1, 227, 254), -1) # bottom-right
            # cv.circle(box, (236,0), 10, (1, 227, 254), -1)   # top-right
            # print("PTS : ")
            # print(pts)
            # print(pts[0][0][1])  # top-left
            # print(pts[1][0][1])  # bottom-left
            # print(pts[2][0][1])  # bottom-right
            # print(pts[3][0][1])  # top-right
            # Transform the four corners of the source image with the obtained matrix to get the corresponding coordinates in the target image
            dst = cv.perspectiveTransform(pts, M).reshape(-1, 2)
            # print("DST : ")
            # print(dst)
            # dst[0] top-left
            # dst[1] bottom-left
            # dst[2] bottom-right
            # dst[3] top-right

            # Center point of the top-left -> bottom-right diagonal
            X_abs = int((dst[0][0] + dst[2][0]) / 2)
            Y_abs = int((dst[0][1] + dst[2][1]) / 2)
            print("X_abs : ", X_abs)
            print("Y_abs : ", Y_abs)
            cv.circle(image, (X_abs, Y_abs), 10, (1, 227, 254), -1)

            # Center point of the top-right -> bottom-left diagonal
            X_abs_2 = int((dst[1][0] + dst[3][0]) / 2)
            Y_abs_2 = int((dst[1][1] + dst[3][1]) / 2)
            print("X_abs_2 : ", X_abs_2)
            print("Y_abs_2 : ", Y_abs_2)

            # Top-left -> bottom-right diagonal (coordinate sums)
            X_side = int((dst[0][0] + dst[2][0]))
            Y_side = int((dst[0][1] + dst[2][1]))
            print("X_side : ", X_side)
            print("Y_side : ", Y_side)

            # Top-right -> bottom-left diagonal (coordinate sums)
            X_side_2 = int((dst[1][0] + dst[3][0]))
            Y_side_2 = int((dst[1][1] + dst[3][1]))
            print("X_side_2 : ", X_side_2)
            print("Y_side_2 : ", Y_side_2)

            X_side_Less = abs(X_side - X_side_2)
            Y_side_Less = abs(Y_side - Y_side_2)
            X_center_point_Less = abs(X_abs - X_abs_2)
            Y_center_point_Less = abs(Y_abs - Y_abs_2)
            print("X_sidet_Less : ", X_side_Less)
            print("Y_side_Less : ", Y_side_Less)
            print("X_center_point_Less : ", X_center_point_Less)
            print("Y_center_point_Less : ", Y_center_point_Less)

            # 求角度
            X1 = pts[2][0][0] - pts[0][0][0]
            Y1 = pts[2][0][1] - pts[0][0][1]
            # print(X1,Y1)
            X2 = dst[2][0] - dst[0][0]
            Y2 = dst[2][1] - dst[0][1]
            # print(X2,Y2)
            angle1 = np.rad2deg(np.arctan2(Y1, X1))
            angle2 = np.rad2deg(np.arctan2(Y2, X2))
            # print(angle1,angle2)
            angle_diff = angle2 - angle1
            print('angle_1', angle1)
            print('angle_2', angle2)
            print('angle', angle_diff)
            print("scene_pts : ", len(scene_pts))
            # Add the offset
            for i in range(4):
                dst[i][0] += w

            draw_params = dict(singlePointColor=None,
                               matchesMask=matchesMask,
                               flags=2)
            # result = cv.drawMatches(box, kp1, image, kp2, goodMatches, None)
            result = cv.drawMatches(Match_image[y], kp1, image, kp2,
                                    goodMatches, None, **draw_params)
            cv.polylines(result, [np.int32(dst)], True, (0, 0, 255), 3,
                         cv.LINE_AA)
            # cv.namedWindow('flann-match', cv.WINDOW_NORMAL)  # WINDOW_NORMAL allows free resizing
            # cv.imshow('flann-match',result)

            if (X_side >= 50 and X_side <= 2000) and (Y_side >= 50
                                                      and Y_side <= 2000):
                if ((X_side_Less == 0 and Y_side_Less == 0)
                        or (X_side_Less <= 6 and Y_side_Less <= 6)):
                    if ((X_center_point_Less == 0 and Y_center_point_Less == 0)
                            or (X_center_point_Less <= 5
                                and Y_center_point_Less <= 5)):
                        angle.append(angle_diff)
                        point.append(len(scene_pts))
                        CenterX.append(X_abs)
                        CenterY.append(Y_abs)
                        if (y == 0):
                            box.append('W')
                        if (y == 1):
                            box.append('R')
                        if (y == 2):
                            box.append('Y')
                        if (y == 3):
                            box.append('G')
                        if (y == 4):
                            box.append('WP')
                        if (y == 5):
                            box.append('GP')

                y = y + 1
                # cv.waitKey(0)
                # cv.destroyAllWindows()

        else:
            print("Not enough matches are found - %d/%d" %
                  (len(goodMatches), MIN_MATCH_COUNT))
            # matchesMask = None
            y = y + 1
Code example #32
    x2 = max(Xs)
    y1 = min(Ys)
    y2 = max(Ys)
    hight = y2 - y1
    width = x2 - x1
    crop_img = img1[y1:y1 + hight, x1:x1 + width]
    cv2.imshow('draw_img', draw_img)
    cv2.imshow('crop_img', crop_img)
    # Draw the bounding box
    # cv2.polylines(canvas,[np.int32(dst)],True,(0,255,0),3, cv2.LINE_AA)
else:
    ## (9) Crop the matched region from scene
    print("Not enough matches are found - ".format(len(good), MIN_MATCH_COUNT))

## (8) drawMatches
matched = cv2.drawMatches(img1, kpts1, canvas, kpts2, good, None, (0, 255, 0))
# arg 2: top-left corner, arg 3: bottom-right corner
# cv2.rectangle(matched, (384, 0), (510, 128), (0, 0, 255), 3)
# cv2.rectangle(matched, (590, 197), (885, 490), (0, 0, 255), 3)
# cv2.rectangle(matched, (384, 0), (510, 128), (0, 0, 255), 3)
# Draw a filled red circle; arg 2: center, arg 3: radius
# cv2.circle(matched, (678, 890), 63, (255, 0, 0), -1)
# Draw a small rectangle in the image; the last argument is the thickness
#matched = cv2.line(canvas, kpts1, kpts2, (255, 0, 0))
h, w = img1.shape[:2]
#pts = np.float32([[0,0],[0,h-1],[w-1,h-1],[w-1,0]]).reshape(-1,1,2)
#dst = cv2.perspectiveTransform(pts, M)
#perspectiveM = cv2.getPerspectiveTransform(np.float32(dst),pts)
#found = cv2.warpPerspective(img2,perspectiveM,(w,h))

## (10) save and display
Code example #33
def match1():
    parser = argparse.ArgumentParser(
        description='Code for Feature Matching with FLANN tutorial.')
    parser.add_argument('--input1',
                        help='Path to input image 1.',
                        default='rec_small1.jpg')
    parser.add_argument('--input2',
                        help='Path to input image 2.',
                        default='facing1.jpg')
    args = parser.parse_args()
    img_object = cv.imread(args.input1, cv.IMREAD_GRAYSCALE)
    img_scene = cv.imread(args.input2, cv.IMREAD_GRAYSCALE)
    if img_object is None or img_scene is None:
        print('Could not open or find the images!')
        exit(0)
    #-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
    minHessian = 400
    detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
    keypoints_obj, descriptors_obj = detector.detectAndCompute(
        img_object, None)
    keypoints_scene, descriptors_scene = detector.detectAndCompute(
        img_scene, None)
    #-- Step 2: Matching descriptor vectors with a FLANN based matcher
    # Since SURF is a floating-point descriptor NORM_L2 is used
    matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)
    knn_matches = matcher.knnMatch(descriptors_obj, descriptors_scene, 2)
    #-- Filter matches using the Lowe's ratio test
    ratio_thresh = 0.75
    good_matches = []
    for m, n in knn_matches:
        if m.distance < ratio_thresh * n.distance:
            good_matches.append(m)
    #-- Draw matches
    img_matches = np.empty(
        (max(img_object.shape[0],
             img_scene.shape[0]), img_object.shape[1] + img_scene.shape[1], 3),
        dtype=np.uint8)
    cv.drawMatches(img_object,
                   keypoints_obj,
                   img_scene,
                   keypoints_scene,
                   good_matches,
                   img_matches,
                   flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
    #-- Localize the object
    obj = np.empty((len(good_matches), 2), dtype=np.float32)
    scene = np.empty((len(good_matches), 2), dtype=np.float32)
    for i in range(len(good_matches)):
        #-- Get the keypoints from the good matches
        obj[i, 0] = keypoints_obj[good_matches[i].queryIdx].pt[0]
        obj[i, 1] = keypoints_obj[good_matches[i].queryIdx].pt[1]
        scene[i, 0] = keypoints_scene[good_matches[i].trainIdx].pt[0]
        scene[i, 1] = keypoints_scene[good_matches[i].trainIdx].pt[1]
    H, _ = cv.findHomography(obj, scene, cv.RANSAC)
    #-- Get the corners from the image_1 ( the object to be "detected" )
    obj_corners = np.empty((4, 1, 2), dtype=np.float32)
    obj_corners[0, 0, 0] = 0
    obj_corners[0, 0, 1] = 0
    obj_corners[1, 0, 0] = img_object.shape[1]
    obj_corners[1, 0, 1] = 0
    obj_corners[2, 0, 0] = img_object.shape[1]
    obj_corners[2, 0, 1] = img_object.shape[0]
    obj_corners[3, 0, 0] = 0
    obj_corners[3, 0, 1] = img_object.shape[0]
    scene_corners = cv.perspectiveTransform(obj_corners, H)
    #-- Draw lines between the corners (the mapped object in the scene - image_2 )
    cv.line(img_matches, (int(scene_corners[0,0,0] + img_object.shape[1]), int(scene_corners[0,0,1])),\
        (int(scene_corners[1,0,0] + img_object.shape[1]), int(scene_corners[1,0,1])), (0,255,0), 4)
    cv.line(img_matches, (int(scene_corners[1,0,0] + img_object.shape[1]), int(scene_corners[1,0,1])),\
        (int(scene_corners[2,0,0] + img_object.shape[1]), int(scene_corners[2,0,1])), (0,255,0), 4)
    cv.line(img_matches, (int(scene_corners[2,0,0] + img_object.shape[1]), int(scene_corners[2,0,1])),\
        (int(scene_corners[3,0,0] + img_object.shape[1]), int(scene_corners[3,0,1])), (0,255,0), 4)
    cv.line(img_matches, (int(scene_corners[3,0,0] + img_object.shape[1]), int(scene_corners[3,0,1])),\
        (int(scene_corners[0,0,0] + img_object.shape[1]), int(scene_corners[0,0,1])), (0,255,0), 4)
    #-- Show detected matches
    cv.imshow('Good Matches & Object detection', img_matches)
    # cv.imwrite('Good Matches&Object detection1.jpg',img_matches)
    # cv.imshow('Good Matches&Object detection1.jpg',img_matches)
    # print(int(scene_corners[0,0,0]),img_object.shape[1],int(scene_corners[0,0,1]))
    # print('Good Matches&Object detection1'+str(c) + '.jpg')
    center_point0 = (int(scene_corners[0, 0, 0] + img_object.shape[1]) +
                     int(scene_corners[2, 0, 0] + img_object.shape[1])) / 2
    center_point1 = (int(scene_corners[0, 0, 1]) +
                     int(scene_corners[2, 0, 1])) / 2
    center_point = [center_point0, center_point1]
    # return(center_point)
    print(center_point)
    while (True):
        # Press Q on keyboard to  exit
        if cv.waitKey(25) & 0xFF == ord('q'):
            break
Code example #34
    def plot_matches(self,
                     image1,
                     keypoints1,
                     image2,
                     keypoints2,
                     matches,
                     color_matches,
                     color_keypoints,
                     mask=None,
                     color_outliers=None):
        """!
        Draws the given keypoints and matches onto the given images. The resulting image is of width image1.width + image2.width
        An optional mask determines which matches to draw, and if color_outliers is set the mask is additionally treated as an inlier/outlier split drawn in different colors.

        @param image1 The first image
        @param keypoints1 The keypoints of the first image
        @param image2 The second image
        @param keypoints2 The keypoints of the second image.
        @param matches The matches to plot
        @param color_matches: Color Triple (blue, green, red) with values from 0 to 255. Matches will be drawn in this color.
        @param color_keypoints Color Triple (blue, green, red) with values from 0 to 255. Keypoints will be drawn in this color.
        @param mask optional parameter. Mask determining which matches are drawn. Has to be of same size as matches.
        @param color_outliers optional parameter. Color Triple (blue, green, red) with values from 0 to 255. Requires mask to be set as well. The masks values will be handled as inliers and will be drawn in color_matches and the outliers will be drawn in color_outliers.

        @return image_with_matches: returns image of size image1.width + image2.width with keypoints and matches drawn according to parameters
        """
        if image1 is None:
            raise ValueError('Image1 is not a valid image.')

        if image2 is None:
            raise ValueError('Image2 is not a valid image.')

        if (not isinstance(color_matches, tuple)
            ) or (len(color_matches) != 3) or (not all(
                isinstance(x, int)
                for x in color_matches)) or (not all(x >= 0 and x <= 255
                                                     for x in color_matches)):
            raise ValueError(
                'Color_matches needs to be of format (b, g, r) with integer values from 0 to 255.'
            )

        if (not isinstance(color_keypoints,
                           tuple)) or (len(color_keypoints) != 3) or (not all(
                               isinstance(x, int)
                               for x in color_keypoints)) or (not all(
                                   x >= 0 and x <= 255
                                   for x in color_keypoints)):
            raise ValueError(
                'Color_keypoints needs to be of format (b, g, r) with integer values from 0 to 255.'
            )

        if mask is None and color_outliers is not None:
            raise ValueError(
                'If color_outliers is set, mask needs to be set as well.')

        # Plot all matches
        if mask is None and color_outliers is None:
            return cv2.drawMatches(image1, keypoints1, image2, keypoints2,
                                   matches, None, color_matches,
                                   color_keypoints)

        if len(mask) != len(matches):
            raise ValueError('Mask and matches need to be of same length.')

        if mask is not None and color_outliers is None:
            return cv2.drawMatches(image1, keypoints1, image2, keypoints2,
                                   matches, None, color_matches,
                                   color_keypoints, mask)

        if (not isinstance(color_outliers,
                           tuple)) or (len(color_outliers) != 3) or (not all(
                               isinstance(x, int)
                               for x in color_outliers)) or (not all(
                                   x >= 0 and x <= 255
                                   for x in color_outliers)):
            raise ValueError(
                'Color_outliers needs to be of format (b, g, r) with integer values from 0 to 255.'
            )
        # Plot inliers and outliers in different colors
        inliers = mask.ravel().tolist()
        outliers = inliers.copy()
        for i in range(len(outliers)):
            if outliers[i] == 0:
                outliers[i] = 1
            else:
                outliers[i] = 0
        out = cv2.drawMatches(image1, keypoints1, image2, keypoints2, matches,
                              None, color_matches, color_keypoints, inliers)
        return cv2.drawMatches(image1, keypoints1, image2, keypoints2, matches,
                               out, color_outliers, color_keypoints, outliers,
                               1)
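The docstring above describes plot_matches in detail, but the class it belongs to is not shown. A hypothetical usage sketch follows (plotter stands for an instance of that class; the image paths, ORB setup, and RANSAC step are assumptions made only to produce an inlier mask of the right length):

import cv2
import numpy as np

image1 = cv2.imread('frame_a.png')   # placeholder paths
image2 = cv2.imread('frame_b.png')

orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(image1, None)
kp2, des2 = orb.detectAndCompute(image2, None)

bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)

# A RANSAC homography supplies a mask of the same length as matches.
src = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
dst = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
H, mask = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)

vis = plotter.plot_matches(image1, kp1, image2, kp2, matches,
                           color_matches=(0, 255, 0),    # inliers in green
                           color_keypoints=(255, 0, 0),
                           mask=mask,
                           color_outliers=(0, 0, 255))   # outliers in red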
Code example #35
File: main.py Project: Szczukox/KCK
# Match descriptors.
matches20 = bf.match(des20, des)
matches50 = bf.match(des50, des)
matches100 = bf.match(des100, des)
matches_puste = bf.match(des_puste, des)

# Sort them in the order of their distance.
matches20 = sorted(matches20, key=lambda x: x.distance)
matches50 = sorted(matches50, key=lambda x: x.distance)
matches100 = sorted(matches100, key=lambda x: x.distance)
matches_puste = sorted(matches_puste, key=lambda x: x.distance)

# Draw matches.
if len(matches20) > len(matches50) and len(matches20) > len(matches100):
    img3 = cv2.drawMatches(img20, kp20, img, kp, matches20, None, flags=2)
    print("*** WYKRYTO 20ZŁ ***")
elif len(matches50) > len(matches20) and len(matches50) > len(matches100):
    img3 = cv2.drawMatches(img50, kp50, img, kp, matches50, None, flags=2)
    print("*** WYKRYTO 50ZŁ ***")
elif len(matches100) > len(matches20) and len(matches100) > len(matches50):
    img3 = cv2.drawMatches(img100, kp100, img, kp, matches100, None, flags=2)
    print("*** WYKRYTO 100ZŁ ***")
else:
    img3 = cv2.drawMatches(img_puste, kp_puste, img, kp, matches_puste, None, flags=2)
    print("*** NIE WYKRYTO NIC ***")

while True:
    cv2.imshow('frame', img3)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
Code example #36
def draw_key_sift(path1, path2):
    # bf = cv2.BFMatcher()
    # match = bf.knnMatch(des1, des2, k=2)
    # match2 = bf.knnMatch(des1, des3, k=2)
    # good = []
    # good2 = []
    # for m, n in match:
    #     if m.distance < 0.75 * n.distance:
    #         good.append([m])
    #
    # for m, n in match2:
    #     if m.distance < 0.75 * n.distance:
    #         good2.append([m])
    # print(len(good))
    # print(len(good2))
    # # print("n: ", match.n, "\n m", match.m)
    # img4 = cv2.drawMatchesKnn(img1, key1, img2, key2, good, None, flags=2)
    # img5 = cv2.drawMatchesKnn(img1, key1, img3, key3, good, None, flags=2)

    # FLANN parameters

    img1 = cv2.imread(path1)
    img2 = cv2.imread(path2)

    sift = cv2.xfeatures2d.SIFT_create()

    key1, des1 = sift.detectAndCompute(img1, None)
    key2, des2 = sift.detectAndCompute(img2, None)

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)  # or pass empty dictionary

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(des1, des2, k=2)
    # Need to draw only good matches, so create a mask
    # matchesMask1 = [[0, 0] for i in range(len(matches1))]
    # matchesMask2 = [[0, 0] for i in range(len(matches2))]
    # ratio test as per Lowe's paper
    # for i, (m, n) in enumerate(matches1):
    #     if m.distance < 0.7 * n.distance:
    #         matchesMask1[i] = [1, 0]
    #
    # for i, (m, n) in enumerate(matches2):
    #     if m.distance < 0.7 * n.distance:
    #         matchesMask2[i] = [1, 0]
    #
    # draw_params1 = dict(matchColor=(0, 255, 0),
    #                     singlePointColor=(255, 0, 0),
    #                     matchesMask=matchesMask1,
    #                     flags=0)
    #
    # draw_params2 = dict(matchColor=(0, 255, 0),
    #                     singlePointColor=(255, 0, 0),
    #                     matchesMask=matchesMask2,
    #                     flags=0)
    # if len(matches1) >= len(matches2):
    #     img6 = cv2.drawMatchesKnn(img1, key1, img2, key2, matches1, None, **draw_params1)
    #     cv2.imshow("Image", img6)
    #     cv2.waitKey(0)
    #     cv2.destroyAllWindows()
    # else:

    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)

    MIN_MATCH_COUNT = 10

    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([key1[m.queryIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([key2[m.trainIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()
        h, w, d = img1.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
    else:
        matchesMask = None

    draw_params = dict(
        matchColor=(0, 255, 0),  # draw matches in green color
        singlePointColor=None,
        matchesMask=matchesMask,  # draw only inliers
        flags=2)

    matching_result = cv2.drawMatches(img1, key1, img2, key2, good, None,
                                      **draw_params)
    img = Image.fromarray(cv2.cvtColor(matching_result, cv2.COLOR_BGR2RGB))
    return img
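A possible call to draw_key_sift; the file names below are placeholders, not paths from the original project.

# Hypothetical usage of draw_key_sift (placeholder paths).
result = draw_key_sift('query.jpg', 'scene.jpg')
result.save('sift_matches.png')  # the function returns a PIL Image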
コード例 #37
0
matches = bf.match(des1, des2)

good = sorted(matches, key=lambda x: x.distance)

if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

if np.shape(M) == ():
    print("No transformation possible")
#return None, None

## derive rotation angle from homography

theta = -math.atan2(M[0, 1], M[0, 0]) * 180 / math.pi

total_move = src_pts[0][0] - dst_pts[0][0]

Horizontal = total_move[0]
Vertical = total_move[1]

print "Displacement in X:", Horizontal
print "Displacement in Y:", Vertical
print "Angle detected:", abs(theta)

img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[1:10], None, flags=2)
cv2.imwrite("output.jpg", img3)
コード例 #38
0
plt.figure(5)
plt.imshow((cv2.drawKeypoints(train_img_gray, train_kp, train_img.copy())))
plt.title('Train Image Keypoints')

plt.figure(6)
plt.imshow((cv2.drawKeypoints(query_img_gray, query_kp, query_img.copy())))
plt.title('Query Image Keypoints')

# create a BFMatcher object which will match up the SIFT features
bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)

matches = bf.match(train_desc, query_desc)

# Sort the matches in the order of their distance.
matches = sorted(matches, key=lambda x: x.distance)

# draw the top N matches
N_MATCHES = 100

match_img = cv2.drawMatches(train_img,
                            train_kp,
                            query_img,
                            query_kp,
                            matches[:N_MATCHES],
                            query_img.copy(),
                            flags=0)

plt.figure(7)
plt.imshow(match_img)
plt.title('SIFT Detection')
plt.show()
コード例 #39
0
                    [kp1[matches[i].queryIdx].pt for i in range(len(matches))])
                right_point = np.asarray(
                    [kp2[matches[i].trainIdx].pt for i in range(len(matches))])
                left_point[:, 0] += xg
                left_point[:, 1] += yg
                right_point[:, 0] += xd
                right_point[:, 1] += yd
                while len(left_point) < nbr:
                    left_point = np.append(left_point, [[1e17, 1e17]], axis=0)
                    right_point = np.append(right_point, [[1e17, 1e17]],
                                            axis=0)

                img = cv2.drawMatches(frameg[yg:yg + hg, xg:xg + wg],
                                      kp1,
                                      framed[yd:yd + hd, xd:xd + wd],
                                      kp2,
                                      matches,
                                      None,
                                      flags=2)

                cv2.namedWindow("ok", cv2.WINDOW_NORMAL)
                cv2.imshow("ok", img)
                key = cv2.waitKey(1)
                if key & 0xFF == ord('q'):
                    break
                if count > 0:
                    trajectory_camera_coord_gauche = np.append(
                        trajectory_camera_coord_gauche, left_point, axis=0)
                    trajectory_camera_coord_droite = np.append(
                        trajectory_camera_coord_droite, right_point, axis=0)
                if count == 0:
コード例 #40
0
print('good matches:%d/%d' % (len(good_matches), len(matches)))

# Get the source-image coordinates from the queryIdx of the good matches
src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches])
# Get the target-image coordinates from the trainIdx of the good matches
dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches])

# Compute the perspective transform matrix with findHomography()
mtrx, mask = cv2.findHomography(src_pts, dst_pts)

# Build the corner coordinates of the source image
h, w = img1.shape[:2]
pts = np.float32([[[0, 0]], [[0, h - 1]], [[w - 1, h - 1]], [[w - 1, 0]]])

# Transform the source-image corners with perspectiveTransform()
dst = cv2.perspectiveTransform(pts, mtrx)

# Draw the transformed region on the target image
img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)

# Draw the good matches and show the result
res = cv2.drawMatches(img1,
                      kp1,
                      img2,
                      kp2,
                      good_matches,
                      None,
                      flags=cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)
cv2.imshow('Matching Homography', res)
cv2.waitKey()
cv2.destroyAllWindows()
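The excerpt above begins after matching. A sketch of the kind of setup it assumes (SIFT keypoints for both images and a ratio-test-filtered good_matches list); the file names and the ratio value are placeholders:

# Assumed setup for the homography excerpt above.
import cv2
import numpy as np

img1 = cv2.imread('query.jpg')
img2 = cv2.imread('scene.jpg')

detector = cv2.SIFT_create()  # cv2.xfeatures2d.SIFT_create() on older OpenCV builds
kp1, des1 = detector.detectAndCompute(img1, None)
kp2, des2 = detector.detectAndCompute(img2, None)

matcher = cv2.BFMatcher()
matches = matcher.knnMatch(des1, des2, k=2)

# Lowe's ratio test keeps only the good matches used above
good_matches = [m for m, n in matches if m.distance < 0.75 * n.distance]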
コード例 #41
0
feature = cv2.KAZE_create()
#feature = cv2.AKAZE_create()
#feature = cv2.ORB_create()

# Detect keypoints and compute descriptors
kp1, desc1 = feature.detectAndCompute(src1, None)
kp2, desc2 = feature.detectAndCompute(src2, None)

# Match the descriptors
matcher = cv2.BFMatcher_create()
#matcher = cv2.BFMatcher_create(cv2.NORM_HAMMING)
matches = matcher.knnMatch(desc1, desc2, 2)

# Keep only the good matches (ratio test)
good_matches = []
for m in matches:
    if m[0].distance / m[1].distance < 0.7:
        good_matches.append(m[0])

print('# of kp1:', len(kp1))
print('# of kp2:', len(kp2))
print('# of matches:', len(matches))
print('# of good_matches:', len(good_matches))

# Render the match visualization image
dst = cv2.drawMatches(src1, kp1, src2, kp2, good_matches, None)

cv2.imshow('dst', dst)
cv2.waitKey()
cv2.destroyAllWindows()
コード例 #42
0
    def detectRDT(self, img, cnt=5):
        print('[INFO] start detectRDT')
        startTime = time.time()
        height, width = img.shape
        p1 = (0, int(height * (1 - (VIEW_FINDER_SCALE_W) / CROP_RATIO) / 2))
        p2 = (int(width - p1[0]), int(height - p1[1]))
        #"""
        #    ==================
        #    TODO: this mask is still HARDCODEDDDDDD!!!!!!
        #    =================
        #"""
        #p1 = (int(width * (1 - VIEW_FINDER_SCALE_W)/2 * CROP_RATIO) , 0)
        #p2 = (int(width - p1[0]), int(height - p1[1] - 65))
        roi = img[p1[1]:p2[1], p1[0]:p2[0]]
        # img = roi
        mask = np.zeros((height, width), np.uint8)
        mask[p1[1]:p2[1], p1[0]:p2[0]] = 255
        # show_image(img)
        # show_image(mask)
        # keypoints, descriptors = self.siftDetector.detectAndCompute(img, None)
        # show_image(mask)
        keypoints, descriptors = self.siftDetector.detectAndCompute(img, mask)
        print('[INFO] detect/compute time: ', time.time() - startTime)
        print('[INFO] descriptors')
        print(descriptors)
        # TODO: Find condition for this
        # if (descriptors == None or all(descriptors)):
        #     print('[WARNING] No Features on input')
        #     return None

        # Matching
        if descriptors is None:
            return None
        matches = self.matcher.knnMatch(self.refSiftDescriptors,
                                        descriptors,
                                        k=2)
        print('[INFO] Finish matching')
        print('matches', matches)
        # Apply ratio test
        good = []
        if matches is None or len(matches) == 0 or len(matches[0]) < 2:
            return None
        for m, n in matches:
            if m.distance < 0.80 * n.distance:
                good.append(m)

        matchingImage = cv.drawMatches(self.fluRefImg,
                                       self.refSiftKeyPoints,
                                       img,
                                       keypoints,
                                       good,
                                       None,
                                       flags=2)
        # plt.imshow(matchingImage)
        # plt.title('SIFT Brute Force matching')
        # plt.show()

        sum = 0
        distance = 0
        count = 0

        # store all the good matches as per Lowe's ratio test.
        img2 = None
        dst = None
        print('[INFO] matches')

        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([
                self.refSiftKeyPoints[m.queryIdx].pt for m in good
            ]).reshape(-1, 1, 2)
            dst_pts = np.float32([keypoints[m.trainIdx].pt
                                  for m in good]).reshape(-1, 1, 2)
            # print('src_pts', src_pts)
            # print('dst_pst', dst_pts)
            M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, cnt)
            print('[INFO] Finish finding Homography')
            # print('[INFO] M Matrix', M)
            # print('[INFO] mask', mask)
            matchesMask = mask.ravel().tolist()
            h, w = self.fluRefImg.shape
            pts = np.float32([[0, 0], [w - 1, 0], [w - 1, h - 1],
                              [0, h - 1]]).reshape(-1, 1, 2)
            if M is None or M.size == 0:
                return None
            dst = cv.perspectiveTransform(pts, M)
            print('[INFO] dst transformation pts', dst)
            img2 = np.copy(img)
            img2 = cv.polylines(img2, [np.int32(dst)], True, (255, 0, 0))
            pts_box = cv.minAreaRect(dst)
            box = cv.boxPoints(pts_box)  # cv2.boxPoints(rect) for OpenCV 3.x
            box = np.int0(box)
            cv.drawContours(img2, [box], 0, (0, 0, 255), 2)
            print('[INFO] finish perspective transform')
            print(box)
            # show_image(roi)
            # show_image(img2)
            # img2=None
            new_dst = np.copy(dst)
            #for i in list(range(0, 4)):
            #    min_dist = 99999999
            #    min_j = -1
            #    for j in list(range(0, 4)):
            #        print(box[i], dst[j])
            #        dist = pow(box[i][0]-dst[j][0][0], 2) + pow(box[i][1]-dst[j][0][1], 2)

            #        if dist < min_dist:
            #            print('---min', dist, j, box[i], new_dst[j])
            #            min_dist = dist
            #            min_j = j
            #    new_dst[min_j][0][0] = box[i][0]
            #    new_dst[min_j][0][1] = box[i][1]
            for i in list(range(0, 4)):
                if pts_box[2] < -45:
                    new_dst[(i + 2) % 4] = [box[i]]
                else:
                    new_dst[(i + 3) % 4] = [box[i]]

            dst = np.copy(new_dst)
            print(dst)

        else:
            print("Not enough matches are found - {}/{}".format(
                len(good), MIN_MATCH_COUNT))
            matchesMask = None
            return None

        draw_params = dict(
            matchColor=(255, 0, 0),  # draw matches in blue (BGR)
            singlePointColor=None,
            matchesMask=matchesMask,  # draw only inliers
            flags=2)
        img3 = cv.drawMatches(self.fluRefImg, self.refSiftKeyPoints, img2,
                              keypoints, good, None, **draw_params)
        #plt.imshow(img3, 'gray'),plt.show()
        # show_image(img3)

        h, w = self.fluRefImg.shape
        #refBoundary = np.float32([ [0,0], [0,h-1],[w-1,h-1],[w-1,0]]).reshape(-1,1,2)
        refBoundary = np.float32([[0, 0], [w - 1, 0], [w - 1, h - 1],
                                  [0, h - 1]]).reshape(-1, 1, 2)
        print('Refboundary', refBoundary)
        print('Boundary', dst)
        M = cv.getPerspectiveTransform(dst, refBoundary)
        print('M matrix', M)
        transformedImage = cv.warpPerspective(
            img, M, (self.fluRefImg.shape[1], self.fluRefImg.shape[0]))
        # show_image(roi)
        # show_image(transformedImage)

        return dst
コード例 #43
0
ファイル: findcorrespondences.py プロジェクト: ooCheese/vc
    
    # goodFeaturesToTrack fires only once per corner (i.e., within its immediate neighbourhood), whereas Harris does not.

    #2 
    gftt_img = cv2.goodFeaturesToTrack(blur_img,20,0.1,1)
    show_result(img_left,img_right,gftt_img,True,path="out/goodFeaturesToTrack.png")

    #3

    orb = cv2.ORB_create(3000)
    print(orb)
    kp1, des1 = orb.detectAndCompute(img_left,None)
    kp2, des2 = orb.detectAndCompute(img_right,None)

    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(des1, des2)

    matches = sorted(matches, key = lambda x:x.distance)

    orb_img = np.hstack((img_left,img_right))
    kp3, des3 = orb.detectAndCompute(orb_img,None)

    orb_img = cv2.drawMatches(img_left,kp1,img_right,kp2,matches,orb_img, flags=2)

    orb_img = cv2.drawKeypoints(orb_img,kp3,orb_img,color=(0,0,255))
    
    cv2.namedWindow("img",cv2.WINDOW_NORMAL)
    cv2.imwrite("out/orb.png",orb_img)
    cv2.imshow("img",orb_img)
    cv2.waitKey(0)
コード例 #44
0
    #     0, 255, 0), flags=0)
    # kp2img = cv2.drawKeypoints(img2, kp2, None, color=(
    #     0, 255, 0), flags=0)
    # cv2.imshow('img1kp', kp1img)
    # cv2.imshow('img2kp', kp2img)

    # Match the `keypoints` of the two images
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(des1, des2)
    matches = sorted(matches, key=lambda x: x.distance)

    # Draw the 20 best pairs of matched `keypoints`
    matches_img = cv2.drawMatches(
        img1,
        kp1,
        img2,
        kp2,
        matches[:20],
        None,
        flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)

    # Run `RANSAC` to compute the transform matrix `H`
    A = np.array([kp1[m.queryIdx].pt for m in matches])
    B = np.array([kp2[m.trainIdx].pt for m in matches])
    H = ransacMatching(A, B)
    # H = ransacMatching2(A, B)

    # Draw the border of the original image (red) and that border transformed by `H` (blue)
    rows, cols, _ = img1.shape
    pts = np.array(
        [[0, 0], [0, rows - 1], [cols - 1, rows - 1], [cols - 1, 0]],
        dtype=np.float32).reshape(-1, 1, 2)
コード例 #45
0
kp1, des1 = orb.detectAndCompute(img1_01, None)
kp2, des2 = orb.detectAndCompute(img1_02, None)

bf = cv2.BFMatcher.create()
matches = bf.knnMatch(des1, des2, k=2)
#matches = sorted(matches, key=lambda x: x.distance)

## Search for the best matching points (debugging)
goodPoints = []
for m, n in matches:
    print(m.queryIdx, m.trainIdx)
    if m.distance < 0.65 * n.distance:
        goodPoints.append(m)
print("最优的匹配点的个数:", len(goodPoints))
draw_Params = dict(matchColor=(0, 255, 0), singlePointColor=None, flags=2)
img3 = cv2.drawMatches(img1_01, kp1, img1_02, kp2, goodPoints, None,
                       **draw_Params)

MIN_MATCH_COUNT = 10
if len(goodPoints) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt
                          for m in goodPoints]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt
                          for m in goodPoints]).reshape(-1, 1, 2)

    M, mask = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)

    h, w = img1_gray.shape
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                      [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)
    img2_gray = cv2.polylines(img1_gray, [np.int32(dst)], True, 255, 3,
                              cv2.LINE_AA)
コード例 #46
0
ファイル: hw5.py プロジェクト: TingWeiHuang22/homework5
def alignImages(im1, im2, meth='ORB'):
    input_img1 = im1.copy()
    input_img2 = im2.copy()
    # input_img1 = cv2.cvtColor(input_img1, cv2.COLOR_BGR2GRAY)
    # input_img2 = cv2.cvtColor(input_img2, cv2.COLOR_BGR2GRAY)
    if TF == '/TRUE/':
        cross = True
    else:
        cross = False
    print(meth)
    if meth == 'ORB':
        # Detect ORB features and compute descriptors.
        detector = cv2.ORB_create(MAX_FEATURES)
        matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=cross)
        # Convert images to grayscale

    elif meth == 'SURF':
        detector = cv2.xfeatures2d.SURF_create(MAX_FEATURES)
        matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=cross)
    elif meth == 'SIFT':
        detector = cv2.xfeatures2d.SIFT_create(MAX_FEATURES)
        matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=cross)

    keypoints1, descriptors1 = detector.detectAndCompute(input_img1, None)
    keypoints2, descriptors2 = detector.detectAndCompute(input_img2, None)

    # Match features.
    # create BFMatcher object
    # matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    # matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(descriptors1, descriptors2, None)

    # Sort matches by score
    matches.sort(key=lambda x: x.distance, reverse=False)

    # Remove not so good matches
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:numGoodMatches]

    # print(type(keypoints1))

    # Extract location of good matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)

    new_matches = []
    for i, match in enumerate(matches):
        if keypoints1[match.queryIdx].pt[0] > 360 and keypoints1[
                match.queryIdx].pt[0] < 720 and keypoints1[match.queryIdx].pt[
                    1] > 270 and keypoints1[match.queryIdx].pt[1] < 810:
            if keypoints2[match.trainIdx].pt[0] > 360 and keypoints2[
                    match.trainIdx].pt[0] < 720 and keypoints2[
                        match.trainIdx].pt[1] > 270 and keypoints2[
                            match.trainIdx].pt[1] < 810:
                new_matches.append(match)
                points1[i, :] = keypoints1[match.queryIdx].pt
                points2[i, :] = keypoints2[match.trainIdx].pt

    # Draw top matches
    imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, new_matches,
                                None)
    # cv2.imwrite("matches.jpg", imMatches)

    # Find homography
    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)

    # Use homography
    height, width, channels = im2.shape
    im1Reg = cv2.warpPerspective(im1, h, (width, height))

    return im1Reg, h, imMatches, matches, keypoints1, keypoints2
コード例 #47
0
img = cv2.imread(filename)
match = cv2.imread(matchfile)
img=cv2.resize(img, (0, 0), fx=0.1, fy=0.1)
match=cv2.resize(match,(0,0),fx=0.1,fy=0.1)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
matchgray=cv2.cvtColor(match,cv2.COLOR_BGR2GRAY)
bfmatcher = cv2.BFMatcher_create(cv2.NORM_L2,crossCheck=True)

sift = cv2.xfeatures2d.SIFT_create()
corners = cv2.cornerHarris(gray,2,0,0.04)
kpsCorners = np.argwhere(corners>0.1*corners.max())
kpsCorners = [cv2.KeyPoint(pt[1],pt[0],3) for pt in kpsCorners]
grayWithCorners = cv2.drawKeypoints(gray,kpsCorners,None,flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)
kpsCorners,dscCorners = sift.compute(gray,kpsCorners)
kp = sift.detect(gray,None)
grayWithSift =cv2.drawKeypoints(gray,kp,None,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
kp,dsc =sift.compute(gray,kp)

corners = cv2.cornerHarris(matchgray,2,0,0.04)
kpsCorners2 = np.argwhere(corners>0.1*corners.max())
kpsCorners2 = [cv2.KeyPoint(pt[1],pt[0],3) for pt in kpsCorners2]
#grayWithCorners = cv2.drawKeypoints(gray,kpsCorners,None,flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)
kpsCorners2,dscCorners2 = sift.compute(matchgray,kpsCorners2)

matchesCorners = bfmatcher.match(dscCorners,dscCorners2)
matchesCorners = sorted(matchesCorners,key=lambda  x:x.distance)
img3 = cv2.drawMatches(img,kpsCorners,match,kpsCorners2,matchesCorners[:10],None,flags=2)

cv2.imshow("match",img3)
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()
コード例 #48
0
ファイル: 04-homography.py プロジェクト: lizhan17/cvnote
for m, n in matches:
    if m.distance < 0.7 * n.distance:
        good.append(m)
if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()

    h, w = img1.shape
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                      [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)

    img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)

else:
    print("Not enough matches are found - %d/%d" % len(good), MIN_MATCH_COUNT)
    matchesMask = None

draw_params = dict(
    matchColor=(0, 255, 0),  # draw matches in green color
    singlePointColor=None,
    matchesMask=matchesMask,  # draw only inliers
    flags=2)

img3 = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)

plt.imshow(img3, 'gray'), plt.show()
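The excerpt above omits the detection and matching steps. It appears to follow the standard OpenCV feature-homography tutorial; a preamble of roughly the following shape is assumed (file names and FLANN parameters are placeholders):

# Assumed preamble for the homography excerpt above.
import cv2
import numpy as np
from matplotlib import pyplot as plt

MIN_MATCH_COUNT = 10

img1 = cv2.imread('box.png', cv2.IMREAD_GRAYSCALE)           # query image
img2 = cv2.imread('box_in_scene.png', cv2.IMREAD_GRAYSCALE)  # train image

sift = cv2.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

index_params = dict(algorithm=1, trees=5)  # FLANN_INDEX_KDTREE
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
good = []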
コード例 #49
0
ファイル: stich_it.py プロジェクト: Starks-AI/Image-Stitching
    matches = []

    # loop over the raw matches
    for m,n in rawMatches:
        # We ensure the distance is within a certain ratio of each other (i.e. Lowe's ratio test)
        if m.distance < n.distance * ratio:
            matches.append(m)
    return matches

print("Using: {} feature matcher".format(feature_matching))

fig = plt.figure(figsize=(20,8))

if feature_matching == 'bf':
    matches = matchKeyPointsBF(featuresA, featuresB, method=feature_extractor)
    img3 = cv2.drawMatches(img2,kpsA,img1,kpsB,matches[:100],
                           None,flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
elif feature_matching == 'knn':
    matches = matchKeyPointsKNN(featuresA, featuresB, ratio=0.75, method=feature_extractor)
    img3 = cv2.drawMatches(img2,kpsA,img1,kpsB,np.random.choice(matches,100),
                           None,flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)


plt.imshow(img3)
plt.show()

def getHomography(kpsA, kpsB, featuresA, featuresB, matches, reprojThresh):
    # convert the keypoints to numpy arrays
    kpsA = np.float32([kp.pt for kp in kpsA])
    kpsB = np.float32([kp.pt for kp in kpsB])

    if len(matches) > 4:
コード例 #50
0
    def matchAndImageCut(self, sift, origin, ori_kp, ori_des, typeName, featureConfig, imageConfig, job_id):
        # TODO: check file exists
        img_template = cv2.imread(featureConfig['file'], cv2.IMREAD_GRAYSCALE)

        img_detect = origin.copy()

        min_match_count = featureConfig['option'].get('minMatchCount', 50)
        distance_threshold = featureConfig['option'].get('matchDistance', 0.5)

        tpl_kp, tpl_des = sift.detectAndCompute(img_template, None)

        index_params = dict(algorithm=0, trees=5)  # algorithm = FLANN_INDEX_KDTREE
        search_params = dict(checks=50)

        flann = cv2.FlannBasedMatcher(index_params, search_params)

        matches = flann.knnMatch(tpl_des, ori_des, k=2)

        # store all the good matches as per Lowe's ratio test.
        good = []
        for m, n in matches:
            if m.distance < distance_threshold * n.distance:
                good.append(m)

        logging.info("Feature [%s] matches %s, min=%s, threshold=%.2f, good=%s" % (
            typeName, len(matches), min_match_count, distance_threshold, len(good)))

        if len(good) > min_match_count:
            src_pts = np.float32([tpl_kp[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([ori_kp[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

            # draw feature polyline in origin image
            pts = np.float32([[0, 0], [0, img_template.shape[0] - 1],
                              [img_template.shape[1] - 1, img_template.shape[0] - 1],
                              [img_template.shape[1] - 1, 0]]).reshape(-1, 1, 2)
            dst = cv2.perspectiveTransform(pts, M)

            cv2.polylines(img_detect, [np.int32(dst)], True, 0, 1)

            # draw detected image
            matchesMask = mask.ravel().tolist()
            draw_params = dict(matchColor=(0, 255, 0),  # draw matches in green color
                               singlePointColor=None,
                               matchesMask=matchesMask,  # draw only inliers
                               flags=2)

            draw_img = cv2.drawMatches(img_template, tpl_kp, origin, ori_kp, good, None, **draw_params)
            tools.writeImageJob(draw_img, job_id + '/step1', 'draw matching %s' % typeName)


            # draw normalize image's polyline in origin image

            normalized_pts = np.float32([
                [-1 * featureConfig['x'], -1 * featureConfig['y']],
                [-1 * featureConfig['x'], imageConfig['h'] - featureConfig['y'] - 1],
                [imageConfig['w'] - featureConfig['x'] - 1, imageConfig['h'] - featureConfig['y'] - 1],
                [imageConfig['w'] - featureConfig['x'] - 1, -1 * featureConfig['y']]]) \
                .reshape(-1, 1, 2)

            normalized_dst = cv2.perspectiveTransform(normalized_pts, M)
            cv2.polylines(img_detect, [np.int32(normalized_dst)], True, 0, 2)

            # add offset to src_pts so that it can create right matrix
            for p in src_pts:
                p[0][0] += featureConfig.get('x', 0)
                p[0][1] += featureConfig.get('y', 0)

            M2, mask2 = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)

            normalized_polygons = []
            for d in np.int32(normalized_dst).tolist():
                normalized_polygons.append({
                    'x': d[0][0],
                    'y': d[0][1]
                })



            return float(len(good)) / float(len(matches)), img_detect, M2, normalized_polygons
        else:
            return 0, None, None, None
コード例 #51
0
                    default='box_in_scene.png')
args = parser.parse_args()

img1 = cv.imread(cv.samples.findFile(args.input1), cv.IMREAD_GRAYSCALE)
img2 = cv.imread(cv.samples.findFile(args.input2), cv.IMREAD_GRAYSCALE)
if img1 is None or img2 is None:
    print('Could not open or find the images!')
    exit(0)

#-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
minHessian = 400
detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
keypoints1, descriptors1 = detector.detectAndCompute(img1, None)
keypoints2, descriptors2 = detector.detectAndCompute(img2, None)

#-- Step 2: Matching descriptor vectors with a brute force matcher
# Since SURF is a floating-point descriptor NORM_L2 is used
matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_BRUTEFORCE)
matches = matcher.match(descriptors1, descriptors2)

#-- Draw matches
img_matches = np.empty(
    (max(img1.shape[0], img2.shape[0]), img1.shape[1] + img2.shape[1], 3),
    dtype=np.uint8)
cv.drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches)

#-- Show detected matches
cv.imshow('Matches', img_matches)

cv.waitKey()
コード例 #52
0
ファイル: sift.py プロジェクト: Aniket-Gujarathi/d2-net-rord
    kp2, des2 = sift.detectAndCompute(im2, None)
    bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
    matches = bf.match(des1, des2)
    matches = sorted(matches, key=lambda x: x.distance)

    src_pts = np.array(np.float32([kp1[m.queryIdx].pt for m in matches]))
    dst_pts = np.array(np.float32([kp2[m.trainIdx].pt for m in matches]))

    model, inlier = ransac((src_pts, dst_pts),
                           AffineTransform,
                           min_samples=4,
                           residual_threshold=8,
                           max_trials=10000)
    n_inliers = np.sum(inlier)
    print('Number of inliers: %d.' % n_inliers)

    inlier_keypoints_left = [
        cv2.KeyPoint(point[0], point[1], 1) for point in src_pts[inlier]
    ]
    inlier_keypoints_right = [
        cv2.KeyPoint(point[0], point[1], 1) for point in dst_pts[inlier]
    ]
    placeholder_matches = [cv2.DMatch(idx, idx, 1) for idx in range(n_inliers)]

    image3 = cv2.drawMatches(im1, inlier_keypoints_left, im2,
                             inlier_keypoints_right, placeholder_matches, None)
    cv2.imwrite(
        '/scratch/udit/robotcar/overcast/ipm/sift/pair' + str(i) + '.png',
        image3)
    print(i)
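The excerpt above relies on scikit-image's RANSAC helpers without showing the imports; the assumed preamble would be something like:

# Imports assumed by the SIFT + RANSAC excerpt above.
import cv2
import numpy as np
from skimage.measure import ransac
from skimage.transform import AffineTransform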
コード例 #53
0
                      for m in good]).reshape(-1, 1, 2)
ptsImage2 = np.array([keypointsImage2[m.trainIdx].pt
                      for m in good]).reshape(-1, 1, 2)

# Getting homography matrix after applying RANSAC on
# well matched keypoints on both images with projection error <= 1
H, mask = cv2.findHomography(ptsImage1, ptsImage2, cv2.RANSAC)
print('Homography Matrix:')
print(H)

# Get 10 inlier matches after applying RANSAC
matchesMask = getInliers(mask, 10)
inlierImage = cv2.drawMatches(image1,
                              keypointsImage1,
                              image2,
                              keypointsImage2,
                              good,
                              None,
                              matchesMask=matchesMask,
                              flags=2)
cv2.imwrite('Results/task1_matches.jpg', inlierImage)

# Getting corners of image 1 in the 2nd plane
h, w, d = image1.shape
image1Corners = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                            [w - 1, 0]]).reshape(-1, 1, 2)
image1CornersPlane2 = np.squeeze(cv2.perspectiveTransform(image1Corners, H))

# The following function gives the max dimensions that image1 can
# take in the second plane (for displaying all pixels)
xMin, yMin, xMax, yMax = getExtremePoints(image1CornersPlane2)
コード例 #54
0
ファイル: kaze.py プロジェクト: mtlazul/computer-vision
def KAZE(prev_frame, frame):
    ## [load]
    gray0 = cv.cvtColor(prev_frame, cv.COLOR_BGR2GRAY)
    gray1 = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    ## [load]

    ## [KAZE]
    kaze = cv.KAZE_create()
    t1d = cv.getTickCount()
    kpts0, desc0 = kaze.detectAndCompute(gray0, None)
    kpts1, desc1 = kaze.detectAndCompute(gray1, None)
    t2d = cv.getTickCount()

    # time for detection
    tDetectKaze = 1000 * (t2d - t1d) / cv.getTickFrequency()
    ## [KAZE]

    ## [Brute-Force matching]
    matcher = cv.BFMatcher(cv.NORM_L2, crossCheck=False)
    t1m = cv.getTickCount()
    matches = matcher.knnMatch(desc0, desc1, 2)
    t2m = cv.getTickCount()

    # time for matching
    tMatchKaze = 1000 * (t2m - t1m) / cv.getTickFrequency()
    ## [Brute-Force matching]

    ## [ratio test filtering]
    matched = []
    match_ratio = 0.75
    for m, n in matches:
        if m.distance < match_ratio * n.distance:
            matched.append(m)
    ## [ratio test filtering]

    ## [draw final matches]
    res = np.empty((max(gray0.shape[0],
                        gray1.shape[0]), gray0.shape[1] + gray1.shape[1], 3),
                   dtype=np.uint8)
    cv.drawMatches(gray0,
                   kpts0,
                   gray1,
                   kpts1,
                   matched,
                   res,
                   flags=cv.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)
    ## [draw final matches]

    ## [RESULTS]
    print('\nKAZE Matching Results')
    print('*******************************')
    print('# Keypoints 1:                                        \t',
          len(kpts0))
    print('# Keypoints 2:                                        \t',
          len(kpts1))
    print('# Matches:                                            \t',
          len(matched))
    print('# Detection and Description Time (ms):                \t',
          tDetectKaze)
    print('# Matching Time (ms):                                 \t',
          tMatchKaze)

    return res
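A possible call to the KAZE() helper on two consecutive frames; the import alias and file names are assumptions, not part of the original example.

# Hypothetical usage of KAZE() (placeholder frame files).
import cv2 as cv

prev_frame = cv.imread('frame_000.png')
frame = cv.imread('frame_001.png')
res = KAZE(prev_frame, frame)
cv.imshow('KAZE matches', res)
cv.waitKey(0)
cv.destroyAllWindows()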
コード例 #55
0
kp1,des1 = orb.detectAndCompute(reeses,mask=None)

kp2,des2 = orb.detectAndCompute(cereals,mask=None)

bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

matches = bf.match(des1,des2)

# matches[i].distance -> level of similarity

# Sort the matches by distance
matches = sorted(matches, key=lambda x:x.distance)

## Draw Lines where the matches are
reeses_matches = cv2.drawMatches(reeses,kp1,cereals,kp2,matches[:25], None)

# -------------------- Sift 

sift = cv2.xfeatures2d.SIFT_create()

kp1,des1 = sift.detectAndCompute(reeses,mask=None)
kp2,des2 = sift.detectAndCompute(cereals,mask=None)

bf = cv2.BFMatcher()

# k number of best matches
# Return one array of 2 matches [[match1,match2],...]
matches = bf.knnMatch(des1,des2,k=2)

goodMatches = []
コード例 #56
0
# =============================================================================
import cv2
import numpy as np

img = cv2.imread('wd.jpg')
img2= cv2.imread('wd2.jpg')

gray1 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

sift = cv2.xfeatures2d.SIFT_create()

kp1, des1 = sift.detectAndCompute(gray1, None)
kp2, des2 = sift.detectAndCompute(gray2, None)

bf = cv2.BFMatcher()

matches = bf.match(des1, des2)

matches = sorted(matches, key = lambda x:x.distance)

result = np.zeros((img.shape[0], img.shape[1] + img2.shape[1]), np.uint8)

result = cv2.drawMatches(img, kp1, img2, kp2, matches[:30], result, (0, 255, 0), flags=0)

cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
コード例 #57
0
matchImg = np.zeros_like(img1)

grayImg1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
grayImg2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(grayImg1, None)
kp2, des2 = sift.detectAndCompute(grayImg2, None)

bf = cv2.BFMatcher_create(normType=cv2.NORM_L2, crossCheck=True)
matches = bf.match(des1, des2)
matches = sorted(matches, key=lambda x: x.distance)
matchImg = cv2.drawMatches(img1,
                           kp1,
                           img2,
                           kp2,
                           matches,
                           matchImg,
                           flags=cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)
# bf = cv2.BFMatcher_create(normType=cv2.NORM_L2, crossCheck=False)
# matches = bf.knnMatch(des1, des2, k=2)
# Filter matches by a distance ratio threshold
# good = []
# for m, n in matches:
#  if m.distance < 0.05*n.distance:
#     good.append([m])
#
# matchImg = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, matchImg, flags=2)

cv2.imshow('Match Image', matchImg)
コード例 #58
0
import cv2
import numpy as np
img1 = cv2.imread("the_book_thief.jpg", cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread("me_holding_book.jpg", cv2.IMREAD_GRAYSCALE)
# ORB Detector
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
# Brute Force Matching
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2)
matches = sorted(matches, key=lambda x: x.distance)
matching_result = cv2.drawMatches(img1,
                                  kp1,
                                  img2,
                                  kp2,
                                  matches[:50],
                                  None,
                                  flags=2)
cv2.imshow("Img1", img1)
cv2.imshow("Img2", img2)
cv2.imshow("Matching result", matching_result)
cv2.waitKey(0)
cv2.destroyAllWindows()
コード例 #59
0
def align_images(im1, im_ref, savematch=False):
    """
    对齐图像
    :param im1: 被对齐的图像,可为彩色或灰度
    :param im_ref: 参考图像,类型必须和 im1 保持一致
    :param savematch: 是否保存特征点匹配图,只能保存最后一次的匹配
    :return:
    """

    # 需要调整下面参数,保证全部试卷可以,又提高速度
    MAX_FEATURES = 1000
    GOOD_MATCH_PERCENT = 0.5

    # Convert images to grayscale
    if len(im1.shape) == 3:
        im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
        im2Gray = cv2.cvtColor(im_ref, cv2.COLOR_BGR2GRAY)
    else:
        im1Gray = im1
        im2Gray = im_ref

    # Detect ORB features and compute descriptors.
    orb = cv2.ORB_create(MAX_FEATURES)
    keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
    keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)

    # Match features.
    matcher = cv2.DescriptorMatcher_create(
        cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(descriptors1, descriptors2, None)

    # Sort matches by score
    matches.sort(key=lambda x: x.distance, reverse=False)

    # Remove not so good matches
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:numGoodMatches]

    if savematch:
        # Draw top matches
        imMatches = cv2.drawMatches(im1, keypoints1, im_ref, keypoints2,
                                    matches, None)
        cv2.imwrite("../result/matches.jpg", imMatches)

    # Extract location of good matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)

    for i, match in enumerate(matches):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt

    # Find homography
    # h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)

    # h, _ = cv2.estimateAffinePartial2D(points1, points2, method=cv2.RANSAC)
    h, _ = cv2.estimateAffine2D(points1, points2, method=cv2.RANSAC)

    # Use homography
    height, width = im_ref.shape[:2]
    # im1Reg = cv2.warpPerspective(im1, h, (width, height))
    im1Reg = cv2.warpAffine(im1,
                            h, (width, height),
                            flags=cv2.INTER_LINEAR,
                            borderMode=cv2.BORDER_REPLICATE)

    return im1Reg, h
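A possible call to align_images, with placeholder file names:

# Hypothetical usage of align_images (placeholder paths).
import cv2

scan = cv2.imread('scanned_sheet.jpg')
reference = cv2.imread('reference_sheet.jpg')
aligned, affine = align_images(scan, reference, savematch=False)
cv2.imwrite('aligned.jpg', aligned)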
コード例 #60
0
    good=np.asarray(good)

    h,w = image_mountain1.shape[:-1]
    pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
    dst = cv2.perspectiveTransform(pts,H)
    result1 = panoTwoImages(image_mountain2_color, image_mountain1_color, H)


else:
    print ("No good match - %d/%d" % (len(good),MIN_MATCH_COUNT))
    matchesMask = None
draw_params = dict(matchColor=(0, 200, 0),
                   singlePointColor=(0, 0, 0),
                   matchesMask=matchesMask,
                   flags=2)
img3 = cv2.drawMatches(image_mountain1_color,keypoints,image_mountain2_color,keypoints1,good,None,**draw_params)
cv2.imwrite("task1_matches.jpg",img3)
cv2.imwrite("task1_pano.jpg",result1)

#task2
tsucuba_left=cv2.imread("tsucuba_left.png", cv2.IMREAD_GRAYSCALE)
tsucuba_right=cv2.imread("tsucuba_right.png", cv2.IMREAD_GRAYSCALE)
tsucuba_left_color=cv2.imread("tsucuba_left.png")
tsucuba_right_color=cv2.imread("tsucuba_right.png")
sift=cv2.xfeatures2d.SIFT_create()
keypoints,descriptors=sift.detectAndCompute(tsucuba_left,None)
keypoints1,descriptors1=sift.detectAndCompute(tsucuba_right,None)
tsucuba_left=cv2.drawKeypoints(tsucuba_left_color,keypoints,None)
tsucuba_right=cv2.drawKeypoints(tsucuba_right_color,keypoints1,None)
cv2.imwrite("task2_sift1.jpg",tsucuba_left)
cv2.imwrite("task2_sift2.jpg",tsucuba_right)