def matched_key_points(n_stills):
    for i in range(2, n_stills + 1):
    
        #Loading images
        img1 = cv2.imread('Motion_Image' + str(i-1) + '.png')
        img2 = cv2.imread('Motion_Image' + str(i) + '.png')

        kaze = cv2.KAZE_create()  # note: in OpenCV 3, SIFT and SURF live in the opencv_contrib xfeatures2d module, so KAZE is used here

        kp1, des1 = kaze.detectAndCompute(img1,None)
        kp2, des2 = kaze.detectAndCompute(img2,None)

        bf = cv2.BFMatcher()
        # print type(des1)
        matches = bf.knnMatch(des1,des2,k=2)

        # print matches
        
        good = []
        for m,n in matches:
            if m.distance < 0.05*n.distance:
                good.append([m])

        # matches = sorted(matches, key = lambda x:x.distance)
        img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good,flags=2,outImg =None)

        plt.imshow(img3),plt.show()
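As a hedged aside: if neither KAZE nor the contrib SIFT/SURF modules are available, core OpenCV's ORB can stand in for the sketch above; ORB descriptors are binary, so the matcher should use the Hamming norm (the Motion_Image file naming is carried over from the example above).

import cv2
from matplotlib import pyplot as plt

def matched_key_points_orb(n_stills):
    # Same pairwise loop as above, but with ORB and Hamming-distance matching.
    for i in range(2, n_stills + 1):
        img1 = cv2.imread('Motion_Image' + str(i - 1) + '.png')
        img2 = cv2.imread('Motion_Image' + str(i) + '.png')

        orb = cv2.ORB_create()
        kp1, des1 = orb.detectAndCompute(img1, None)
        kp2, des2 = orb.detectAndCompute(img2, None)

        bf = cv2.BFMatcher(cv2.NORM_HAMMING)   # binary descriptors want the Hamming norm
        matches = bf.knnMatch(des1, des2, k=2)

        good = [[m] for m, n in matches if m.distance < 0.75 * n.distance]

        img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
        plt.imshow(img3), plt.show()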
Example #2
def match(img1, img2):
	sift = cv2.xfeatures2d.SIFT_create()
	kp1, des1 = sift.detectAndCompute(img1,None)
	kp1_img = cv2.drawKeypoints(img1, kp1, img1, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
	
	kp2, des2 = sift.detectAndCompute(img2,None)
	kp2_img=cv2.drawKeypoints(img2,kp2,img2,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

	bf = cv2.BFMatcher()
	matches = bf.knnMatch(des1,des2, k=2)
	
	good = []
	src = []
	dst = []
	for m,n in matches:
		if m.distance < 0.7*n.distance:
			good.append([m])
	
	src_pts = np.float32([ kp1[m[0].queryIdx].pt for m in good ]).reshape(-1,1,2)
	
	des_cor = [kp2[m[0].trainIdx].pt for m in good]
	dst_pts = np.float32(des_cor).reshape(-1,1,2)
	
	centroid = findCentroid(des_cor)
	
	img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good, img2, flags=2)
	
	M, mask = cv2.findHomography(src_pts, dst_pts)
	
	
	return (matches, kp1_img, kp2_img, img3, M, centroid)
Example #3
def comprassionFlann(frame1,frame2):
    good = []
    sift = cv.xfeatures2d.SURF_create(nOctaves=3)  # note: despite the variable name, this creates a SURF detector

    kp1, des1 = sift.detectAndCompute(frame1,None)
    kp2, des2 = sift.detectAndCompute(frame2,None)

    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv.FlannBasedMatcher(index_params,search_params)

    matches = flann.knnMatch(des1,des2,k=2)

    matchesMask = [[0,0] for i in range(len(matches))]

    for i,(m,n) in enumerate(matches):
        if m.distance < 0.7*n.distance:
            matchesMask[i]=[1,0]
            good.append(m)
    draw_params = dict(matchColor = (0,255,0),
                   singlePointColor = (255,0,0),
                   matchesMask = matchesMask,
                   flags = 0)

    img3 = cv.drawMatchesKnn(frame1,kp1,frame2,kp2,matches,None,**draw_params)
    #plt.imshow(img3),plt.show()
    return(img3,good)
Example #4
def comprassionWithImagesParallel(frame1,frame2):
    numberOfSlide = []

    sift = cv.xfeatures2d.SURF_create()

    #SURF
    #surf = cv.xfeatures2d.SURF_create()

    # Find the keypoints
    kpts1 = sift.detect(frame1,None)
    kpts2 = sift.detect(frame2,None)
    # Descriptor
    #descriptor = cv.BRISK_create()
    #brief = cv.xfeatures2d.BriefDescriptorExtractor_create()
    # Matcher
    matcher = cv.BFMatcher()

    good = []
    goods = []
    k1, d1 = sift.compute(frame1, kpts1)
    k2, d2 = sift.compute(frame2, kpts2)
    if (d1 is not None and d2 is not None):
        matches = matcher.knnMatch(d1,d2,k=2)
        try:
            for m,n in matches:
                if m.distance < 0.85*n.distance:
                    good.append([m])
                    goods.append(m)
                    
        except(ValueError):
            print("VALUEERROR")
            return(None,None)
    img3 = cv.drawMatchesKnn(frame1,k1,frame2,k2,good,frame1,flags=2)
    #plt.imshow(img3),plt.show()
    return(img3,good)
def processImagesFlannBasedMatcher(frame1,frame2):
    frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    frame2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
    # detector = cv2.xfeatures2d.SIFT_create()
    detector = cv2.xfeatures2d.SURF_create(500)
    kp1, desc1 = detector.detectAndCompute(frame1, None)
    kp2, desc2 = detector.detectAndCompute(frame2, None)

    # FlannBasedMatcher
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    # or pass empty dictionary
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params,search_params)
    matches = flann.knnMatch(desc1, desc2, k=2)

    # Need to draw only good matches, so create a mask
    matchesMask = [[0,0] for i in xrange(len(matches))]
    for i,(m,n) in enumerate(matches):
        if m.distance < 0.7*n.distance:
            matchesMask[i]=[1,0]

    draw_params = dict(matchColor = (0,255,0),
                       singlePointColor = (255,0,0),
                       matchesMask = matchesMask,
                       flags = 0)

    img = cv2.drawMatchesKnn(frame1,kp1,frame2,kp2,matches,None,**draw_params)
    cv2.imshow('demo',img)
Example #6
def flann_matcher():
    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    # FLANN parameters (FLANN_INDEX_KDTREE is assumed to be defined as 1 at module level)
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)  # or pass empty dictionary

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(des1, des2, k=2)

    # Need to draw only good matches, so create a mask
    matches_mask = [[0, 0] for _ in xrange(len(matches))]

    # ratio test as per Lowe's paper
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.7 * n.distance:
            matches_mask[i] = [1, 0]

    draw_params = dict(
        matchColor=(0, 255, 0),
        singlePointColor = (255, 0, 0),
        matchesMask = matches_mask,
        flags = 0
    )

    img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matches, None, **draw_params)

    plt.imshow(img3,), plt.show()
Example #7
	def search(self):
		if self.imageLink:
			img1 = cv2.imread(self.imageLink)
			img1 = imutils.resize(img1, width=1000)
			img1 = cv2.cvtColor(img1, cv2.COLOR_RGBA2GRAY)
			surf = cv2.xfeatures2d.SURF_create(2000,7,7)

			kp1, des1 = surf.detectAndCompute(img1, None)

			ans = {}

			for imagePath in glob.glob("logo/**/*.png"):
				img2 = cv2.imread(imagePath)
				img2 = cv2.cvtColor(img2, cv2.COLOR_RGBA2GRAY)
				img2 = imutils.resize(img2, width=500)
				kp2, des2 = surf.detectAndCompute(img2, None)
				bf = cv2.BFMatcher()
				matches = bf.knnMatch(des2, des1, k=2)
				good = []
				for m,n in matches:
				    if m.distance < 0.7*n.distance:
				        good.append([m])
				percent = len(good)
				ans.update({imagePath:percent})
				percent = str(round(percent, 2))
				img3 = cv2.drawMatchesKnn(img2, kp2, img1, kp1, good, None, flags=2)
				cv2.putText(img3, ("Good: " + str(len(good))), (10,400), 0, 1, (0,0,255),2)
				cv2.imwrite("static/result/" + str(os.path.basename(imagePath)), img3)

		if max(ans.values()) >= 20:
			result = str(max(ans, key=ans.get))
		else:
			result = 'UNKNOWN'
		return result
def ORB_Flann_Matching():
    img1 = cv2.imread('box.png',0)          # queryImage
    img2 = cv2.imread('box_in_scene.png',0) # trainImage

    # Initiate ORB detector
    orb = cv2.ORB_create()

    kp1, des1 = orb.detectAndCompute(img1,None)
    kp2, des2 = orb.detectAndCompute(img2,None)

    # FLANN parameters
    FLANN_INDEX_LSH = 0   # note: OpenCV's actual FLANN_INDEX_LSH value is 6; 0 selects linear search, which is why the descriptors are cast to float32 below
    index_params= dict(algorithm = FLANN_INDEX_LSH, table_number = 6, key_size = 12,multi_probe_level = 1)
    search_params = dict(checks=50)   # or pass empty dictionary

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(np.asarray(des1,np.float32),np.asarray(des2,np.float32), k=2)

    # Need to draw only good matches, so create a mask
    matchesMask = [[0,0] for i in xrange(len(matches))]

    # ratio test as per Lowe's paper
    for i,(m,n) in enumerate(matches):
        if m.distance < 0.7*n.distance:
            matchesMask[i]=[1,0]
    draw_params = dict(matchColor = (0,255,0),
                   singlePointColor = (255,0,0),
                   matchesMask = matchesMask,
                   flags = 0)
    img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,matches,None,**draw_params)

    plt.imshow(img3,),plt.show()
def SIFT_BF_Matching():
    img1 = cv2.imread('box.png',0)          # queryImage
    img2 = cv2.imread('box_in_scene.png',0) # trainImage

    # Initiate SIFT detector
    sift = cv2.SIFT()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1,None)
    kp2, des2 = sift.detectAndCompute(img2,None)

    # BFMatcher with default params
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1,des2, k=2)

    # Apply ratio test
    # Ratio test: for each keypoint, take the nearest neighbour B and the second-nearest C; the match is kept
    # only when B/C is below the threshold (0.75). Matches are assumed to be one-to-one, and an ideal true
    # match would have distance 0.
    good = []
    for m,n in matches:
       if m.distance < 0.75*n.distance:
           good.append([m])

    # cv2.drawMatchesKnn expects list of lists as matches.
    img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)

    plt.imshow(img3),plt.show()
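The ratio test above recurs in nearly every example on this page; below is a minimal, reusable sketch of it (the helper name is ours, and pairs with fewer than two neighbours are skipped, as some examples further down also do).

def lowe_ratio_filter(matches, ratio=0.75):
    """Keep knnMatch pairs whose best match is clearly better than the second best."""
    good = []
    for pair in matches:
        if len(pair) < 2:       # knnMatch may return fewer than k neighbours
            continue
        m, n = pair
        if m.distance < ratio * n.distance:
            good.append([m])    # list of lists, as cv2.drawMatchesKnn expects
    return good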
    def _default_file_(self):
        """
        Compute the default value for the field file_
        This method runs a pattern recognition between all the chosen patterns
        or, if only one is selected, between this one and all the others.
        """
        # take active ids
        ret = []
        active_ids = self.env.context['active_ids']
        # avoid templates without pattern
        tpl = self.env['sponsorship.correspondence.template'].search(
            [('id', 'in', active_ids), ('pattern_image', '!=', None)])
        # loop over all the active templates
        for t1 in tpl:
            # if only one element check with all the other ones
            if len(tpl) == 1:
                tpl2 = self.env[
                    'sponsorship.correspondence.template'].search([
                        ('pattern_image', '!=', None)])
            # if more, do only the ones not done yet
            else:
                tpl2 = self.env[
                    'sponsorship.correspondence.template'].search([
                        ('id', '>=', t1.id), ('pattern_image', '!=', None)])

            # create the first pattern image
            with tempfile.NamedTemporaryFile() as template_file:
                template_file.write(base64.b64decode(
                    t1.pattern_image))
                template_file.flush()
                # Find the pattern inside the template image
                img = cv2.imread(template_file.name)

            # loop over second templates
            for t2 in tpl2:
                # create the second pattern image
                with tempfile.NamedTemporaryFile() as template_file:
                    template_file.write(base64.b64decode(
                        t2.pattern_image))
                    template_file.flush()
                    # Find the pattern inside the template image
                    img2 = cv2.imread(template_file.name)

                kp1, kp2, good = pr.patternRecognition(img, img2,
                                                       full_result=True)
                img3 = cv2.drawMatchesKnn(img, kp1, img2,
                                          kp2, good, None, flags=2)
                name = t1.name + '-' + t2.name + '.png'
                cv2.imwrite(name, img3)
                with open(name, 'r') as f:
                    img3 = f.read()
                    remove(name)
                image_id = self.env['crosscheck.image'].create({
                    'name': name,
                    'image': base64.b64encode(img3),
                })
                ret.append(image_id.id)
        return [(6, 0, ret)]
def find_template(img, templates, test=False, threshold=0.8):
    """
    Use pattern recognition to detect which template correponds to img.

    :param img: Opencv Image to analyze
    :param templates: Collection of all templates
    :param bool test: Enable the test mode (return an image as the last \
        parameter). If False, the image is None.
    :param threshold: Ratio of the templates' keypoints requested
    :returns: Detected template, center position of detected pattern,\
        image showing the detected keypoints for all the template
    :rtype: template, layout, None or np.array
    """
    # number of keypoint related between the picture and the pattern
    max_ratio_keypoints = 0.0
    key_img = False
    matching_template = None
    if test:
        test_img = []
    else:
        test_img = None

    for template in templates:
        # Crop the image to speedup detection and avoid false positives
        crop_area = template.get_pattern_area()

        if test:
            recognition = patternRecognition(
                img, template.pattern_image, crop_area, full_result=True)
            with tempfile.NamedTemporaryFile() as temp:
                temp.write(base64.b64decode(template.pattern_image))
                temp.flush()
                img2 = cv2.imread(temp.name)
            (xmin, ymin), img1 = subsetImage(img, crop_area)
            if recognition is not None:
                kp1, kp2, good = recognition
                img3 = cv2.drawMatchesKnn(img1, kp1, img2,
                                          kp2, good, None, flags=2)
            else:
                img3 = img1
            test_img.append(img3)

        # try to recognize the pattern
        tmp_key = patternRecognition(
            img, template.pattern_image, crop_area)
        # check if it is a better result than before
        if (tmp_key is not None and
                (threshold < float(len(tmp_key)) / float(
                    template.nber_keypoints) > max_ratio_keypoints)):
            # save all the data if it is better
            max_ratio_keypoints = float(
                len(tmp_key))/float(template.nber_keypoints)
            key_img = tmp_key
            matching_template = template
    return matching_template, keyPointCenter(key_img), test_img
def calculateSIFTMatch(track, detectedObject):
    matcher = cv2.BFMatcher()

    if (len(detectedObject.keypoints) > 0) and (len(track.currentKeypoints) > 0):
        matches = matcher.knnMatch(track.currentDescriptor, detectedObject.descriptors, k=2)

        # Keep a tally of ratio scores
        ratio_sum = 0

        # Apply Lowe's ratio test to determine a good match
        good = []
        for match in matches:
            if len(match) != 2:
                continue

            m, n = match
            ratio = m.distance / n.distance

            if ratio <= RATIO_THRESHOLD:
                ratio_sum += RATIO_THRESHOLD / ratio if ratio > 0 else 0.001  # Ratio of n vs. m with respect to RATIO_THRESHOLD
                good.append(m)

        goodMatches = []
        if len(good) > 0:
            # Using RANSAC to further improve the matching and eliminate wrong matches
            # TODO Sometimes the homography transform results in a bounding box OUTSIDE the destination
            src_pts = np.float32([track.currentKeypoints[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([detectedObject.keypoints[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

            if mask is not None:
                matchesMask = mask.ravel().tolist()

                for index, match in enumerate(good):
                    if matchesMask[index]:
                        goodMatches.append([match])

            if len(goodMatches) > MIN_PTS_THRESHOLD:
                match_img = cv2.drawMatchesKnn(track.currentImage, track.currentKeypoints,
                                               detectedObject.image, detectedObject.keypoints,
                                               goodMatches, None, flags=2)
                #cv2.imshow(str(track.id), match_img)
                #cv2.imwrite(str(frame_number) + "-" + str(track.id) + ".png",match_img)

            # TODO What's a better score here?

            # If we can find a transform, we should have a very good match
            return ratio_sum * (len(goodMatches) + 1)

        # This should always be equal to 0
        return ratio_sum

    # Failure condition -- track or object had no features to match
    return -1
Example #13
def compute_sift_matches(im0g, im1g, y_th=3, good_ratio=0.75, verbose=False):
    """
    Compute the SIFT matches given two images
    :param im0g: first image (grayscale)
    :param im1g: second image (grayscale)
    :param y_th: allowed distance in y-direction of the matches
    :param good_ratio: used to filter out low-response keypoints
    :return: the sorted good matches (based on response)
    """

    if int(cv2.__version__.split('.')[0])<3:
        sift = cv2.SIFT(nOctaveLayers=7)
    else:
        sift = cv2.xfeatures2d.SIFT_create(nOctaveLayers=7)

    kp0, des0 = sift.detectAndCompute(im0g, None)
    kp1, des1 = sift.detectAndCompute(im1g, None)

    bf_matcher = cv2.BFMatcher()
    matches = bf_matcher.knnMatch(des0, des1, k=2)

    # Apply ratio test
    good = []
    y_diffs = []
    for m, n in matches:
        if m.distance < good_ratio * n.distance:
            y_diff = kp0[m.queryIdx].pt[1] - kp1[m.trainIdx].pt[1]
            if np.abs(y_diff) < y_th:
                y_diffs.append(y_diff)
                good.append([m])

    sorted_good = sorted(good, key=lambda x: kp0[x[0].queryIdx].response, reverse=False)

    if verbose:
        plt.figure(15)
        im3 = cv2.drawKeypoints(im0g, kp0, im0g, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        plt.imshow(im3)
        plt.title('im0 keypoints')
        plt.pause(0.1)

        plt.figure(17)
        im4 = cv2.drawKeypoints(im1g, kp1, im1g, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        plt.imshow(im4)
        plt.title('im1 keypoints')
        plt.pause(0.1)

        im5 = cv2.drawMatchesKnn(im0g, kp0, im1g, kp1, sorted_good, im4, flags=2)

        plt.figure(16)
        plt.imshow(im5)
        plt.title('Found ' + str(len(sorted_good)) )
        plt.pause(0.1)

    return sorted_good, y_diffs, kp0, kp1
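A possible usage sketch for compute_sift_matches, assuming two grayscale frames are on disk; the file names below are placeholders, not from the original code.

import cv2

im0g = cv2.imread('frame_left.png', 0)    # placeholder file names
im1g = cv2.imread('frame_right.png', 0)

sorted_good, y_diffs, kp0, kp1 = compute_sift_matches(im0g, im1g, y_th=3, verbose=True)
print('kept %d matches, mean y-offset %.3f'
      % (len(sorted_good), sum(y_diffs) / max(len(y_diffs), 1)))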
def opencv_feature_match(png1, png2, feature_detector='SIFT', threshold=0.75):
    """Given two images in png format in memory, compare them and return the
    number of matches and a third image.

    :param feature_detector: "ORB", "FAST", "BRISK"

    """
    img1 = opencvify_image(png1)
    img2 = opencvify_image(png2)

    detector = cv2.FeatureDetector_create(feature_detector)
    kp1 = detector.detect(img1, None)
    kp2 = detector.detect(img2, None)

    sift = cv2.SIFT()
    kp1, des1 = sift.compute(img1, kp1)
    kp2, des2 = sift.compute(img2, kp2)

    # create BFMatcher object
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    log.debug('{} matches found.'.format(len(matches)))

    # sort matches in order of distance
    matches = sorted(matches, key = lambda x: x[0].distance)  # knnMatch returns lists of matches, so sort on the best one

    """des1 = numpy.asarray(des1, dtype=numpy.float32)
    des2 = numpy.asarray(des2, dtype=numpy.float32)

    # flann settings
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks = 50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)"""

    """bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True) (FOR ORB)"""

    good = []
    for m,n in matches:
        if m.distance < threshold*n.distance:
            good.append([m])
    log.debug('{} matches were within the threshold.'.format(len(good)))

    # draw matches and output to a third image
    img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
    return_value, image_analysis_array = cv2.imencode('.png', img3)

    """output = open('test.png', 'w')
    output.write(image_analysis_array.tostring())
    output.close()"""

    return (len(matches), image_analysis_array.tostring())
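The opencvify_image helper is not shown in this snippet; a plausible sketch of what it likely does (decoding in-memory PNG bytes into an OpenCV array), offered only as an assumption:

import numpy
import cv2

def opencvify_image(png_bytes):
    # Assumed helper: decode raw PNG bytes into a BGR OpenCV image.
    buf = numpy.frombuffer(png_bytes, dtype=numpy.uint8)
    return cv2.imdecode(buf, cv2.IMREAD_COLOR)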
Example #15
def facialRecognition():
    try:
        cap = cv2.VideoCapture(0)
        _, img1 = cap.read()          # a VideoCapture object is not an image; grab a frame first
        img2 = cv2.imread("me.jpg",0)

        # Initiate SIFT detector
        sift = cv2.SIFT()

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(img1,None)
        kp2, des2 = sift.detectAndCompute(img2,None)

        # FLANN parameters
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        search_params = dict(checks=50)   # or pass empty dictionary

        flann = cv2.FlannBasedMatcher(index_params,search_params)

        matches = flann.knnMatch(des1,des2,k=2)

        # Need to draw only good matches, so create a mask
        matchesMask = [[0,0] for i in xrange(len(matches))]

        # ratio test as per Lowe's paper
        for i,(m,n) in enumerate(matches):
            if m.distance < 0.7*n.distance:
                matchesMask[i]=[1,0]

        draw_params = dict(matchColor = (0,255,0),
                           singlePointColor = (255,0,0),
                           matchesMask = matchesMask,
                           flags = 0)

        img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,matches,None,**draw_params)

        plt.imshow(img3,),plt.show()

    except KeyboardInterrupt:
        toDo = raw_input("-"*45+"\n\n[Notice] | Script Paused...\n>" + "---"*13 + "<\n> Main Menu-------------(1)" + "\n> Continue Script-------(Any Other Key)\n>" + "---"*13 + "<\n\n> ")
        if "1" in toDo:
            cv2.destroyAllWindows()
            main()
        else:
            pass
    except Exception,e:
        if "something@R" in str(e):
            print "[Error] | Something Happaned."
        else:
            raw_input("[Error] | > " + str(e))
            cv2.destroyAllWindows()
            main()
def matchObjectTrack(detectedObject, track, matcher, MIN_PTS_THRESHOLD, frame_number):
    if (len(detectedObject.keypoints) > 0) and (len(track.currentKeypoints) > 0):
        matches = matcher.knnMatch(track.currentDescriptor, detectedObject.descriptors, k=2)

        # Apply Lowe's ratio test to determine a good match
        good = []
        for match in matches:
            if len(match) != 2:
                continue

            m, n = match
            if m.distance <= RATIO_THRESHOLD * n.distance:
                good.append(m)

        goodMatches = []
        if len(good) > 0:
            # Using RANSAC to further improve the matching and eliminate wrong matches
            src_pts = np.float32([track.currentKeypoints[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([detectedObject.keypoints[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

            mask = []
            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

            if mask is not None:
                matchesMask = mask.ravel().tolist()

                for index, m in enumerate(good):
                    if matchesMask[index]:
                        goodMatches.append([m])

            if len(goodMatches) > MIN_PTS_THRESHOLD:
                match_img = cv2.drawMatchesKnn(
                    track.currentImage,
                    track.currentKeypoints,
                    detectedObject.image,
                    detectedObject.keypoints,
                    goodMatches,
                    None,
                    flags=2,
                )
                # cv2.imshow(str(track.id), match_img)
                # cv2.imwrite(str(frame_number) + "-" + str(track.id) + ".png",match_img)
                return True
    return False
    def create_panorama(self):
        # initialize a surf detector with hessian as 400
        surf = self.surf(400)
        imgOne = cv2.imread(self.images[0])
        imgTwo = cv2.imread(self.images[1])

        grayOne = self.cvt_gray(imgOne)
        grayTwo = self.cvt_gray(imgTwo)

        # extract the keypoints and descriptors for individual images
        kpOne, desOne = surf.detectAndCompute(grayOne, None)
        kpTwo, desTwo = surf.detectAndCompute(grayTwo, None)

        imgOneU = cv2.drawKeypoints(imgOne, kpOne, None, (0, 127,
                                                          0),
                                    4)
        imgTwoU = cv2.drawKeypoints(imgTwo, kpTwo, None, (0, 127,
                                                          0),
                                    4)
        # initialize flann matcher
        flann = self.flann()
        matches = flann.knnMatch(np.array(desOne), np.array(desTwo),
                                 k=2)

        # store all the good matches
        good = []
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)

        src_pts = np.float32([kpOne[m.queryIdx].pt for m in good])
        dst_pts = np.float32([kpTwo[m.trainIdx].pt for m in good])

        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,
                                     5.0)

        im_out = cv2.warpPerspective(imgOne, M, (
            imgOne.shape[1] + imgTwo.shape[1],
            imgOne.shape[0]))
        im_out[0:imgTwo.shape[0], 0:imgTwo.shape[1]] = imgTwo

        self.plot_img(self.m, self.n, imgOneU, "Keypoints 1")
        self.plot_img(self.m, self.n, imgTwoU, "Keypoints 2")

        img3 = cv2.drawMatchesKnn(imgOne, kpOne, imgTwo, kpTwo,
                                  matches[:100], None,
                                  matchColor=(0, 127, 255), flags=2)
        self.plot_img(self.m, self.n, img3, "Matching Keypoints")
        self.plot_img(self.m, self.n, im_out, "Panorama")
        self.show_plot()
Example #18
def function7():
    img1 = cv2.imread('../a3.jpg',0)
    img2 = cv2.imread('../a1.jpg',0)
    sift = cv2.xfeatures2d.SIFT_create()
    kypnt1, desc1 = sift.detectAndCompute(img1,None)
    kypnt2, desc2 = sift.detectAndCompute(img2,None)
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(desc1,desc2, k=2)
    good = []
    for m,n in matches:
        if m.distance < 0.75*n.distance:
            good.append([m])
    img3 = cv2.drawMatchesKnn(img1,kypnt1,img2,kypnt2,good,None,flags=2)
    plt.imshow(img3)
    plt.show()
Example #19
def show_match(im_kp, image, raw_matches, ref_kp, reference_form_path):
    reference = cv2.imread(reference_form_path, 0)
    img_match = np.empty((max(reference.shape[0], image.shape[0]), reference.shape[1] + image.shape[1], 3),
                         dtype=np.uint8)
    good = []
    for m, n in raw_matches:
        if m.distance < 0.75 * n.distance:
            good.append([m])
    # img3 = cv2.drawMatchesKnn(image, im_kp, reference, ref_kp, matches, None, **draw_params)
    im_matches = cv2.drawMatchesKnn(image, im_kp, reference, ref_kp, good, outImg=img_match, matchColor=None,
                                    singlePointColor=(255, 255, 255), flags=2)
    factor = 0.5
    im_matches_small = cv2.resize(im_matches, None, fx=factor, fy=factor)
    cv2.imshow("match", im_matches_small)
    cv2.waitKey(0)
Example #20
def renderMatch(img1, kp1, img2, kp2, matches):
	matchesMask = [[0,0] for i in xrange(len(matches))]
	for i,x in enumerate(matches):
		# print i, type(x), len(x)
		# print 
		if x[0].distance < 0.75*x[1].distance:
			matchesMask[i]=[1,0]

	draw_params = dict(matchColor = (0,255,0),
	                   singlePointColor = (255,0,0),
	                   matchesMask = matchesMask,
	                   flags = 0)

	img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,matches,None,**draw_params)
	cv2.imwrite('test.JPG',img3)
	plt.imshow(img3,),plt.show()
Example #21
def getMatches(des1, des2, kp1=None, kp2=None, img1=None, img2=None, count=100, threshold=0.5):
    '''
        @param: descriptor, des1 -- queryIdx,  des2 -- trainIdx
        return: list of tuple as (queryIdx, trainIdx)
    '''
    # kp1, des1 = fE.getDescriptor(fid1,nfea=nfeatures)
    # kp2, des2 = fE.getDescriptor(fid2,nfea=nfeatures)
    # FLANN parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks=50)   # or pass empty dictionary

    # flann = cv2.FlannBasedMatcher(index_params,search_params)

    # matches = flann.knnMatch(des1,des2,k=2)
    # BFMatcher with default params
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1,des2, k=2)
    # print "Num of all matches are %d"%len(matches)
    final_matches = []
    # Need to draw only good matches, so create a mask
    matchesMask = [[0,0] for i in xrange(len(matches))]

    # ratio test as per Lowe's paper
    ratio = threshold
    matches.sort(key=lambda m: m[0].distance)
    for i,(m,n) in enumerate(matches):
        if m.distance < ratio*n.distance:
            matchesMask[i]=[1,0]
            final_matches.append(m)
            if (len(final_matches) > count):
                break
    if (kp1 is None):
        return final_matches

    for m in final_matches:
        print m.distance
    
    draw_params = dict(matchColor = (0,255,0),
                       singlePointColor = (255,0,0),
                       matchesMask = matchesMask,
                       flags = 0)

    img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,matches,None,**draw_params)

    plt.imshow(img3),plt.show()
    return final_matches
Example #22
    def flann_matcher(source1, source2):
        """
        Question 1.3

        Draws the feature correspondences between two images using the "Brute Force" method.

        Issues on Ubuntu and Mac with OpenCV 3.1.0; it appears to be an OpenCV-related bug:
        https://github.com/Itseez/opencv/issues/5667

        Parameters
        ----------
        source1: the source of the left image
        source2: the source of the right image
        """

        # Initiate SIFT detector
        sift = cv2.xfeatures2d.SIFT_create()

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(source1, None)
        kp2, des2 = sift.detectAndCompute(source2, None)

        # FLANN parameters
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)  # or pass empty dictionary

        flann = cv2.FlannBasedMatcher(index_params, search_params)

        matches = flann.knnMatch(des1, des2, k=2)

        # Need to draw only good matches, so create a mask
        matchesMask = [[0, 0] for i in xrange(len(matches))]

        # ratio test as per Lowe's paper
        for i, (m, n) in enumerate(matches):
            if m.distance < 0.7 * n.distance:
                matchesMask[i] = [1, 0]

        draw_params = dict(matchColor=(0, 255, 0),
                           singlePointColor=(255, 0, 0),
                           matchesMask=matchesMask,
                           flags=0)

        img3 = cv2.drawMatchesKnn(source1, kp1, source2, kp2, matches, None, **draw_params)

        plt.imshow(img3, ), plt.show()
def book_opt_flow(n_stills):
    for i in range(2, n_stills + 1):
    
        #Loading images
        img1 = cv2.imread('Motion_Image' + str(i-1) + '.png')
        img2 = cv2.imread('Motion_Image' + str(i) + '.png')

        #Detect keypoints in the left and right images
        ffd = cv2.FastFeatureDetector_create()
        left_kpts = ffd.detect(img1, None)
        right_kpts = ffd.detect(img2,None)

        left_pts = cv2.KeyPoint_convert(left_kpts)
        #in the C++ code they establish an empty array here for right pts

        prevgray = cv2.cvtColor(img1,cv2.COLOR_RGB2GRAY)
        gray = cv2.cvtColor(img2,cv2.COLOR_RGB2GRAY)

        right_pts, vstatus, verror = cv2.calcOpticalFlowPyrLK(prevgray, gray, left_pts, None)

        right_points_to_find = np.array([])
        right_points_to_find_back_index = np.array([])
        right_features = np.array([])

        for i in range(0,len(vstatus)):
            if vstatus[i] and verror[i] < 12.0:
                right_points_to_find_back_index = np.append(right_points_to_find_back_index, i)
                right_points_to_find = np.append(right_points_to_find, right_pts[i])
            else:
                vstatus[i] = 0

        right_features = np.append(right_features, cv2.KeyPoint_convert(right_kpts))

        bf = cv2.BFMatcher()
        # print type(right_points_to_find[0]), type(right_features[0])
        matches = bf.knnMatch(right_points_to_find.astype('float32'),right_features.astype('float32'),k=2)

        nearest_neighbors = []
        for m,n in matches:
            if m.distance < 0.7*n.distance:
                nearest_neighbors.append([m])

        img3 = cv2.drawMatchesKnn(img1,right_kpts,prevgray,left_kpts,nearest_neighbors,flags=2, outImg=None)

        plt.imshow(img3),plt.show()
Example #24
def sign_detect(matches, case, face, kp_case, kp, frame):
    matchesMask = [[0,0] for i in range(len(matches))]
    is_detected = 0
    match_points = []
    for i,(m,n) in enumerate(matches):

        if m.distance < 0.7*n.distance:
            matchesMask[i]=[1,0]

            match_points.append([kp[m.trainIdx].pt[0],kp[m.trainIdx].pt[1]])

            is_detected += 1

    if Z_DEBUG:
        draw_params = dict(matchColor = (0,0,255),
                    singlePointColor = (255,0,0),
                    matchesMask = matchesMask,
                    flags = 0)
        img_show = cv2.drawMatchesKnn(face,kp_case,frame,kp,matches,None,**draw_params)


    #reject repeated points
    if len(match_points) >=2:
        match_points = np.unique(match_points, axis=0)

    #reject outliers
    if len(match_points) >= DETECT_POINT_THRESHOLD:
        sd_x = 1.7
        sd_y = 1.7
        match_points = reject_outliers(match_points,sd_x,sd_y)

    is_detected = len(match_points)

    if Z_DEBUG:
        for [x,y] in match_points:
            cv2.circle(frame,(int(x),int(y)),4,(255,0,0),4)
        print(case + " : " + str(is_detected))
        cv2.imshow(case,img_show)
        cv2.imshow(case + ' ',frame)

    #buffer
    PARKING_BUFFER[0:BUFFER_SIZE-1] = PARKING_BUFFER[1:BUFFER_SIZE]
    PARKING_BUFFER[BUFFER_SIZE-1] = is_detected
    if np.size(np.where(PARKING_BUFFER>=DETECT_POINT_THRESHOLD)) >= DETECT_THRESHOLD:
        print("parking detected!")
        '''
Example #25
def feature_matching(img1, img2, savefig=False):
    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()
    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # FLANN parameters
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)  # or pass empty dictionary
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches2to1 = flann.knnMatch(des2, des1, k=2)

    matchesMask_ratio = [[0, 0] for i in xrange(len(matches2to1))]
    match_dict = {}
    for i, (m, n) in enumerate(matches2to1):
        if m.distance < 0.7 * n.distance:
            matchesMask_ratio[i] = [1, 0]
            match_dict[m.trainIdx] = m.queryIdx

    good = []
    recip_matches = flann.knnMatch(des1, des2, k=2)
    matchesMask_ratio_recip = [[0, 0] for i in xrange(len(recip_matches))]

    for i, (m, n) in enumerate(recip_matches):
        if m.distance < 0.7 * n.distance:  # ratio
            if m.queryIdx in match_dict and match_dict[m.queryIdx] == m.trainIdx:  # reciprocal
                good.append(m)
                matchesMask_ratio_recip[i] = [1, 0]

    if savefig:
        draw_params = dict(
            matchColor=(0, 255, 0),
            singlePointColor=(255, 0, 0),
            matchesMask=matchesMask_ratio_recip,
            flags=0)
        img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, recip_matches, None,
                                  **draw_params)

        plt.figure(), plt.xticks([]), plt.yticks([])
        plt.imshow(img3,)
        plt.savefig("feature_matching.png", bbox_inches='tight')

    return ([kp1[m.queryIdx].pt
             for m in good], [kp2[m.trainIdx].pt for m in good])
Example #26
def surf_matching(image_1,image_2):
    
    # Load the images
    image = cv2.imread('{}'.format(image_1))

    # Convert the images to grayscale
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # SURF extraction
    surf = cv2.xfeatures2d.SURF_create(10000)
    (kp1, desc1) = surf.detectAndCompute(gray_img, None)

    # Setting up samples and responses for KNN
    samples = np.array(desc1)
    responses = np.arange(len(kp1), dtype = np.float32)

    # KNN training
    knn = cv2.ml.KNearest_create()
    knn.train(samples,cv2.ml.ROW_SAMPLE, responses)

    # Loading a template image and searching for similar keypoints
    template = cv2.imread('{}'.format(image_2))
    templateg = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
    (kp2, desc2) = surf.detectAndCompute(templateg, None)
    
    # BFMatcher with default params
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(desc1,desc2, k=2)
    
    matchesMask = [[0,0] for i in range(len(matches))]
    
    # ratio test as per Lowe's paper
    for i,(m,n) in enumerate(matches):
        if m.distance < 0.7*n.distance:
            matchesMask[i]=[1,0]
    
    draw_params = dict(matchColor = (0,255,0),
                       singlePointColor = (255,0,0),
                       matchesMask = matchesMask,
                       flags = 0)
    
    # cv2.drawMatchesKnn expects list of lists as matches.
    img3 = cv2.drawMatchesKnn(gray_img,kp1,template,kp2,matches,None,**draw_params)
    
    plt.imshow(img3),plt.show()
Example #27
def display_matches(text1: str, text2: str, fontsize: int = 64) -> None:
    img1 = create_image(text1, fontsize)
    img2 = create_image(text2, fontsize)
    sift = cv2.xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1,None)
    print(len(kp1))
    kp2, des2 = sift.detectAndCompute(img2,None)
    img = np.array([])
    img=cv2.drawKeypoints(img1,kp1,img)
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1,des2, k=2)
    good = []
    for m,n in matches:
        if m.distance < 100:
            good.append([m])
    img3 = np.array([])
    img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good,img3,flags=2)
    plt.imshow(img3),plt.show()
Example #28
def surf():
  
  surf = cv2.xfeatures2d.SURF_create(400)
  imgleft = cv2.imread('left_small.jpg',1) #256x256?
  imgleft = cv2.GaussianBlur( imgleft, (5, 5), 0 )
  imgleft = cv2.cvtColor(imgleft,cv2.COLOR_BGR2GRAY)
  
  kp1, des1 = surf.detectAndCompute(imgleft,None)
  image = frame.array
  image = cv2.GaussianBlur( image, (5, 5), 0 )
  image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
#  surf.upright = True

  kp2, des2 = surf.detectAndCompute(image,None)

  
  FLANN_INDEX_KDTREE = 0
#  index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
#  search_params = dict(checks=50)   # or pass empty dictionary
  
#  flann = cv2.FlannBasedMatcher(index_params,search_params)
  
#  matches = flann.knnMatch(des1,des2,k=2)

  bf = cv2.BFMatcher()
  matches = bf.knnMatch(des1, des2, k=2)
  
  # Need to draw only good matches, so create a mask
  matchesMask = [[0,0] for i in xrange(len(matches))]
  
  # ratio test as per Lowe's paper
  for i,(m,n) in enumerate(matches):
    if m.distance < 0.7*n.distance:
      matchesMask[i]=[1,0]
  
  draw_params = dict(matchColor = (0,255,0),
          singlePointColor = (255,0,0),
          matchesMask = matchesMask,
          flags = 0)
  
  img3 = cv2.drawMatchesKnn(imgleft,kp1,image,kp2,matches,None,**draw_params)
    
  return img3
def do_feature_matching(img1, img2, detector, matcher):
    gray_img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray_img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    keypoints_img1, descriptors_img1 = detector.detectAndCompute(gray_img1, None)
    keypoints_img2, descriptors_img2 = detector.detectAndCompute(gray_img2, None)

    cv2.imshow('img1', cv2.drawKeypoints(img1, keypoints_img1, None))
    cv2.imshow('img2', cv2.drawKeypoints(img2, keypoints_img2, None))

    matches = matcher.knnMatch(descriptors_img1, descriptors_img2, 2)

    good = []
    for m, n in matches:
        if m.distance < 0.8 * n.distance:
            good.append([m])

    out_img = cv2.drawMatchesKnn(gray_img1, keypoints_img1, gray_img2, keypoints_img2, good, None, flags=2)
    cv2.imshow('img', out_img)
    cv2.waitKey()
def main():
    stream=urllib.urlopen(CAM_URL)
    bytes=''
    ts=time.time()
    while True:
        bytes+=stream.read(2048)
        a = bytes.find('\xff\xd8')
        b = bytes.find('\xff\xd9')
        if a==-1 or b==-1:
            continue

        # Frame available
        rtimestamp=time.time()
        jpg = bytes[a:b+2]
        bytes= bytes[b+2:]
        img = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),cv2.IMREAD_COLOR)
        cv2.imshow('RAW',img)
        
        #ORB to get corresponding points
        kp, des = sift.detectAndCompute(img,None)
        bf = cv2.BFMatcher()
        matches = bf.knnMatch(des_ref,des,k=2)
        m = []
        for ma,na in matches:
            if ma.distance < 0.75*na.distance:
                m.append([ma])
        img3 = cv2.drawMatchesKnn(img_ref,kp_ref,img,kp,m[:4], None,flags=2)
        cv2.imshow('MatchesKnn',img3)

        #pts_ref = np.float32([[kp_ref[m[0].queryIdx].pt[0],kp_ref[m[0].queryIdx].pt[1]],[kp_ref[m[1].queryIdx].pt[0],kp_ref[m[1].queryIdx].pt[1]],[kp_ref[m[2].queryIdx].pt[0],kp_ref[m[2].queryIdx].pt[1]],[kp_ref[m[3].queryIdx].pt[0],kp_ref[m[3].queryIdx].pt[1]]])
        #pts     = np.float32([[kp[m[0].trainIdx].pt[0],kp[m[0].trainIdx].pt[1]],[kp[m[1].trainIdx].pt[0],kp[m[1].trainIdx].pt[1]],[kp[m[2].trainIdx].pt[0],kp[m[2].trainIdx].pt[1]],[kp[m[3].trainIdx].pt[0],kp[m[3].trainIdx].pt[1]]])
        # Perspective Transform
        #M = cv2.getPerspectiveTransform(pts_ref,pts)
        #dst = cv2.warpPerspective(img,M,(cols,rows))
        #cv2.imshow('Perspective Transform',dst)

        # Print lag
        print(time.time()-ts)
        ts=time.time()

        if cv2.waitKey(1) == 27:
            exit(0)
    4.4 Set previous image to present
    4.5 Save snapshot (Optional)
    4.6 Alert if same_position_feature_count is more than config and save snapshot
    4.7 (Optional) Save image for keypoint found
    4.8 Clearing value for next frame

"""
    # 4.1 Copy image frame to use
    compared_feature_image = np.array(image_frame)
    new_feature_detected_image = np.array(image_compare_model)

    """Show matches via feature list"""

    # 4.2 Show newly found feature in present frame
    try:
        new_feature_detected_image = cv.drawMatchesKnn(img, kpSift, image_crop, present_kp, model_good_feature_list, image_compare_model,
                                                       singlePointColor = (255,0,0), matchColor = (0,255,0), flags=2)
        cv.imshow("New feature detected", new_feature_detected_image)

    except Exception as e:
        print('error here model image', e)
        error_log.append('error here model image' + str(e))


    if present_good_feature_list == []:
        print(' present_good_feature_list = null but nothing to worry')

    # 4.3 Show feature from previous frame
    try:
        compared_feature_image = cv.drawMatchesKnn(img, kpSift, image_crop, present_kp, present_good_feature_list, image_frame,
                                                   singlePointColor = (255,0,0), matchColor = (0,255,255), flags=2)
        cv.imshow("Tracking previous feature", compared_feature_image)
Example #32
        def FLANN():
            connectdb = sqlite3.connect("results.db")
            cursor = connectdb.cursor()

            #  img1 = cv2.imread("1.png")
            #  img11 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
            img1 = cv2.imread('1.png', cv2.IMREAD_GRAYSCALE)
            imageA = cv2.resize(img1, (450, 237))
            database = os.listdir("db")

            for image in database:
                try:
                    img2 = cv2.imread("db/" + image)

                    imgprocess = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

                    imageB = cv2.resize(imgprocess, (450, 237))

                    matcheslist = ""

                    # Initiate SIFT detector
                    sift = cv2.xfeatures2d.SIFT_create()
                    # find the keypoints and descriptors with SIFT
                    kp1, des1 = sift.detectAndCompute(imageA, None)
                    kp2, des2 = sift.detectAndCompute(imageB, None)

                    # BFMatcher with default params
                    bf = cv2.BFMatcher()
                    matches = bf.knnMatch(des1, des2, k=2)
                    # Apply ratio test
                    good = []
                    for m, n in matches:
                        if m.distance < 0.75 * n.distance:
                            good.append([m])
                    # cv.drawMatchesKnn expects list of lists as matches.

                    amount = len(good)
                    print('Comparing input image to ' + image + " using FLANN")

                    title = "Comparing"
                    fig = plt.figure(title)

                    cursor.execute(
                        "INSERT INTO flann (percentage, filename) VALUES (?, ?);",
                        (amount, image))
                    connectdb.commit()

                except:
                    pass

            percentages = list(connectdb.cursor().execute(
                "SELECT * FROM flann order by percentage desc limit 10"))
            print(percentages[0])
            highest = percentages[0]

            # getting number of matches
            highestperct = round(highest[0], 2)
            print(highestperct)

            # getting file name of highest similarity
            filename = highest[1]
            print(filename)

            image1 = cv2.imread('1.png', cv2.IMREAD_GRAYSCALE)  # input image
            img1 = cv2.resize(image1, (450, 237))
            image2 = cv2.imread('db/' + filename,
                                cv2.IMREAD_GRAYSCALE)  # closet image
            img2 = cv2.resize(image2, (450, 237))

            # Initiate SIFT detector
            sift = cv2.xfeatures2d.SIFT_create()

            # find the keypoints and descriptors with SIFT
            keypoints1, destination1 = sift.detectAndCompute(img1, None)
            keypoints2, destination2 = sift.detectAndCompute(img2, None)

            # FLANN parameters
            FLANN_INDEX_KDTREE = 1
            index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
            search_params = dict(checks=50)  # or pass empty dictionary
            flann = cv2.FlannBasedMatcher(index_params, search_params)
            matches = flann.knnMatch(destination1, destination2, k=2)

            # Need to draw only good matches, so create a mask
            matchesMask = [[0, 0] for i in range(len(matches))]

            # ratio test as per Lowe's paper
            for i, (m, n) in enumerate(matches):
                if m.distance < 0.7 * n.distance:
                    matchesMask[i] = [1, 0]

            draw_params = dict(matchColor=(0, 255, 0),
                               singlePointColor=(255, 0, 0),
                               matchesMask=matchesMask,
                               flags=cv2.DrawMatchesFlags_DEFAULT)

            print(draw_params)
            print(len(matches))

            img3 = cv2.drawMatchesKnn(img1, keypoints1, img2, keypoints2,
                                      matches, None, **draw_params)
            plt.imshow(img3)
            plt.suptitle("Amount of matches : " + str(highestperct) +
                         "\n Similarity Percentage : " + str(percent) + "%")
            disease = filename[:7]
            txt = "Results: \n - " + filename + "\n - " + disease + "\n - Analysis results are safe, no diseases found"
            plt.text(0.40, 0.20, txt, transform=fig.transFigure, size=11)
            plt.axis("off")

            plt.show()

            cursor.execute("DELETE FROM flann")
            connectdb.commit()
import cv2
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline

hacker = cv2.imread('./TCIA_pancreas_labels/label0011/000011-101.jpg', 0)
items = cv2.imread('./Pancreas-CT/PANCREAS_0011/000011-101.jpg', 0)

sift = cv2.xfeatures2d.SIFT_create()

kp1, des1 = sift.detectAndCompute(hacker, None)
kp2, des2 = sift.detectAndCompute(items, None)

bf = cv2.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)

good = []
for match1, match2 in matches:
    if match1.distance < 0.75 * match2.distance:
        good.append([match1])

sift_matches = cv2.drawMatchesKnn(hacker, kp1, items, kp2, good, None, flags=2)

display(sift_matches)
Example #34
    scr_pts = np.float32([keypoints1[m.queryIdx].pt
                          for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([keypoints2[m.trainIdx].pt
                          for m in good]).reshape(-1, 1, 2)
    # Compute the homography with RANSAC
    H, mask = cv2.findHomography(dst_pts, scr_pts, cv2.RANSAC, 5.0)

else:
    print('Not enough matches')

# Apply the perspective transform H to image 2
imgTrans = cv2.warpPerspective(img2, H, (600, 800))

# Blend both images
alpha = 0.5
blend = np.array(
    imgTrans * alpha + img1 * (1 - alpha), dtype=np.uint8
)  # The images are blended: in overlapping areas the brightness rises by 50%, while in non-overlapping areas it drops by 50%.
img3 = cv2.drawMatchesKnn(
    img1, keypoints1, img2, keypoints2, matches[:10], None, flags=0
)  # cv2.drawMatchesKnn() draws the matches: it stacks the two images horizontally and draws lines from the first image to the second for the k best matches of each keypoint. With k = 2 it draws two match lines per keypoint, so a mask has to be passed to draw selectively.

# Show the results
cv2.imshow('Imagen 1', img1)
cv2.imshow('Imagen 2', img2)
cv2.imshow('Perspectiva', imgTrans)
cv2.imshow('Combinacion de imagenes', blend)
cv2.imshow('Matches', img3)

cv2.waitKey(0)
cv2.destroyAllWindows()
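Following up on the comment above about selective drawing: a hedged sketch of passing a mask built from the RANSAC inlier flags, reusing the good, mask, and keypoint variables from this example (the structure is an assumption, not part of the original script).

# Hypothetical follow-up: draw only the RANSAC inliers flagged in `mask`.
good_knn = [[m] for m in good]                    # drawMatchesKnn expects a list of match lists
inlier_mask = [[int(f)] for f in mask.ravel()]    # one flag per match group, mirroring good_knn
img_inliers = cv2.drawMatchesKnn(img1, keypoints1, img2, keypoints2, good_knn, None,
                                 matchColor=(0, 255, 0), matchesMask=inlier_mask, flags=0)
cv2.imshow('Inlier matches', img_inliers)
cv2.waitKey(0)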
Example #35
# Initialize matches
flann = cv.FlannBasedMatcher(index_params, search_params)

# Find matches
matches = flann.knnMatch(des1, des2, k=2)

# Flags:
# cv.DRAW_MATCHES_FLAGS_DEFAULT
# cv.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG
# cv.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS
# cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
img3 = cv.drawMatchesKnn(
    img1,
    kp1,
    img2,
    kp2,
    matches[:10],
    None,
    flags=cv.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS,
)

# Draw matches
cv.namedWindow("BRIEF BF Matcher", cv.WINDOW_NORMAL)
cv.imshow("BRIEF BF Matcher", img3)

# Calculate homography
# Consider point filtering
obj = []
scene = []
for match in matches:
    # Consider testing match[0].distance to match[1].distance
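The snippet above cuts off inside the point-filtering loop; a hedged sketch of how it typically continues (ratio test, then collecting the obj/scene correspondences for findHomography), assuming numpy is imported as np and reusing the kp1, kp2, obj, and scene names from above.

# Hypothetical continuation of the filtering loop above.
for match in matches:
    if len(match) < 2:
        continue
    m, n = match
    if m.distance < 0.75 * n.distance:      # ratio test on the two nearest neighbours
        obj.append(kp1[m.queryIdx].pt)      # point in the first image
        scene.append(kp2[m.trainIdx].pt)    # corresponding point in the second image

if len(obj) >= 4:                           # findHomography needs at least 4 correspondences
    H, inlier_mask = cv.findHomography(np.float32(obj), np.float32(scene), cv.RANSAC, 5.0)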
Example #36
    matrix, mask = cv.findHomography(pts2, pts1, cv.RANSAC, 5.0)
    return matrix


def image_stitch(image1, image2, homography):
    image = cv.warpPerspective(image2, homography, (image2.shape[1] * 2, image2.shape[0]))
    image[0:image1.shape[0], 0:image1.shape[1]] = image1
    return image


if __name__ == '__main__':
    # Load two images
    img1 = cv.imread('data/qh1.jpg')
    img2 = cv.imread('data/qh2.jpg')

    # Find keypoints
    keypoints1, descripters1 = keypoints_detect(img1)
    keypoints2, descripters2 = keypoints_detect(img2)

    # Calculate homography
    matches = match(descripters1, descripters2)
    M = homography(keypoints1, keypoints2, matches)

    # Stitch images
    img3 = image_stitch(img1, img2, M)

    # Show Matches
    img_match = cv.drawMatchesKnn(img1, keypoints1, img2, keypoints2,
                                  matches[:50], outImg=np.array([]))
    show([img_match, img3])
Example #37
    def drawMatches(self, setting, algorithm, crossCheck, file):
        det = Detector(algorithm, crossCheck)

        with open(file, "r") as json_file:
            data = json.load(json_file)

        for x in data:
            if (x == "SOX14HET_131218_TiledIMGLeftThalamus_Slide18_Cropped.tif"
                ):
                confocalDescriptor, confocalKeypoint, img1 = det.otherFunction(
                    False, (self.confocalFilePath + x), 20, setting)  #
                for y in data[x]:
                    print(y)
                    tissueCyteDescriptor, tissueCyteKeypoint, img2 = self.masker.otherFunction(
                        self.tissueCyteFilePath, setting, algorithm,
                        crossCheck, y[0])  #
                    if (algorithm != "orb"):
                        confocalDescriptor = np.asarray(
                            confocalDescriptor, np.float32)
                        tissueCyteDescriptor = np.asarray(
                            tissueCyteDescriptor, np.float32)
                    else:
                        confocalDescriptor = np.asarray(
                            confocalDescriptor, np.uint8)
                        tissueCyteDescriptor = np.asarray(
                            tissueCyteDescriptor, np.uint8)
                    if (algorithm == "sift"):
                        tissueCyteDescriptor = np.reshape(
                            tissueCyteDescriptor, (-1, 128))
                        confocalDescriptor = np.reshape(
                            confocalDescriptor, (-1, 128))
                    elif (algorithm == "surf"):
                        tissueCyteDescriptor = np.reshape(
                            tissueCyteDescriptor, (-1, 64))
                        confocalDescriptor = np.reshape(
                            confocalDescriptor, (-1, 64))
                    elif (algorithm == "orb"):
                        tissueCyteDescriptor = np.reshape(
                            tissueCyteDescriptor, (-1, 32))
                        confocalDescriptor = np.reshape(
                            confocalDescriptor, (-1, 32))
                    matches = det.otherMatchingFunction(
                        confocalDescriptor, tissueCyteDescriptor)
                    print(len(matches))
                    _, fileName = os.path.split(file)
                    if (crossCheck != True):
                        img3 = cv.drawMatchesKnn(img1,
                                                 confocalKeypoint,
                                                 img2,
                                                 tissueCyteKeypoint,
                                                 matches,
                                                 None,
                                                 flags=2)
                    else:
                        img3 = cv.drawMatches(img1,
                                              confocalKeypoint,
                                              img2,
                                              tissueCyteKeypoint,
                                              matches,
                                              None,
                                              flags=2)
                    cv.imwrite((
                        '/mnt/TissueCyte80TB/Marcus_ImageMatching/DrawMatches/'
                        + fileName + '_' + y[0] + '_' + x + '_' +
                        'MatchesImage.jpg'), img3)
Example #38
kp2, des2 = sift.detectAndCompute(img2_gray, None)

# Record the algorithm start time
# 4) FLANN feature matching
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
# print(matches)
goodMatch = []
# Filter the matches (ratio test)
for m, n in matches:
    if m.distance < 0.50 * n.distance:
        goodMatch.append(m)
# Add a dimension so drawMatchesKnn receives a list of lists
goodMatch = np.expand_dims(goodMatch, 1)
flann_img_out = cv2.drawMatchesKnn(img1_rectified,
                                   kp1,
                                   img2_rectified,
                                   kp2,
                                   goodMatch,
                                   None,
                                   flags=2)
flann_time = (time.time() - start)
print("flann_time:", '% 4f' % (flann_time * 1000))

cv2.imshow('SIFT_match', flann_img_out)  # show the image
cv2.waitKey(0)  # wait for a key press
cv2.destroyAllWindows()  # close all windows
Example #39
        if m.distance < 0.75 * n.distance:
            good.append([m])
    good_arr[i - 1] = good
    num_match[i - 1] = len(good)

temp = num_match
print(num_match)
max_match = max(temp)
print(max_match)
indx = num_match.index(max_match)
print(indx)
#Draw first best matches.
img3 = cv2.drawMatchesKnn(query,
                          kp1,
                          img[indx],
                          kp2[indx],
                          good_arr[indx],
                          None,
                          flags=2)
'''if indx==0:
        servomotor1()
if indx==1:
        servomotor2()
if indx==2:
        servomotor3()
if indx==3:
        servomotor4()
'''
#plt.imshow(img3),plt.show()
#cv2.imwrite('ploting',img3)
cv2.imshow('image', img3)
Example #40
0
match = cv2.BFMatcher()
matches = match.knnMatch(desr,desl,k=2)
print(matches)

good=[]
not_good = []
for m,n in matches:
    if(m.distance < 0.75 * n.distance):
        good.append([m])
    else:
        not_good.append(m)



draw_param = dict(matchColor = (0,255,0),flags=2)
img3 =cv2.drawMatchesKnn(imageRight,kpr,imageLeft,kpl,good,None,**draw_param)
#img4 =cv2.drawMatches(imageLeft,kpl,imageRight,kpr,matches,None,**draw_param)
cv2.imshow("Wow", img3)



def trim(frame):
    #crop top
    if not np.sum(frame[0]):
        return trim(frame[1:])
    #crop bottom
    elif not np.sum(frame[-1]):
        return trim(frame[:-2])
    #crop left
    elif not np.sum(frame[:,0]):
        return trim(frame[:,1:])
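The trim helper is cut off by the excerpt; a sketch of the remaining branches, assuming they mirror the pattern above:

    #crop right
    elif not np.sum(frame[:,-1]):
        return trim(frame[:,:-2])
    #nothing left to crop
    else:
        return frame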
Example #41
0
image1_keypoint = cv2.drawKeypoints(image1,keypoint1,None)
image2_keypoint = cv2.drawKeypoints(image2,keypoint2,None)


# BFMatcher with default params
bruteforce_matcher = cv2.BFMatcher()
total_matches = bruteforce_matcher.knnMatch(d1,d2, k=2)

# Apply ratio test
good_points = []
for m,n in total_matches:
    if m.distance < 0.75*n.distance:
        good_points.append([m])

# cv2.drawMatchesKnn expects list of lists as matches.
image_knn = cv2.drawMatchesKnn(image1,keypoint1,image2,keypoint2,good_points,None,flags=2)

cv2.imwrite("task2_sift1.png", image1_keypoint)
cv2.imwrite("task2_sift2.png", image2_keypoint)
cv2.imwrite("task2_matches_knn.jpg",image_knn)

#task2 part3: selecting inlier match pairs and computing epipolar lines
# FLANN parameters
FLANN_INDEX_KDTREE = 1  # kd-tree index (0 would select linear search)
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks=50)

flann = cv2.FlannBasedMatcher(index_params,search_params)
total_matches = flann.knnMatch(d1,d2,k=2)

points_good = []
Example #42
0
import numpy as np
import cv2
from matplotlib import pyplot as plt

if __name__ == '__main__':
    img1 = cv2.imread('../images/manowar_logo.png', 0)
    img2 = cv2.imread('../images/manowar_single.jpg', 0)
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(des1, des2)
    matches = sorted(matches, key=lambda x: x.distance)
    img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:25], img2, flags=2)
    plt.figure(12)
    plt.subplot(1, 2, 1)
    plt.imshow(img3)
    matches2 = bf.knnMatch(des1, des2, k=1)
    img4 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matches2, img2, flags=2)
    plt.subplot(1, 2, 2)
    plt.imshow(img4)
    plt.show()
Example #43
0

#to show only best matches, a mask is created
matchesMask = [[0,0] for i in xrange(len(matches))]

for i, (m,n) in enumerate(matches):
	if m.distance < 0.9 * n.distance:
		matchesMask[i] = [1,0]


#dictionary that describes drawing details when the result is displayed
draw_params = dict(matchColor = (0, 0, 255), singlePointColor = (255,0,0), matchesMask = matchesMask, flags = 0)


#creates final image
final = cv2.drawMatchesKnn(source, kp1, train, kp2, matches, None, **draw_params)

#cv2.imwrite("/Users/student_mac1/Desktop/matchoctagon2.jpeg", final)

#displays result
cv2.namedWindow("final", cv2.WINDOW_NORMAL)
cv2.imshow("final", final)

cv2.waitKey(0)

'''
mylist = [[5,4,7],[6,9,3],[6,3,8]]

for i,j,k in mylist:
	print i
	print j
Example #44
0
    for i,(m,n) in enumerate(matches3):
        if m.distance < 0.7*n.distance:
            matchesMask3[i]=[1,0]

            
    draw_params1 = dict(matchColor = (0,255,0),
                       singlePointColor = (255,0,0),
                       matchesMask = matchesMask1,
                       flags = 0)

    draw_params2 = dict(matchColor = (0,255,0),
                       singlePointColor = (255,0,0),
                       matchesMask = matchesMask2,
                       flags = 0)
    draw_params3 = dict(matchColor = (0,255,0),
                       singlePointColor = (255,0,0),
                       matchesMask = matchesMask3,
                       flags = 0)

    img5 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,matches1,None,**draw_params1)
    img6 = cv2.drawMatchesKnn(img1,kp1,img3,kp3,matches2,None,**draw_params2)
    img7 = cv2.drawMatchesKnn(img1,kp1,img4,kp4,matches3,None,**draw_params3)

    cv2.imshow('reference Image', img1)
    cv2.waitKey()

    plt.imshow(img5,),plt.show()
    plt.imshow(img6,),plt.show()
    plt.imshow(img7,),plt.show()
Example #45
0
import numpy as np
import cv2
from matplotlib import pyplot as plt

img1 = cv2.imread('test_field.JPG',0)          # queryImage
img2 = cv2.imread('star.PNG',0) # trainImage

# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()

# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)

# BFMatcher with default params
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1,des2, k=2)

# Apply ratio test
good = []
for m,n in matches:
    if m.distance < 0.75*n.distance:
        good.append([m])

# cv2.drawMatchesKnn expects list of lists as matches.
img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good,None,flags=2)

plt.imshow(img3), plt.show()
Example #46
0
def attach_homo(left_image, right_image):
    left_image = np.array(left_image)
    right_image = np.array(right_image)
    # cv.imshow('src',left_image)
    # cv.imshow('warp',right_image)
    top, bot, left, right = 100, 100, 150, 150
    srcImg = cv.copyMakeBorder(left_image,
                               top,
                               bot,
                               left,
                               right,
                               cv.BORDER_CONSTANT,
                               value=(0, 0, 0))
    testImg = cv.copyMakeBorder(right_image,
                                top,
                                bot,
                                left,
                                right,
                                cv.BORDER_CONSTANT,
                                value=(0, 0, 0))
    img1gray = cv.cvtColor(srcImg, cv.COLOR_BGR2GRAY)
    img2gray = cv.cvtColor(testImg, cv.COLOR_BGR2GRAY)

    img1gray = downsample_image(img1gray, 1)
    img2gray = downsample_image(img2gray, 1)

    sift = cv.xfeatures2d_SIFT().create()
    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1gray, None)
    kp2, des2 = sift.detectAndCompute(img2gray, None)
    # FLANN parameters
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # Need to draw only good matches, so create a mask
    matchesMask = [[0, 0] for i in range(len(matches))]

    good = []
    pts1 = []
    pts2 = []
    # ratio test as per Lowe's paper
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.5 * n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)
            matchesMask[i] = [1, 0]

    draw_params = dict(matchColor=(0, 255, 0),
                       singlePointColor=(255, 0, 0),
                       matchesMask=matchesMask,
                       flags=0)
    # print(matches)
    img3 = cv.drawMatchesKnn(img1gray, kp1, img2gray, kp2, matches, None,
                             **draw_params)
    # cv.imshow('drawmatch',img3)
    MIN_MATCH_COUNT = 10
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
        print(M)
        np.save("zed_M", M)
Example #47
0
def transform(videopath):
    cap = cv2.VideoCapture(videopath)
    n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    _, prev = cap.read()

    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    # fourcc = cv2.VideoWriter_fourcc('I', '4', '2', '0')
    fourcc = cv2.VideoWriter_fourcc('I', '4', '2',
                                    '0')  # ('I','4','2','0' corresponds to the AVI format)
    path2 = videopath[:-4] + "_S" + ".avi"
    out = cv2.VideoWriter(path2, fourcc, 25, (w, h))

    # Convert frame to grayscale
    img1 = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)

    # Pre-define transformation-store array
    transforms = np.zeros((n_frames - 1, 3), np.float32)
    # scaleX_list = np.zeros((n_frames - 1, 1), np.float32)
    # scaleY_list = np.zeros((n_frames - 1, 1), np.float32)
    pose_matrixs = np.zeros((n_frames - 1, 3, 3), np.float32)

    # transforms = []
    # scaleY_list = []
    # scaleX_list = []
    for j in range(n_frames - 2):
        success, img2 = cap.read()
        img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
        if not success:
            break

        orb = cv.ORB_create(nlevels=8)
        kp1 = orb.detect(img1, None)
        # compute the descriptors with ORB
        kp1, des1 = orb.compute(img1, kp1)
        kp2 = orb.detect(img2, None)
        # compute the descriptors with ORB
        kp2, des2 = orb.compute(img2, kp2)

        # get the LUT of pos For 2 imgs
        LUT_queryImg1 = []
        LUT_trainImg2 = []
        for i, n in enumerate(kp1):
            LUT_queryImg1.append((i, n.pt))
        LUT_queryImg1 = dict(LUT_queryImg1)
        # print('\nQUERY LUT', LUT_queryImg1)

        for i, n in enumerate(kp2):
            LUT_trainImg2.append((i, n.pt))
        LUT_trainImg2 = dict(LUT_trainImg2)
        # print('\nTRAIN LUT', LUT_trainImg2)
        LUT_queryImg1_des = []
        LUT_trainImg2_des = []
        for i in range(np.shape(des1)[0]):
            LUT_queryImg1_des.append((i, list(des1[i])))
        LUT_queryImg1_des = dict(LUT_queryImg1_des)
        # print('\nQUERY LUT of descriptor', LUT_queryImg1_des)

        for i in range(np.shape(des2)[0]):
            LUT_trainImg2_des.append((i, list(des2[i])))
        LUT_trainImg2_des = dict(LUT_trainImg2_des)
        # print('\ntrain LUT of descriptor', LUT_trainImg2_des)

        index_params = dict(algorithm=6,
                            table_number=6,
                            key_size=12,
                            multi_probe_level=2)
        search_params = {}
        flann = cv.FlannBasedMatcher(index_params, search_params)

        matches = flann.knnMatch(des1, des2, k=2)
        # print('\nthe num of unfimaltered matched kp pairs :' + str(len(matches)))

        # Need to draw only good matches, so create a mask
        matchesMask = [[0, 0] for i in range(len(matches))]
        # ratio test as per Lowe's paper
        num_MP = 0
        matched_idx = []
        for i, (m, n) in enumerate(matches):
            if m.distance < 0.2 * n.distance:  # m and n are the two nearest matches; a smaller ratio means higher precision but fewer matches
                matchesMask[i] = [
                    1, 0
                ]  # the mask selects whether to draw the best match m or the second-best n
                num_MP += 1
                matched_idx.append([m.queryIdx, m.trainIdx])

        matched_idxNpos = []
        prvs_pts = np.zeros((len(matched_idx), 1, 2))
        next_pts = np.zeros((len(matched_idx), 1, 2))
        k = 0
        for i in matched_idx:
            query_idx, train_idx = i[0], i[1]
            prvs_pts[k] = np.array(LUT_queryImg1[query_idx])
            next_pts[k] = np.array(LUT_trainImg2[train_idx])
            k = k + 1

        # print('\nthe index N pos for matched kp (query,train):', matched_idxNpos)  # same   query is ordered.

        draw_params = dict(matchColor=(255, 218, 185),
                           singlePointColor=(255, 0, 0),
                           matchesMask=matchesMask,
                           flags=cv.DrawMatchesFlags_DEFAULT)
        print("\n frame ID :", j)
        print('the num of orb kp in query_img img1 :' + str(len(kp1)))
        print('the num of orb kp in train_img img2 :' + str(len(kp2)))
        print('the num of filtered matched kp pairs :' + str(num_MP))

        img3 = cv.drawMatchesKnn(img1, kp1, img2, kp2, matches, None,
                                 **draw_params)
        cv.namedWindow('', 0)  # WINDOW_NORMAL, so the large match image can be resized to fit the screen
        cv.imshow('', img3)

        cv.waitKey(1)
        img1 = img2

        km = prvs_pts.astype(np.float32)
        km2 = next_pts.astype(np.float32)
        if len(km) < 3:
            print("Lost tracking! ")
            break
        # m = cv2.getAffineTransform(km, km2)
        m = cv2.estimateRigidTransform(km, km2, fullAffine=False)
        if m is None:
            print("transform is Nonetype!")
            break
        # dx = m[0, 2]
        # dy = m[1, 2]
        # da = np.arctan2(m[1, 0], m[0, 0])
        # scale_x = np.sqrt(m[0, 1] ** 2 + m[0, 0] ** 2)
        # scale_y = np.sqrt(m[1, 1] ** 2 + m[1, 0] ** 2)
        mat = np.zeros((3, 3))
        mat[:2, :3] = m

        if j == 37:
            print()

        dx = m[0, 2]
        dy = m[1, 2]
        da = np.arctan2(m[1, 0], m[0, 0])
        transforms[j] = [dx, dy, da]
        pose_matrixs[j] = mat.astype(np.float32)
    transforms = -np.cumsum(transforms, axis=0)
    pose_matrixs = cumdot(pose_matrixs).astype(np.float32)

    # trajectory = np.cumsum(transforms, axis=0)
    # cum_scale_X = np.cumprod(scaleX_list, axis=0)
    # cum_scale_Y = np.cumprod(scaleY_list, axis=0)
    # transforms = -trajectory
    cap.set(cv2.CAP_PROP_POS_FRAMES, 0)

    # Write n_frames-1 transformed frames
    for i in range(n_frames - 2):
        # Read next frame
        success, frame = cap.read()
        if not success:
            break

        dx = transforms[i, 0]
        dy = transforms[i, 1]
        da = transforms[i, 2]
        # if m is None:
        #     continue
        m = np.zeros((2, 3))

        m[0, 0] = np.cos(da)
        m[0, 1] = -np.sin(da)
        m[1, 0] = np.sin(da)
        m[1, 1] = np.cos(da)
        m[0, 2] = dx
        m[1, 2] = dy

        # Apply the affine warp to the given frame
        frame_stabilized = cv2.warpAffine(frame, m, (w, h))
        frame_out = cv2.hconcat([frame, frame_stabilized])

        cv2.imshow("Before and After", frame_out)
        cv2.waitKey(0)
        # cv2.imwrite(stable_path + str(i) + '.png', frame_stabilized)
        out.write(frame_stabilized)
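cv2.estimateRigidTransform, used in the loop above, was removed in OpenCV 4.x; a sketch of a near drop-in replacement using the same km/km2 point arrays from the loop:

# similarity transform (rotation, translation, uniform scale), plus an inlier mask
m, inliers = cv2.estimateAffinePartial2D(km, km2, method=cv2.RANSAC)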
Example #48
0
def main():
    # Based on the entered image path, build the pattern path using split and join
    desired_img_path = ""
    while not Path(desired_img_path).is_file():
        desired_img_path = str(
            input("Enter the image file path to find the camera pose for: "))

    # Splits the desired img path and uses it to create the pattern.png path (both images should be in same directory)
    desired_img_list = desired_img_path.split('/')
    pattern_path_png = '/'.join(desired_img_list[:-1]) + "/pattern.png"
    if not Path(pattern_path_png).is_file():
        print(
            "Could not find pattern.png inside of the path to the desired image."
        )
        return

    # Convert pattern.png into a jpg if it doesn't exist
    # pattern_path_jpg = pattern_path_png[:-3] + "jpg"
    # if not Path(pattern_path_jpg).is_file():
    #     # Grabs the pattern.png and then write a new file converting it into a jpg
    #     pattern_img = cv2.imread(pattern_path_png)
    #     cv2.imwrite(pattern_path_jpg, pattern_img)

    # Read in the new pattern.jpg (or pattern.png) and the desired image
    # pattern_img = cv2.imread(pattern_path_jpg, 0)
    pattern_img = cv2.imread(pattern_path_png, 0)
    desired_img = cv2.imread(desired_img_path, 0)

    # Print message start
    print("Please wait, analyzing images...")

    # Had a few options for finding KeyPoints between the pattern and the desired image
    # Decided to use sift because it seemed a bit more accurate when getting KeyPoints from different orientations
    # orb = cv2.ORB_create()
    # surf = cv2.xfeatures2d.SURF_create()
    sift = cv2.xfeatures2d.SIFT_create()

    # Obtain the KeyPoints and Descriptors for the pattern and desired image
    pattern_kp, pattern_des = sift.detectAndCompute(pattern_img, None)
    desired_kp, desired_des = sift.detectAndCompute(desired_img, None)

    # Get an array of matches between descriptors in both images and get good matches based off a threshold distance
    matches = findDescriptorMatches(pattern_des, desired_des)
    good_matches = getGoodMatchDescriptors(matches)

    # Get the points of the pattern and points of the desired image
    pattern_pts, desired_img_pts = getImagePoints(pattern_kp, desired_kp)

    # Obtain the image constant c, rotation matrix, and translation matrix from the estimated pose
    c, rotation_mat, translation_mat = getPoseMatrix(pattern_pts,
                                                     desired_img_pts,
                                                     desired_img)
    print("\nScalar: " + str(c))
    printMatrix("Rotation Matrix", rotation_mat)
    printMatrix("Translation Matrix", translation_mat)

    # Calculate the Euler Angles in radians based off of the rotation matrix
    euler_angles_mat = rotationMatrixToEulerAngles(rotation_mat)
    printAxisRotations("Axis Rotations (Degrees -180˚ to 180˚)",
                       euler_angles_mat)

    # Calculate the coordinates of the image based off the translation matrix
    printTranslations("Translations (X, Y, Z)", translation_mat[:, 0], c)

    # Print message end
    print("\nComplete.")

    # Create the KeyPoint connected image (pattern and desired image) then plot it to display it
    pattern_connected_img = cv2.drawMatchesKnn(pattern_img,
                                               pattern_kp,
                                               desired_img,
                                               desired_kp,
                                               good_matches,
                                               None,
                                               flags=2)

    plt.imshow(pattern_connected_img), plt.show()

    return
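main() relies on several helpers not shown in this excerpt; a plausible sketch of rotationMatrixToEulerAngles, assuming the standard ZYX (roll-pitch-yaw) decomposition returning radians:

import numpy as np

def rotationMatrixToEulerAngles(R):
    # ZYX convention: returns rotations about the x, y, z axes in radians
    sy = np.sqrt(R[0, 0] ** 2 + R[1, 0] ** 2)
    if sy > 1e-6:
        x = np.arctan2(R[2, 1], R[2, 2])
        y = np.arctan2(-R[2, 0], sy)
        z = np.arctan2(R[1, 0], R[0, 0])
    else:  # gimbal lock: pitch is +/- 90 degrees
        x = np.arctan2(-R[1, 2], R[1, 1])
        y = np.arctan2(-R[2, 0], sy)
        z = 0.0
    return np.array([x, y, z])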
Example #49
0
def visualize(left_path, right_path, f_mat, sqResultDir):
    colors = [(255, 102, 102), (102, 255, 255), (125, 125, 125),
              (204, 229, 255), (0, 0, 204)]
    THRESHOLD = 0.2
    sift = cv2.xfeatures2d.SIFT_create()
    bf = cv2.BFMatcher()
    f_mat = np.array(f_mat.reshape((3, 3)))

    left_img = cv2.imread(left_path)
    # -------
    hl, wl = left_img.shape[0], left_img.shape[1]
    left_img = left_img[int(hl / 2) - 128:int(hl / 2) + 128,
                        int(wl / 2) - 128:int(wl / 2) + 128]
    # --------------------------

    left_imgG = cv2.cvtColor(left_img.copy(), cv2.COLOR_BGR2GRAY)
    left_img_line = left_img.copy()

    right_img = cv2.imread(right_path)
    # -------------------------------
    hr, wr = right_img.shape[0], right_img.shape[1]
    right_img = right_img[int(hr / 2) - 128:int(hr / 2) + 128,
                          int(wr / 2) - 128:int(wr / 2) + 128]

    right_imgG = cv2.cvtColor(right_img.copy(), cv2.COLOR_BGR2GRAY)
    right_img_line = right_img.copy()

    (kps_left, descs_left) = sift.detectAndCompute(left_imgG, None)
    (kps_right, descs_right) = sift.detectAndCompute(right_imgG, None)

    matches = bf.knnMatch(descs_left, descs_right, k=2)

    good = []
    for m, n in matches:
        if m.distance < THRESHOLD * n.distance:
            good.append([m])

    img3 = cv2.drawMatchesKnn(
        left_imgG,
        kps_left,
        right_imgG,
        kps_right,
        good,
        None,
        flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
    cv2.imwrite(os.path.join(sqResultDir, 'feature_matching.png'), img3)

    err_l = []
    err_r = []
    img_W = left_img.shape[1] - 1
    # ---------------------------------------------------------------------
    for color_idx, g in enumerate(good):
        # get the ids of matching feature points
        id_l, id_r = g[0].queryIdx, g[0].trainIdx
        # x: column
        # y: row
        # get the feature points in both left and right images
        x_l, y_l = kps_left[id_l].pt
        x_r, y_r = kps_right[id_r].pt
        '''Color for line'''
        color = colors[color_idx % len(colors)]
        '''Epi line on the left image'''
        # epi line of right points on the left image
        point_r = np.array([x_r, y_r, 1])
        line_l = np.dot(f_mat.T, point_r)

        # calculating 2 points on the line
        y_0 = epipoline(0, line_l)
        y_1 = epipoline(img_W, line_l)
        # drawing the line and feature points on the left image
        left_img_line = cv2.circle(left_img_line, (int(x_l), int(y_l)),
                                   radius=4,
                                   color=color)
        left_img_line = cv2.line(left_img_line, (0, y_0), (img_W, y_1),
                                 color=color,
                                 lineType=cv2.LINE_AA)
        # displaying just feature points
        left_img = cv2.circle(left_img, (int(x_l), int(y_l)),
                              radius=4,
                              color=color)
        '''Epi line on the right image'''
        # epi line of left points on the right image
        point_l = np.array([x_l, y_l, 1])
        line_r = np.dot(f_mat, point_l)

        # verifying points
        err_R = verify_xfx(line_r, point_r)
        err_r.append(err_R)
        # verifying points
        err_L = verify_xfx(line_l, point_l)
        err_l.append(err_L)
        # calculating 2 points on the line
        y_0 = epipoline(0, line_r)
        y_1 = epipoline(img_W, line_r)

        # drawing the line on the right image
        right_img_line = cv2.circle(right_img_line, (int(x_r), int(y_r)),
                                    radius=4,
                                    color=color)
        right_img_line = cv2.line(right_img_line, (0, y_0), (img_W, y_1),
                                  color=color,
                                  lineType=cv2.LINE_AA)
        # displaying just feature points
        right_img = cv2.circle(right_img, (int(x_r), int(y_r)),
                               radius=4,
                               color=color)
    l_avgErr = np.average(err_l) if err_l else 0
    r_avgErr = np.average(err_r) if err_r else 0

    vis = np.concatenate((left_img_line, right_img_line), axis=0)
    font = cv2.FONT_HERSHEY_SIMPLEX

    img_H = vis.shape[0]
    x, y, w, h = 0, 0, 50, 25

    # Draw black background rectangle
    cv2.rectangle(vis, (7, 10), (w, h), (0, 0, 0), -1)
    cv2.rectangle(vis, (7, img_H - 20), (w, img_H - 7), (0, 0, 0), -1)

    cv2.putText(vis,
                '{:.4f}'.format(float(l_avgErr)), (10, 20),
                font,
                0.3,
                color=(255, 255, 255),
                lineType=cv2.LINE_AA)
    cv2.putText(vis,
                '{:.4f}'.format(float(r_avgErr)), (10, img_H - 10),
                font,
                0.3,
                color=(255, 255, 255),
                lineType=cv2.LINE_AA)

    if not os.path.exists(sqResultDir):
        os.makedirs(sqResultDir)
    print("Writing image ... " + 'epipoLine_sift.png')
    cv2.imwrite(os.path.join(sqResultDir, 'epipoLine_sift.png'), vis)
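visualize calls an epipoline helper that is not included in this excerpt; a minimal sketch of what it presumably does, assuming line holds the (a, b, c) coefficients of the epipolar line a*x + b*y + c = 0 produced by the F-matrix products above:

def epipoline(x, line):
    # y-coordinate of the epipolar line a*x + b*y + c = 0 at column x
    a, b, c = line
    return int(round(-(a * x + c) / b))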
Example #50
0
sampleImage=cv2.imread(samplePath,0)
kp1, des1 = sift.detectAndCompute(sampleImage, None) # extract features of the sample image
for parent,dirnames,filenames in os.walk(queryPath):
    for p in filenames:
        print(p)
        p=queryPath+p
        queryImage=cv2.imread(p,0)
        kp2, des2 = sift.detectAndCompute(queryImage, None) # extract features of the query image
        matches=flann.knnMatch(des1,des2,k=2) # match keypoints; k=2 so each sample keypoint returns its two nearest matches for the ratio test
        (matchNum,matchesMask)=getMatchNum(matches,0.9) # compute the match quality via the ratio test
        matchRatio=matchNum*100/len(matches)
        print(matchRatio)
        drawParams=dict(matchColor=(0,255,0),
                singlePointColor=(255,0,0),
                matchesMask=matchesMask,
                flags=0)
        comparisonImage=cv2.drawMatchesKnn(sampleImage,kp1,queryImage,kp2,matches,None,**drawParams)
        comparisonImageList.append((comparisonImage,matchRatio)) # record the result

comparisonImageList.sort(key=lambda x:x[1],reverse=True) # sort by match ratio
count=len(comparisonImageList)
column=4
row=math.ceil(count/column)
# plot the comparison results
figure,ax=plt.subplots(row,column)
for index,(image,ratio) in enumerate(comparisonImageList):
    ax[int(index/column)][index%column].set_title('Similarity %.2f%%' % ratio)
    ax[int(index/column)][index%column].imshow(image)
plt.show()
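getMatchNum is not shown in this excerpt; a plausible sketch, assuming it applies Lowe's ratio test with the given ratio and returns both the match count and a mask usable by drawMatchesKnn:

def getMatchNum(matches, ratio):
    matchesMask = [[0, 0] for _ in matches]    # mask for drawMatchesKnn
    matchNum = 0
    for i, (m, n) in enumerate(matches):
        if m.distance < ratio * n.distance:    # ratio test
            matchesMask[i] = [1, 0]
            matchNum += 1
    return matchNum, matchesMask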
Example #51
0
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt

# FLANN parameters
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)  # or pass empty dictionary
flann = cv.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
# Need to draw only good matches, so create a mask
matchesMask = [[0, 0] for i in range(len(matches))]
# ratio test as per Lowe's paper
for i, (m, n) in enumerate(matches):
    if m.distance < 0.7 * n.distance:
        matchesMask[i] = [1, 0]
draw_params = dict(matchColor=(0, 255, 0),
                   singlePointColor=(255, 0, 0),
                   matchesMask=matchesMask,
                   flags=cv.DrawMatchesFlags_DEFAULT)
img3 = cv.drawMatchesKnn(img1, kp1, img2, kp2, matches, None, **draw_params)
plt.imshow(img3, ), plt.show()
Example #52
0
    def calculate_ratio(self, cur_frame, old_frame):

        # Detecting keypoints
        #old_kp, old_des = self.detect_keypoints(old_frame)
        cur_kp, cur_des = self.detect_keypoints(cur_frame)

        # Match and filter keypoints
        best = self.bf_matcher(self.old_kp, self.old_des, cur_kp, cur_des)

        # Draw rectangle to represent margin
        cur_frame = cv2.rectangle(cur_frame, (self.xmargin, self.ymargin),
                                  (cur_frame.shape[1] - self.xmargin,
                                   cur_frame.shape[0] - self.ymargin),
                                  (0, 0, 255), 2)

        # DEBUG: shows user the matches of the matcher
        if self.show_matches:
            gray1 = cv2.cvtColor(np.copy(old_frame), cv2.COLOR_BGR2GRAY)
            gray2 = cv2.cvtColor(np.copy(cur_frame), cv2.COLOR_BGR2GRAY)
            img = cv2.drawMatchesKnn(gray1,
                                     self.old_kp,
                                     gray2,
                                     cur_kp,
                                     best,
                                     None,
                                     singlePointColor=(0, 0, 255))
            cv2.imshow("Matches", img)

        # If no hull is found stop here
        if len(best) <= 3:
            self.old_kp = cur_kp
            self.old_des = cur_des
            return -1, -1

        # Calculate hull of previous frame
        old_hull = self.calculate_hull(
            self.kp_to_points(best, self.old_kp, False))
        old_hull_size = self.hull_size(old_hull)
        self.draw_hull(old_frame, old_hull)

        # Calculate hull of current frame
        cur_hull = self.calculate_hull(self.kp_to_points(best, cur_kp))
        cur_hull_size = self.hull_size(cur_hull)
        self.draw_hull(cur_frame, cur_hull)

        #Calculate hull ratio
        if cur_hull_size <= 0.0:
            hull_ratio = 0.0
        else:
            hull_ratio = cur_hull_size / old_hull_size

        # Calculate keypoint ratio
        total = 0
        best_cur_kp = []
        for m in best:
            prev_size = self.old_kp[m[0].queryIdx].size
            cur_size = cur_kp[m[0].trainIdx].size
            best_cur_kp.append(cur_kp[m[0].trainIdx])
            total += cur_size / prev_size

        kp_ratio = total / len(best)

        #DEBUG: save max kp ratio and hull ratio to show at shutdown
        if kp_ratio > self.max_kp_ratio and hull_ratio > self.max_hull_ratio:
            self.max_kp_ratio = kp_ratio
            self.max_hull_ratio = hull_ratio
            self.max_previous = old_frame
            self.max_current = cur_frame

        self.old_kp = cur_kp
        self.old_des = cur_des

        if self.show_matches:
            log.debug("Hull ratio: " + str(hull_ratio) + " KP ratio: " +
                      str(kp_ratio))

        return hull_ratio, kp_ratio
Example #53
0
matches = flann.knnMatch(des1,des2,k=2)

# Need to draw only good matches, so create a mask
matchesMask = [[0,0] for i in xrange(len(matches))]

# ratio test as per Lowe's paper
for i,(m,n) in enumerate(matches):
    if m.distance < 0.7*n.distance:
        matchesMask[i]=[1,0]

draw_params = dict(matchColor = (0,255,0),
                   singlePointColor = (255,0,0),
                   matchesMask = matchesMask,
                   flags = 0)

img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,matches,None,**draw_params)
plt.figure(figsize=(10,10))
plt.imshow(img3,),plt.show()

# <markdowncell>

# However, SIFT encodes orientation information and is comparatively slow, so we'll use SURF instead. With upright (direction-invariant) descriptors only the size matters, since stars should look the same from any angle (obviously).

# <codecell>

surf = cv2.SURF(10)
surf.upright = True #Direction invariant
kp1, des1 = surf.detectAndCompute(img1,None)
kp2, des2 = surf.detectAndCompute(img2,None)
#for kps in kp1:
#    print "x: " + str(kps.pt[0]) + " y: " + str(kps.pt[1]) + " Size: " + str(kps.size) + " Octave: " \
Example #54
0
matches = flann.knnMatch(descriptors1, descriptors2, k=2)

matchesMask = [[0, 0] for i in range(len(matches))]
#Apply ratio test
good = []
good_pts = []
for i, (m, n) in enumerate(matches):
    if m.distance < 0.75 * n.distance:
        good.append([m])
        good_pts.append(m)

featureMacthing = cv2.drawMatchesKnn(image1,
                                     detected_points1,
                                     image2,
                                     detected_points2,
                                     good,
                                     None,
                                     flags=2)

#=======================================part3==============================================================================
src_pts = np.float32([detected_points1[m.queryIdx].pt
                      for m in good_pts]).reshape(-1, 1, 2)
dst_pts = np.float32([detected_points2[m.trainIdx].pt
                      for m in good_pts]).reshape(-1, 1, 2)
print(src_pts.shape)
print(dst_pts.shape)
homography, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC)
#print(mask)
#=====================================part4================================================================================
inliner = []
Example #55
0
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img1 = cv.imread('box.png', 0)  # queryImage
img2 = cv.imread('boxinscene.png', 0)  # trainImage
# Initiate SIFT detector
sift = cv.xfeatures2d.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
# BFMatcher with default params
bf = cv.BFMatcher()

matches = bf.knnMatch(des1, des2, k=2)
# Apply ratio test
good = []
for m, n in matches:
    if m.distance < 0.75 * n.distance:
        good.append([m])
# cv.drawMatchesKnn expects list of lists as matches.
img3 = cv.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
plt.imshow(img3), plt.show()
Example #56
0
        def BFSIFT():
            connectdb = sqlite3.connect("results.db")
            cursor = connectdb.cursor()

            #  img1 = cv2.imread("1.png")
            #  img11 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
            img1 = cv2.imread('1.png', cv2.IMREAD_GRAYSCALE)
            imageA = cv2.resize(img1, (450, 237))
            database = os.listdir("db")

            for image in database:
                img2 = cv2.imread("db/" + image)

                imgprocess = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

                imageB = cv2.resize(imgprocess, (450, 237))

                matcheslist = ""

                # Initiate SIFT detector
                sift = cv2.xfeatures2d.SIFT_create()
                # find the keypoints and descriptors with SIFT
                kp1, des1 = sift.detectAndCompute(imageA, None)
                kp2, des2 = sift.detectAndCompute(imageB, None)

                # BFMatcher with default params
                bf = cv2.BFMatcher()
                matches = bf.knnMatch(des1, des2, k=2)
                # Apply ratio test
                good = []
                for m, n in matches:
                    if m.distance < 0.75 * n.distance:
                        good.append([m])
                # cv.drawMatchesKnn expects list of lists as matches.

                amount = len(good)
                print('Comparing input image to ' + image + " using BFSIFT")

                title = "Comparing"
                fig = plt.figure(title)

                cursor.execute(
                    "INSERT INTO BFSIFT (percentage, filename) VALUES (?, ?);",
                    (amount, image))
                connectdb.commit()

            percentages = list(connectdb.cursor().execute(
                "SELECT * FROM BFSIFT order by percentage desc limit 10"))
            print(percentages[0])
            highest = percentages[0]

            # getting number of matches
            highestperct = round(highest[0], 2)
            print(highestperct)

            # getting file name of highest similarity
            filename = highest[1]
            print(filename)

            img1 = cv2.imread('1.png', cv2.IMREAD_GRAYSCALE)  # input image
            img2 = cv2.imread('db/' + filename,
                              cv2.IMREAD_GRAYSCALE)  # closet image

            # Initiate SIFT detector
            sift = cv2.xfeatures2d.SIFT_create()

            # find the keypoints and descriptors with SIFT
            kp1, des1 = sift.detectAndCompute(img1, None)
            kp2, des2 = sift.detectAndCompute(img2, None)

            # BFMatcher with default params
            bf = cv2.BFMatcher()
            matches = bf.knnMatch(des1, des2, k=2)

            # Apply ratio test
            good = []
            for m, n in matches:
                if m.distance < 0.75 * n.distance:
                    good.append([m])

            # cv.drawMatchesKnn expects list of lists as matches.
            print(good)
            print(kp1)
            print(kp2)

            plt.suptitle("Amount of matches : " + str(highestperct))
            disease = filename[:-4]
            txt = "Results: \n - " + filename + "\n - " + disease
            plt.text(0.40, 0.20, txt, transform=fig.transFigure, size=11)

            drawmatches = cv2.drawMatchesKnn(img1,
                                             kp1,
                                             img2,
                                             kp2,
                                             good,
                                             None,
                                             flags=2)
            plt.imshow(drawmatches), plt.axis("off"), plt.show()

            cursor.execute("DELETE FROM BFSIFT")
            connectdb.commit()
Example #57
0
import cv2
import numpy as np
import random

image = cv2.imread('image.jpg')
image_rot = cv2.imread('image_rot.jpg')
gray= cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
gray_rot = cv2.cvtColor(image_rot,cv2.COLOR_BGR2GRAY)

surf = cv2.xfeatures2d.SURF_create()

kp, desc = surf.detectAndCompute(gray,None)
kp_rot, desc_rot = surf.detectAndCompute(gray_rot, None)

# BFMatcher with default params
bf = cv2.BFMatcher()
matches = bf.knnMatch(desc,desc_rot, k=2)

# Apply ratio test
good = []
for m,n in matches:
    if m.distance < 0.4*n.distance:
        good.append([m])
random.shuffle(good)

# cv2.drawMatchesKnn expects list of lists as matches.
image_match = cv2.drawMatchesKnn(image,kp,image_rot,kp_rot,good[:10],flags=2, outImg=None)

cv2.imwrite('surf_matches.jpg',image_match)
Example #58
0
transform_par.trans_y = 0
transform_par.resize = 150
transform_par.trans_type = 2  #Options:	2 = Rotation, 3 = Resize

### Transformation1: Rotation
img2 = image_transformation(img1, transform_par)  # transform the image
List_I1Coordinates, List_I2Coordinates, matches, pts1, pts2, gray1, gray2, good = calcul_keypoints(
    img1, img2)  # compute the keypoints
list_qCoordinates = point_transformation(
    img1, List_I1Coordinates,
    transform_par)  # used to compute the quality of the matches
quality_factor = calcule_quality(list_qCoordinates, List_I2Coordinates,
                                 transform_par)
print("Match quality: %.2f%%" % (quality_factor))

# Display the matches that pass the ratio test
draw_params = dict(matchColor=(0, 255, 0),
                   singlePointColor=(255, 0, 0),
                   flags=0)
img3 = cv2.drawMatchesKnn(gray1, pts1, gray2, pts2, good, None, **draw_params)

Nb_ok = len(good)
plt.imshow(img3), plt.title('%i good matches' % Nb_ok)
plt.show()

print('The number of good matches for the rotated image is %i' % Nb_ok)

print(
    'The ratio of good matches to the total number of matches is %f'
    % (Nb_ok / len(matches)))
Example #59
0
import cv2

img1 = cv2.imread(r'C:\Users\mueda\Documents\S__41476104.jpg')
img2 = cv2.imread(r'C:\Users\mueda\Documents\S__41476106.jpg')
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)
ratio = 0.5
good = []
for m, n in matches:
    if m.distance < ratio * n.distance:
        good.append([m])
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good[:100], None, flags=2)
cv2.imshow('img', img3)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite('sift_matching.jpg', img3)
Example #60
0
def main(args):

    # Read the file containing the pairs used for testing
    pairs = read_pairs(os.path.expanduser(args.lfw_pairs))

    # Get the paths for the corresponding images
    paths, actual_issame = get_paths(os.path.expanduser(args.patch_dir), pairs)

    print(paths[0])
    print(paths[1])
    print(actual_issame[0])

    img1 = cv2.imread(paths[0], cv2.IMREAD_GRAYSCALE)
    img2 = cv2.imread(paths[1], cv2.IMREAD_GRAYSCALE)

    sift = cv2.xfeatures2d.SIFT_create()
    # surf = cv2.xfeatures2d.SURF_create()

    # create a mask image filled with zeros, the size of original image
    mask = np.zeros(img1.shape[:2], dtype=np.uint8)
    # draw your selected ROI on the mask image
    cv2.rectangle(mask, (24, 24), (40, 40), (255), thickness=-1)

    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    # Apply ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good.append([m])
    print(good)
    # cv2.drawMatchesKnn expects list of lists as matches.
    img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None)

    cv2.imshow("matches", img3)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    print(paths[400])
    print(paths[401])
    print(actual_issame[200])

    img1 = cv2.imread(paths[400], cv2.IMREAD_GRAYSCALE)
    img2 = cv2.imread(paths[401], cv2.IMREAD_GRAYSCALE)

    sift = cv2.xfeatures2d.SIFT_create()
    # surf = cv2.xfeatures2d.SURF_create()

    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)

    # Apply ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good.append([m])

    # cv2.drawMatchesKnn expects list of lists as matches.
    img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, img2)

    cv2.imshow("dismatch", img3)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    pass