def matchImages(original, image_to_compare, useFlann=False):
    """Match SIFT/RootSIFT features between two images.

    Parameters
    ----------
    original : ndarray
        Reference image.
    image_to_compare : ndarray
        Image to match against the reference.
    useFlann : bool, optional
        When True, use the FLANN-based matcher instead of brute force.

    Returns
    -------
    tuple
        ``(good_points, result)`` — the matches passing Lowe's ratio
        test and the drawn-matches visualisation image.

    Notes
    -----
    Reads the module-level globals ``useRootSIFT`` (descriptor choice)
    and ``LRatio`` (Lowe ratio threshold) — TODO confirm both are
    defined at module scope.
    """
    if useRootSIFT:
        # Extract RootSIFT descriptors (rs.compute detects keypoints
        # itself when given None).
        kps = None
        rs = RootSIFT()
        kp_1, desc_1 = rs.compute(original, kps)
        kp_2, desc_2 = rs.compute(image_to_compare, kps)
    else:
        sift = cv2.xfeatures2d.SIFT_create()
        kp_1, desc_1 = sift.detectAndCompute(original, None)
        kp_2, desc_2 = sift.detectAndCompute(image_to_compare, None)

    if useFlann:
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        print('Using Flann Matcher')
        matcher = cv2.FlannBasedMatcher(index_params, search_params)
    else:
        print('Using Brute Force Matcher')
        matcher = cv2.DescriptorMatcher_create("BruteForce")

    matches = matcher.knnMatch(desc_1, desc_2, k=2)

    # Lowe's ratio test. Guard against match tuples with fewer than two
    # neighbours, which knnMatch can return for small descriptor sets —
    # the original unconditional `for m, n in matches` would raise
    # ValueError in that case.
    good_points = []
    for pair in matches:
        if len(pair) < 2:
            continue
        m, n = pair
        if m.distance < LRatio * n.distance:
            good_points.append(m)
    print(len(good_points))

    result = cv2.drawMatches(original, kp_1, image_to_compare, kp_2,
                             good_points, None)
    # cv2.imshow("result", cv2.resize(result, (800, 600)))
    return (good_points, result)
def getRootSIFT(gray):
    """Detect SIFT keypoints in *gray* and return their RootSIFT
    descriptors as ``(keypoints, descriptors)``."""
    # Detect keypoints (the plain SIFT descriptors computed here are
    # discarded and replaced below).
    keypoints, descriptors = cv2.xfeatures2d.SIFT_create().detectAndCompute(gray, None)
    # Re-describe the same keypoints with RootSIFT.
    keypoints, descriptors = RootSIFT().compute(gray, keypoints)
    return keypoints, descriptors
def detectAndDescribe(self, image, useRootSIFT=False):
    """Detect keypoints and extract descriptors from *image*.

    Parameters
    ----------
    image : ndarray
        Input image (assumed already suitable for feature detection).
    useRootSIFT : bool, optional
        On OpenCV 3.x, use RootSIFT instead of plain SIFT.

    Returns
    -------
    tuple
        ``(kps, features)`` where ``kps`` is an (N, 2) float32 array of
        keypoint (x, y) coordinates and ``features`` is the descriptor
        matrix.
    """
    # convert the image to grayscale
    # gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    if self.isv3:
        # OpenCV 3.x path.
        if useRootSIFT:
            # RootSIFT detects keypoints itself when given None.
            # print('Using RootSIFT')
            rs = RootSIFT()
            kps, features = rs.compute(image, None)
        else:
            # detect and extract features from the image
            extractor = cv2.xfeatures2d.SIFT_create()
            # extractor = cv2.xfeatures2d.SURF_create()
            # extractor = cv2.ORB_create()
            kps, features = extractor.detectAndCompute(image, None)
    else:
        # OpenCV 2.4.x: detection and description are separate objects.
        kps = cv2.FeatureDetector_create("SIFT").detect(image)
        kps, features = cv2.DescriptorExtractor_create("SIFT").compute(image, kps)

    # Convert cv2.KeyPoint objects to a NumPy array of coordinates.
    kps = np.float32([kp.pt for kp in kps])

    return (kps, features)
def get_local_features(params, image):
    """Return the RootSIFT descriptor matrix for *image*.

    Keypoints are found with the detector named by
    ``params["keypoint_type"]``; only the descriptors are returned.
    """
    # Detect keypoints with the configured detector type.
    keypoints = cv2.FeatureDetector_create(params["keypoint_type"]).detect(image)
    # Describe them with RootSIFT; the keypoints themselves are dropped.
    _, descriptors = RootSIFT().compute(image, keypoints)
    return descriptors
from __future__ import print_function
from rootsift import RootSIFT
import argparse
import cv2
import imutils

# Build and parse the command-line arguments.
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())

# Load the input image and convert it to grayscale.
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Extract local invariant descriptors with RootSIFT (it detects
# keypoints itself when given None).
extractor = RootSIFT()
(kps, descs) = extractor.compute(gray, None)

# Report the number of keypoints and the descriptor matrix shape.
print("[INFO] # of keypoints detected: {}".format(len(kps)))
print("[INFO] feature vector shape: {}".format(descs.shape))
# Load the image and convert it to grayscale.
image = cv2.imread("all_souls_000035.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Detect Difference-of-Gaussian keypoints.
# NOTE(review): detection runs on the BGR `image`, not `gray` — confirm
# this is intentional.
detector = cv2.FeatureDetector_create("SIFT")
kps = detector.detect(image)

# Extract normal SIFT descriptors.
# Python-2-only `print "..."` statements replaced with the
# single-argument print() form, valid in both Python 2 and 3.
extractor = cv2.DescriptorExtractor_create("SIFT")
(kps_sift, descs_sift) = extractor.compute(image, kps)
print("SIFT: kps=%d, descriptors=%s " % (len(kps_sift), descs_sift.shape))

# Extract RootSIFT descriptors from the same keypoints.
rs = RootSIFT()
(kps_rootsift, descs_rootsift) = rs.compute(image, kps)
print("RootSIFT: kps=%d, descriptors=%s " % (len(kps_rootsift), descs_rootsift.shape))

# Plot both keypoint sets one above the other.
pylab.figure()
rgbImage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
img_SIFT = cv2.drawKeypoints(rgbImage, kps_sift, None, (255, 0, 255), 4)
pylab.gray()
pylab.subplot(2, 1, 1)
pylab.imshow(img_SIFT)
pylab.axis('off')
img_rootsift = cv2.drawKeypoints(rgbImage, kps_rootsift, None, (255, 0, 255), 4)
pylab.gray()
pylab.subplot(2, 1, 2)
pylab.imshow(img_rootsift)
# USAGE
# python driver.py

# Import the necessary packages.
from rootsift import RootSIFT
import cv2

# Load the example image and convert it to grayscale.
image = cv2.imread("example.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Detect Difference-of-Gaussian keypoints on the grayscale image.
detector = cv2.FeatureDetector_create("SIFT")
kps = detector.detect(gray)

# Describe the keypoints with plain SIFT first.
extractor = cv2.DescriptorExtractor_create("SIFT")
kps, descs = extractor.compute(gray, kps)
print("SIFT: kps=%d, descriptors=%s " % (len(kps), descs.shape))

# Then re-describe the same keypoints with RootSIFT for comparison.
rs = RootSIFT()
kps, descs = rs.compute(gray, kps)
print("RootSIFT: kps=%d, descriptors=%s " % (len(kps), descs.shape))
# Import the necessary packages.
from rootsift import RootSIFT
import cv2

# Load the image we are going to extract descriptors from and convert
# it to grayscale.
image = cv2.imread("example.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Detect Difference-of-Gaussian keypoints.
# NOTE(review): detection runs on the BGR `image`, not `gray` — confirm
# this is intentional.
detector = cv2.FeatureDetector_create("SIFT")
kps = detector.detect(image)

# Extract normal SIFT descriptors.
# Python-2-only `print "..."` statements replaced with the
# single-argument print() form, valid in both Python 2 and 3.
extractor = cv2.DescriptorExtractor_create("SIFT")
(kps, descs) = extractor.compute(image, kps)
print("SIFT: kps=%d, descriptors=%s " % (len(kps), descs.shape))

# Extract RootSIFT descriptors from the same keypoints.
rs = RootSIFT()
(kps, descs) = rs.compute(image, kps)
print("RootSIFT: kps=%d, descriptors=%s " % (len(kps), descs.shape))
# Import the necessary packages.
from rootsift import RootSIFT
import cv2

# Load the image we are going to extract descriptors from and convert
# it to grayscale.
image = cv2.imread("example.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Detect Difference-of-Gaussian keypoints.
# NOTE(review): detection runs on the BGR `image`, not `gray` — confirm
# this is intentional.
detector = cv2.FeatureDetector_create("SIFT")
kps = detector.detect(image)

# Extract normal SIFT descriptors.
# Python-2-only `print "..."` statements replaced with the
# single-argument print() form, valid in both Python 2 and 3.
extractor = cv2.DescriptorExtractor_create("SIFT")
(kps, descs) = extractor.compute(image, kps)
print("SIFT: kps=%d, descriptors=%s " % (len(kps), descs.shape))

# Extract RootSIFT descriptors from the same keypoints.
rs = RootSIFT()
(kps, descs) = rs.compute(image, kps)
print("RootSIFT: kps=%d, descriptors=%s " % (len(kps), descs.shape))
# Detect and describe keypoints in both images with plain SIFT.
# (The original ran a separate detect-only pass first; its results were
# immediately overwritten by detectAndCompute, so it is removed.)
detector = cv2.xfeatures2d.SIFT_create()
extractor = cv2.xfeatures2d.SIFT_create()
(kps1, descs1) = extractor.detectAndCompute(gray1, None)
(kps2, descs2) = extractor.detectAndCompute(gray2, None)
# Python-2-only `print "..."` statements replaced with the
# single-argument print() form, valid in both Python 2 and 3.
print("SIFT: kps=%d, descriptors=%s " % (len(kps1), descs1.shape))
print("SIFT: kps=%d, descriptors=%s " % (len(kps2), descs2.shape))

# Re-describe the same keypoints with RootSIFT (this project's RootSIFT
# variant processes both images in a single call).
rs = RootSIFT()
(kps1, descs1, kps2, descs2) = rs.compute(gray1, kps1, gray2, kps2)
print("RootSIFT: kps=%d, descriptors=%s " % (len(kps1), descs1.shape))
print("RootSIFT: kps=%d, descriptors=%s " % (len(kps2), descs2.shape))

print("========================================================")
print("Slika poklapanja:")
print("========================================================")

# FLANN (KD-tree) matcher parameters.
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)  # or pass empty dictionary
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(descs1, descs2, k=2)
# Extract RootSIFT descriptors for every reference object image.
rs = RootSIFT()
# descriptor = cv2.xfeatures2d.SIFT_create()
matcher = cv2.DescriptorMatcher_create("BruteForce")

objKeyPtList = []  # per-object (x, y) keypoint coordinate arrays
objFtrList = []    # per-object RootSIFT descriptor matrices
for obj in object_list:
    imageA = cv2.imread(obj)
    # cv2.imshow("Image A", imageA)
    grayA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
    # Detect and extract features from the image (RootSIFT detects
    # keypoints itself when given None).
    (kps1, featuresA) = rs.compute(grayA, None)
    # (kps1, featuresA) = descriptor.detectAndCompute(grayA, None)
    # Skip images where no features were found. Idiomatic identity
    # check replaces the original `type(featuresA) != type(None)`.
    if featuresA is not None:
        kpsA = np.float32([kp.pt for kp in kps1])
        objKeyPtList.append(kpsA)
        objFtrList.append(featuresA)
print('Total objects found:', len(objFtrList))

# Grab a reference to the video file.
vs = cv2.VideoCapture(args["video"])

# Allow the camera or video file to warm up.
time.sleep(2.0)
def readFromFile(testImagePath):
    """Match a query image against every ``.jpg`` in the dataset folder
    and display the best match.

    Parameters
    ----------
    testImagePath : str
        Path to the query image.

    Returns
    -------
    tuple
        ``(img3, slikaData)`` — the drawn-matches visualisation and the
        best-matching dataset image (the query itself if nothing beat
        zero matches).

    Notes
    -----
    Reads the module-level global ``folderPath`` for the dataset
    directory.  NOTE(review): ``folderPath`` is not defined in this
    function — confirm it exists at module scope.
    Python-2-only constructs (``print`` statement, ``xrange``) were
    replaced with forms valid in both Python 2 and 3; a redundant
    detect-only pass (results immediately overwritten) was removed.
    """
    imgTest = cv2.imread(testImagePath)
    imgPath = 'data_images'
    testImagePath = 'test_images'

    # SIFT keypoint extractor plus RootSIFT re-description.
    extractor = cv2.xfeatures2d.SIFT_create()
    rs = RootSIFT()

    # ==================================================================
    def findPosMatches(dscTest, dscData):
        """Count positive (Lowe ratio-test) keypoint matches between two
        descriptor sets; returns (count, matches, matchesMask)."""
        pozPoklapanja = []
        # FLANN (KD-tree) matcher parameters.
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=100)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(dscTest, dscData, k=2)

        # Mask marking which matches pass the ratio test (used when
        # drawing; only good matches are highlighted).
        matchesMask = [[0, 0] for i in range(len(matches))]
        # Ratio test as per Lowe's paper.
        for i, (m, n) in enumerate(matches):
            if m.distance < 0.7 * n.distance:
                matchesMask[i] = [1, 0]
                pozPoklapanja.append(len(matchesMask[i]))
        x = len(pozPoklapanja)
        return x, matches, matchesMask
    # ==================================================================

    # Describe the query image: SIFT keypoints, then RootSIFT descriptors.
    gray2 = cv2.cvtColor(imgTest, cv2.COLOR_BGR2GRAY)
    (kptsTest, dscTest) = extractor.detectAndCompute(gray2, None)
    (kptsTest, dscTest) = rs.compute(gray2, kptsTest)

    # Track the dataset image with the most positive keypoint matches.
    maxBrojPozPoklapanja = 0
    kptsDataBest = []
    poklapanja = []
    slikaData = imgTest
    matchesMask = []

    # Process every dataset image.
    for imgPath in glob.glob(folderPath + '/*.jpg'):
        imgData = cv2.imread(imgPath)
        gray1 = cv2.cvtColor(imgData, cv2.COLOR_BGR2GRAY)
        (kptsData, dscData) = extractor.detectAndCompute(gray1, None)
        (kptsData, dscData) = rs.compute(gray1, kptsData)
        brPozPoklapanjaSlike, matchesData, matchesMaskData = findPosMatches(dscTest, dscData)
        if brPozPoklapanjaSlike > maxBrojPozPoklapanja:
            # This image has the most positive matches so far — it
            # becomes the current best candidate.
            maxBrojPozPoklapanja = brPozPoklapanjaSlike
            poklapanja = matchesData
            slikaData = imgData
            kptsDataBest = kptsData
            matchesMask = matchesMaskData

    print("Slika poklapanja:")
    draw_params = dict(matchColor=(0, 255, 0),
                       singlePointColor=(255, 0, 0),
                       matchesMask=matchesMask,
                       flags=0)
    img3 = cv2.drawMatchesKnn(imgTest, kptsTest, slikaData, kptsDataBest,
                              poklapanja, None, **draw_params)
    plt.imshow(img3,), plt.show()
    return img3, slikaData
# Load the image and convert it to grayscale.
image = cv2.imread("all_souls_000035.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Detect Difference-of-Gaussian keypoints.
# NOTE(review): detection runs on the BGR `image`, not `gray` — confirm
# this is intentional.
detector = cv2.FeatureDetector_create("SIFT")
kps = detector.detect(image)

# Extract normal SIFT descriptors.
# Python-2-only `print "..."` statements replaced with the
# single-argument print() form, valid in both Python 2 and 3.
extractor = cv2.DescriptorExtractor_create("SIFT")
(kps_sift, descs_sift) = extractor.compute(image, kps)
print("SIFT: kps=%d, descriptors=%s " % (len(kps_sift), descs_sift.shape))

# Extract RootSIFT descriptors from the same keypoints.
rs = RootSIFT()
(kps_rootsift, descs_rootsift) = rs.compute(image, kps)
print("RootSIFT: kps=%d, descriptors=%s " % (len(kps_rootsift), descs_rootsift.shape))

# Plot both keypoint sets one above the other.
pylab.figure()
rgbImage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
img_SIFT = cv2.drawKeypoints(rgbImage, kps_sift, None, (255, 0, 255), 4)
pylab.gray()
pylab.subplot(2, 1, 1)
pylab.imshow(img_SIFT)
pylab.axis('off')
img_rootsift = cv2.drawKeypoints(rgbImage, kps_rootsift, None, (255, 0, 255), 4)
pylab.gray()