def compute_feature():
    """Run SIFT on every image in the test folder and dump results to text files.

    For each image two files are written into ``features_path``: one with
    the keypoints and one with the descriptors.  Returns the list of
    descriptor-file paths, one entry per image.
    """
    features_path = "/home/ashish/dip/chap7/features_test/"
    images_path = "/home/ashish/dip/chap7/images_test/"
    # Enumerate the images to process.
    images = imlist.get_imlist(images_path)
    nbr_images = len(images)
    kplist, deslist = [], []
    for name in images:
        # Derive per-image output file names from the image name (no extension).
        stem = os.path.splitext(name)[0]
        kp_path = features_path + stem + "kp" + ".txt"
        des_path = features_path + stem + "des" + ".txt"
        kplist.append(kp_path)
        deslist.append(des_path)
        # Extract keypoints and descriptors, then persist both as text.
        keypoints, descriptors = SIFT.SIFT(images_path + name)
        np.savetxt(kp_path, keypoints, fmt="%s")
        np.savetxt(des_path, descriptors, fmt="%s")
    return deslist
def __init__(self, model_name, folder_path=None):
    """Select and build the feature-extraction model named *model_name*.

    Recognised names are 'LBP', 'HOG', 'CM' and 'SIFT'; any other name
    leaves ``self.model`` as None.  ``split_windows`` is enabled only for
    the window-based extractors (LBP, CM).
    """
    self.model_name = model_name
    self.folder_path = folder_path
    self.split_windows = False
    self.model = None

    chosen = self.model_name
    if chosen == 'LBP':
        # 8 sampling points on a radius-1 circle.
        self.model = LBP.LocalBinaryPatterns(8, 1)
        self.split_windows = True
    elif chosen == 'HOG':
        self.model = HOG.Hog(orientations=9,
                             pixels_per_cell=(8, 8),
                             cells_per_block=(2, 2))
    elif chosen == 'CM':
        self.model = ColorMoments.ColorMoments()
        self.split_windows = True
    elif chosen == 'SIFT':
        self.model = SIFT.SIFT()
def compareEfficiency():
    """Benchmark the from-scratch SIFT pipeline against OpenCV's built-in one.

    Loads the sample drawings, times keypoint detection over all of them
    with each implementation, and prints both wall-clock totals.
    """
    folder = "DibujosMuestra/dataset2_reyCopia/reyCopia"
    imgs = load_imgs(folder)

    # ---- custom implementation ----------------------------------------
    t_0 = time()
    for picture in imgs:
        config = {
            "s": 3,        # Pg 9 of Lowe's paper
            "sigma": 1.6,  # Pg 10 of Lowe's paper
            "visual_debug": True,
            # Only used when "img" is not supplied.
            "img_name": "/home/alberto/Documents/CV/M0_SIFT/fotonoticia_20200402133510_420.jpg",
            "img": picture,
            "assumed_blur": 0.5,  # Pg 10 of Lowe's paper
            "detection_threshold": 10,  # origin unclear -- TODO confirm
            "contrast_threshold": 0.04,
            "eigenvalue_ratio": 10,
            "convergence_attempts": 5,
            "image_border_width": 5,
            "radius_factor": 3,
            "num_bins": 36,
            "peak_ratio": 0.8,
            "scale_factor": 1.5,
            "float_tol": 1e-7,
            "window_width": 4,
            "num_bins_descriptor": 8,
            "scale_multiplier": 3,
            "descriptor_max_val": 0.2,
        }
        SIFT.SIFT(config).calculateKeyPoints()
    print(f"The custom implementation needed {(time()-t_0)*1000} miliseconds")

    # ---- OpenCV reference implementation ------------------------------
    t_0 = time()
    for picture in imgs:
        detector = cv2.SIFT_create()
        kp = detector.detect(picture, None)
    print(f"The OpenCV implementation needed {(time()-t_0)*1000} miliseconds")
# Calculando tempo em milisegundos lr2m_times.append(round((end - start) * 1000, 2)) # Executando algoritmo com RGB start = t.time() image = lr2m.lr2MatchingRGB(A, B) end = t.time() lr2m.saveImage(testsPath + "LR2M_RGB_" + i[0], image) # Calculando tempo em milisegundos lr2mRGB_times.append(round((end - start) * 1000, 2)) # Executando SIFT start = t.time() image = sift.SIFT(A_SIFT, B_SIFT) end = t.time() sift.saveImage(testsPath + "SIFT_" + i[0], image) # Calculando tempo em milisegundos SIFT_times.append(round((end - start) * 1000, 2)) # Definindo valores para gráfico testCount += 1 values.append("T" + str(testCount)) # print(values) # print(lr2m_times) # print(lr2mRGB_times) # print(SIFT_times)
if matches_21[int(matches_12[n])] != n: matches_12[n] = 0 return matches_12 for img in images: img_txt = os.path.splitext(img)[0] featlist.append(img_txt + "feat" + ".txt") deslist.append(img_txt + "des" + ".txt") feature_file = img_txt + "feat" + ".txt" des_file = img_txt + "des" + ".txt" f_kp = open(img_txt + "feat" + ".txt", 'w+') f_des = open(img_txt + "des" + ".txt", 'w+') kp, des = SIFT.SIFT(path + img) np.savetxt(feature_file, kp, fmt="%s") np.savetxt(des_file, des, fmt="%s") nbr_images = len(images) matchscores = np.zeros((nbr_images, nbr_images)) for i in range(nbr_images): for j in range(i, nbr_images): #print 'comparing', imlist[i], imlist[j] l1 = read_features_from_file(featlist[i]) l2 = read_features_from_file(featlist[j]) d1 = read_features_from_file(deslist[i]) d2 = read_features_from_file(deslist[j]) des1 = d1.astype(np.float)
    # Tail of the SIFT parameter dict (opened above this chunk); most values
    # presumably follow Lowe's SIFT paper -- confirm against SIFT.SIFT.
    "contrast_threshold": 0.04,
    "eigenvalue_ratio": 10,
    "img": None,
    "convergence_attempts": 5,
    "image_border_width": 5,
    "radius_factor": 3,
    "num_bins": 36,
    "peak_ratio": 0.8,
    "scale_factor": 1.5,
    "float_tol": 1e-7,
    "window_width": 4,
    "num_bins_descriptor": 8,
    "scale_multiplier": 3,
    "descriptor_max_val": 0.2
}

if __name__ == "__main__":
    # Build the detector from the params dict and run keypoint detection.
    sift = SIFT.SIFT(params)
    sift.calculateKeyPoints()
    # print(sift.DoG)
    # Save every Gaussian-pyramid image for visual inspection.
    for i, octave in enumerate(sift.octaves):
        for j, img in enumerate(octave):
            im = Image.fromarray(img).convert('RGB')
            im.save(f'imagesTest/Octave{i}img{j}.jpg')
    # Save the difference-of-Gaussian images; the *25 factor presumably
    # boosts the small DoG values into a visible range -- confirm.
    for i, octave in enumerate(sift.DoG):
        for j, img in enumerate(octave):
            im = Image.fromarray(img * 25).convert('RGB')
            im.save(f'imagesTest/DoGOctave{i}img{j}.jpg')
# -*- coding: utf-8 -*- """ Created on Sun Apr 21 17:12:04 2019 @author: VÕQUỐCHUY """ import numpy as np import cv2 import sys import convolution import Harris import SIFT img = cv2.imread("./Input/flowers.jpeg", 0) siftDetector = SIFT.SIFT(img, 3, 0.707, np.sqrt(2), 5, 4) listOfOctaves = siftDetector.createImgs_AtMultipleOctaves() #listOfDoG = siftDetector.calDOG_AtMultipleOctaves() #listOfKeypoints = siftDetector.findApproxKeypoints() listOfKeypoints = siftDetector.orientationAssignmentAndKeypointDescription() #cv2.namedWindow('output',cv2.WINDOW_AUTOSIZE) #cv2.imshow('output',img) '''for i in range(len(listOfOctaves)): for j in range(len(listOfOctaves[i])): cv2.namedWindow('Output at octave'+str(i)+' scale '+str(j),cv2.WINDOW_AUTOSIZE) cv2.imshow('Output at octave'+str(i)+' scale '+str(j),listOfOctaves[i][j][0])''' '''for i in range(len(listOfDoG)): for j in range(len(listOfDoG[i])): cv2.namedWindow('Output at octave'+str(i)+' scale '+str(j),cv2.WINDOW_AUTOSIZE)