import pickle
from siftdetector import detect_keypoints

def set_keypoints():
    # image_pathes is a module-level list of four image paths.
    [detected_keypoints1, descriptors1] = detect_keypoints(image_pathes[0], 5)
    [detected_keypoints2, descriptors2] = detect_keypoints(image_pathes[1], 5)
    [detected_keypoints3, descriptors3] = detect_keypoints(image_pathes[2], 5)
    [detected_keypoints4, descriptors4] = detect_keypoints(image_pathes[3], 5)
    # pickle needs a binary file handle, so open with 'wb' rather than 'w'.
    with open('keypoints.pkl', 'wb') as f:
        pickle.dump([detected_keypoints1, descriptors1, detected_keypoints2, descriptors2,
                     detected_keypoints3, descriptors3, detected_keypoints4, descriptors4], f)
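A minimal sketch of reading those results back, assuming the 'keypoints.pkl' file written by set_keypoints() above; the unpacking order mirrors the dump call:

import pickle

# Binary mode, matching the 'wb' used when dumping.
with open('keypoints.pkl', 'rb') as f:
    (detected_keypoints1, descriptors1, detected_keypoints2, descriptors2,
     detected_keypoints3, descriptors3, detected_keypoints4, descriptors4) = pickle.load(f)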
import cv2
import numpy as np
from siftdetector import detect_keypoints

def match_template(imagename, templatename, threshold, cutoff):
    img = cv2.imread(imagename)
    template = cv2.imread(templatename)
    [kpi, di] = detect_keypoints(imagename, threshold)
    [kpt, dt] = detect_keypoints(templatename, threshold)

    # Match every template descriptor to its nearest image descriptor with a FLANN kd-tree index.
    flann_params = dict(algorithm=1, trees=4)
    flann = cv2.flann_Index(np.asarray(di, np.float32), flann_params)
    idx, dist = flann.knnSearch(np.asarray(dt, np.float32), 1, params={})
    del flann

    # Normalise the squared distances and sort the matches by quality.
    dist = (dist[:, 0] / 2500.0).tolist()
    idx = idx.reshape(-1).tolist()
    indices = sorted(range(len(dist)), key=lambda i: dist[i])
    dist = [dist[i] for i in indices]
    idx = [idx[i] for i in indices]

    # Keep only matches whose distance is below the cutoff.
    kpi_cut = []
    for i, dis in zip(idx, dist):
        print("distance: " + str(dis))
        if dis < cutoff:
            kpi_cut.append(kpi[i])
        else:
            break
    kpt_cut = []
    for i, dis in zip(indices, dist):
        print("distance: " + str(dis))
        if dis < cutoff:
            kpt_cut.append(kpt[i])
        else:
            break

    # Place the template (left) and the image (right) side by side.
    h1, w1 = img.shape[:2]
    h2, w2 = template.shape[:2]
    nWidth = w1 + w2
    nHeight = max(h1, h2)
    hdif = (h1 - h2) // 2  # integer offset, needed for slicing
    newimg = np.zeros((nHeight, nWidth, 3), np.uint8)
    newimg[hdif:hdif + h2, :w2] = template
    newimg[:h1, w2:w1 + w2] = img

    # Draw a line for each accepted match; keypoint rows are [row, col, ...].
    for i in range(min(len(kpi_cut), len(kpt_cut))):
        pt_a = (int(kpt_cut[i][1]), int(kpt_cut[i][0] + hdif))
        pt_b = (int(kpi_cut[i][1] + w2), int(kpi_cut[i][0]))
        cv2.line(newimg, pt_a, pt_b, (255, 0, 0))
    cv2.imwrite('matches.jpg', newimg)
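A hedged usage sketch for the function above; 'scene.jpg' and 'template.jpg' are placeholder file names, and the values are illustrative only (threshold is passed through to detect_keypoints, cutoff limits which nearest-neighbour matches are kept and drawn):

# Placeholder inputs; writes the side-by-side visualisation to 'matches.jpg'.
match_template('scene.jpg', 'template.jpg', threshold=5, cutoff=0.5)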
def match_template(imagename, pos):
    img = cv2.imread(imagename)
    kpi = detect_keypoints(imagename)
    fichier = open(pos + "/sift.txt", "a")
    #print(kpi)
    if kpi is not None:
        # Id and Class are module-level globals in the original script.
        fichier.write("\n" + str(Id) + "," + str(Class[int(pos) - 1]) + ",")
        f = 0
        for i in kpi:
            f += 1
            n = 0  # reset per keypoint so only the very last value gets no trailing comma
            for y in i:
                n += 1
                if (f == len(kpi)) and (n == len(i)):
                    fichier.write(str(y))
                else:
                    fichier.write(str(y) + ",")
    fichier.close()
keypoints_kitti11_94 = np.loadtxt('Kitti11_94_Keypoints', delimiter=',')
descriptors_kitti11_94 = np.loadtxt('Kitti11_94_Descriptors', delimiter=',')
keypoints_kitti11_94_cv2 = to_cv2_kplist(keypoints_kitti11_94)
descriptors_kitti11_94_cv2 = to_cv2_di(descriptors_kitti11_94)
print("Number of keypoints in Kitti11_94: " + str(len(keypoints_kitti11_94_cv2)))
cv2.drawKeypoints(gray_Kitti11_94, keypoints_kitti11_94_cv2, kitti11_94_img,
                  flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imwrite('Kitti11_94_Keypoints.png', kitti11_94_img)

[keypoints_kitti11_96_raw, descriptors_kitt11_96_raw] = detect_keypoints(imagesKitti11_paths[1], 5)
np.savetxt('Kitti11_96_Keypoints', keypoints_kitti11_96_raw, delimiter=',')
np.savetxt('Kitti11_96_Descriptors', descriptors_kitt11_96_raw, delimiter=',')
keypoints_kitti11_96 = np.loadtxt('Kitti11_96_Keypoints', delimiter=',')
descriptors_kitt11_96 = np.loadtxt('Kitti11_96_Descriptors', delimiter=',')
keypoints_kitti11_96_cv2 = to_cv2_kplist(keypoints_kitti11_96)
descriptors_kitti11_96_cv2 = to_cv2_di(descriptors_kitt11_96)
print("Number of keypoints in Kitti11_96: " + str(len(keypoints_kitti11_96_cv2)))
cv2.drawKeypoints(gray_Kitti11_96, keypoints_kitti11_96_cv2, kitti11_96_img,
                  flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imwrite('Kitti11_96_Keypoints.png', kitti11_96_img)

[keypoints_kitti14_left_raw, descriptors_kitt14_left_raw] = detect_keypoints(imagesKitti14_paths[0], 5)
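The snippet above relies on to_cv2_kplist and to_cv2_di, which are not shown. A minimal sketch of such converters, assuming each keypoint row stores [row, col, scale, orientation] (matching the y/x ordering used by the match_template drawing code above) and that the descriptors only need a float32 cast for OpenCV:

import cv2
import numpy as np

def to_cv2_kplist(kp):
    # Assumed row layout: [row, col, scale, orientation]; cv2.KeyPoint takes (x, y, size).
    return [cv2.KeyPoint(float(k[1]), float(k[0]), float(k[2])) for k in kp]

def to_cv2_di(di):
    # OpenCV matchers and drawing helpers expect float32 descriptors.
    return np.asarray(di, dtype=np.float32)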
def find_descriptor(path):
    resize_image(path)
    kp, target_des = detect_keypoints(path, 0.01)
    return target_des
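A hedged usage sketch; 'query.jpg' is a placeholder path and resize_image() is assumed to be defined elsewhere in the same project:

target_des = find_descriptor('query.jpg')
print("number of descriptors: " + str(len(target_des)))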
import cv2
from siftdetector import detect_keypoints

# cv2.imread() expects an imread flag; cv2.COLOR_BGR2GRAY is a cvtColor code,
# so load the query image with cv2.IMREAD_GRAYSCALE instead.
img1 = cv2.imread('iiitb.jpg', cv2.IMREAD_GRAYSCALE)  # queryImage
[keypoints, descriptors] = detect_keypoints('iiitb.jpg', 15)
# keypoints is a raw array, not an image: convert each [row, col, scale, ...]
# row to a cv2.KeyPoint and draw it on the image before displaying.
kp = [cv2.KeyPoint(float(k[1]), float(k[0]), float(k[2])) for k in keypoints]
cv2.imshow("a", cv2.drawKeypoints(img1, kp, None))
cv2.waitKey(0)