def CompareRANSACWithout(name, img1: ImageWithPoints, img2: ImageWithPoints):
    pts_match_1, pts_match_2, match_good = ho.matchKeypoints(img1.kp, img1.des, img2.kp, img2.des)

    # baseline estimate (epochs=0: no iterative refinement)
    H_0, mask_0 = ho.findHomography(pts_match_1, pts_match_2, 4, epochs=0, learning_rate=0.3)
    printResults(name + "|with", H_0, pts_match_1, pts_match_2, mask_0)
    error_ransac = ho.distanceError(H_0, pts_match_1, pts_match_2, mask_0).mean()

    # restart from the identity and refine in steps of 10 epochs
    # until the error drops below the baseline
    H_0 = np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    z = 0
    with ho.Graph() as graph:
        while True:
            z += 10
            H_0, _ = ho.findHomography(pts_match_1, pts_match_2, 4, epochs=10, learning_rate=0.3,
                                       method=0, H=H_0, mask=mask_0, graph=graph)
            error = ho.distanceError(H_0, pts_match_1, pts_match_2, mask_0).mean()
            if error < error_ransac:
                break
    printResults(name + "|without", H_0, pts_match_1, pts_match_2, mask_0)
    print("Epochs: " + str(z))
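# The experiments in this file report their numbers through a printResults helper
# that is not shown here. A minimal sketch of what such a helper could look like,
# assuming it only prints the mean reprojection distance from ho.distanceError
# (the real implementation's signature matches the call sites, but its output
# format may differ):
def printResults(label, H, pts_1, pts_2, mask):
    # mean point-to-point distance of the projected matches; mask may be None
    error = ho.distanceError(H, pts_1, pts_2, mask).mean()
    print(label + " | mean error: " + str(error))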
def video(img1: ImageWithPoints, img2: ImageWithPoints):
    pts_match_1, pts_match_2, match_good = ho.matchKeypoints(img1.kp, img1.des, img2.kp, img2.des)
    H, mask = ho.findHomography(pts_match_1, pts_match_2, 2, epochs=0, learning_rate=0.3, method=2)
    H = np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    with ho.Graph() as graph:
        for i in range(0, 100):
            if i < 50:
                H, mask = ho.findHomography(pts_match_1, pts_match_2, 2, epochs=1, learning_rate=0.15,
                                            H=H, mask=mask, method=0, graph=graph, normalization=1)
                image = ho.polyline(ho.project(H, img1.points), img2.image, color=(0, 0, 255))
            else:
                H, mask = ho.findHomography(pts_match_1, pts_match_2, 2, epochs=5, learning_rate=0.3,
                                            H=H, mask=mask, method=0, graph=graph, normalization=1)
                image = ho.polyline(ho.project(H, img1.points), img2.image, color=(0, 255, 0))
            image2 = cv2.warpPerspective(img1.image, H, (image.shape[1], image.shape[0]))
            image3 = image2 / 256 * 0.25 + image / 256 * 0.75
            cv2.imshow('frame4', image3)
            cv2.imshow('image', image)
            k = cv2.waitKey(10) & 0xFF
            if k == 27:
                break
def compareLearningRate2(name, img1: ImageWithPoints, img2: ImageWithPoints):
    pts_match_1, pts_match_2, match_good = ho.matchKeypoints(img1.kp, img1.des, img2.kp, img2.des)
    _, mask = ho.findHomographyCV(pts_match_1, pts_match_2, 4)
    for learning_rate in (0.3, 0.5, 0.7):
        for epochs in (200, 500):
            # restart from the identity for every learning-rate/epoch combination
            H = np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
            H, _ = ho.findHomography(pts_match_1, pts_match_2, 4, epochs=epochs, method=0,
                                     H=H, mask=mask, learning_rate=learning_rate)
            printResults(name + "|lr=" + str(learning_rate) + "|" + str(epochs),
                         H, pts_match_1, pts_match_2, mask)
def compareEpochs(name, img1: ImageWithPoints, img2: ImageWithPoints):
    pts_match_1, pts_match_2, match_good = ho.matchKeypoints(img1.kp, img1.des, img2.kp, img2.des)
    # initial estimate without refinement, then cumulative refinement:
    # H_1 = 2 epochs total, H_2 = 10, H_3 = 20, H_4 = 40
    H_0, mask_0 = ho.findHomography(pts_match_1, pts_match_2, 2, epochs=0, learning_rate=0.3, method=2)
    H_1, mask_1 = ho.findHomography(pts_match_1, pts_match_2, 4, epochs=2, learning_rate=0.3, method=0, H=H_0, mask=mask_0)
    H_2, mask_2 = ho.findHomography(pts_match_1, pts_match_2, 4, epochs=8, learning_rate=0.3, method=0, H=H_1, mask=mask_0)
    H_3, mask_3 = ho.findHomography(pts_match_1, pts_match_2, 4, epochs=10, learning_rate=0.3, method=0, H=H_2, mask=mask_0)
    H_4, mask_4 = ho.findHomography(pts_match_1, pts_match_2, 4, epochs=20, learning_rate=0.3, method=0, H=H_3, mask=mask_0)
    H_cv, mask_cv = ho.findHomographyCV(pts_match_1, pts_match_2, 4)

    # errors on the matched keypoints (restricted to the initial inlier mask)
    printResults(name + "|0 epochs", H_0, pts_match_1, pts_match_2, mask_0)
    printResults(name + "|2 epochs", H_1, pts_match_1, pts_match_2, mask_0)
    printResults(name + "|10 epochs", H_2, pts_match_1, pts_match_2, mask_0)
    printResults(name + "|20 epochs", H_3, pts_match_1, pts_match_2, mask_0)
    printResults(name + "|40 epochs", H_4, pts_match_1, pts_match_2, mask_0)

    # errors on the reference points (img1.points / img2.points)
    printResults(name + "|0 epochs", H_0, img1.points, img2.points, None)
    printResults(name + "|2 epochs", H_1, img1.points, img2.points, None)
    printResults(name + "|10 epochs", H_2, img1.points, img2.points, None)
    printResults(name + "|20 epochs", H_3, img1.points, img2.points, None)
    printResults(name + "|40 epochs", H_4, img1.points, img2.points, None)
    printResults(name + "|cv", H_cv, img1.points, img2.points, None)
def compareWithout(name, img1: ImageWithPoints, img2: ImageWithPoints):
    '''Comparison with OpenCV. Both methods use the same keypoints.
    Our system does not use a prior SVD estimate; it starts from the identity.'''
    pts_match_1, pts_match_2, match_good = ho.matchKeypoints(img1.kp, img1.des, img2.kp, img2.des)
    H_0 = np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    # H_0, _ = ho.findHomography(pts_match_1, pts_match_2, 4, epochs=0, learning_rate=0.3, ransac=2)
    H_cv, mask_0 = ho.findHomographyCV(pts_match_1, pts_match_2, 4)
    printResults(name + "|cv", H_cv, pts_match_1, pts_match_2, mask_0)
    # printResults(name + "|cv", H_cv, img1.points, img2.points, None)

    # refine from the identity in steps of 100 epochs (some intermediate results commented out)
    H_0, _ = ho.findHomography(pts_match_1, pts_match_2, 4, epochs=100, learning_rate=0.3, method=0, H=H_0, mask=mask_0)
    printResults(name + "|100 epochs", H_0, pts_match_1, pts_match_2, mask_0)
    # printResults(name + "|100 epochs", H_0, img1.points, img2.points, None)
    # H_0 = H_0 / H_0[2, 2]
    # print(np.linalg.norm(H_0 - H_cv))

    H_0, _ = ho.findHomography(pts_match_1, pts_match_2, 4, epochs=100, learning_rate=0.3, method=0, H=H_0, mask=mask_0)
    # printResults(name + "|200 epochs", H_0, pts_match_1, pts_match_2, mask_0)
    # printResults(name + "|200 epochs", H_0, img1.points, img2.points, None)
    # H_0 = H_0 / H_0[2, 2]
    # print(np.linalg.norm(H_0 - H_cv))

    H_0, _ = ho.findHomography(pts_match_1, pts_match_2, 4, epochs=100, learning_rate=0.3, method=0, H=H_0, mask=mask_0)
    printResults(name + "|300 epochs", H_0, pts_match_1, pts_match_2, mask_0)
    # printResults(name + "|300 epochs", H_0, img1.points, img2.points, None)
    # H_0 = H_0 / H_0[2, 2]
    # print(np.linalg.norm(H_0 - H_cv))

    H_0, _ = ho.findHomography(pts_match_1, pts_match_2, 4, epochs=100, learning_rate=0.3, method=0, H=H_0, mask=mask_0)
    # printResults(name + "|400 epochs", H_0, pts_match_1, pts_match_2, mask_0)
    # printResults(name + "|400 epochs", H_0, img1.points, img2.points, None)
    # H_0 = H_0 / H_0[2, 2]
    # print(np.linalg.norm(H_0 - H_cv))

    H_0, _ = ho.findHomography(pts_match_1, pts_match_2, 4, epochs=100, learning_rate=0.3, method=0, H=H_0, mask=mask_0)
    printResults(name + "|500 epochs", H_0, pts_match_1, pts_match_2, mask_0)
def compareAndShow(img1: ImageWithPoints, img2: ImageWithPoints):
    pts_match_1, pts_match_2, match_good = ho.matchKeypoints(img1.kp, img1.des, img2.kp, img2.des)
    H_tf, mask_tf = ho.findHomography(pts_match_1, pts_match_2, 4)
    H_cv, mask_cv = ho.findHomographyCV(pts_match_1, pts_match_2, 4)
    image = ho.polyline(img2.points, img2.image, color=(0, 0, 255))
    image_tf = ho.polyline(ho.project(H_tf, img1.points), image, color=(255, 0, 0))
    image_cv = ho.polyline(ho.project(H_cv, img1.points), image, color=(0, 255, 0))
    while True:
        cv2.imshow('image', image)
        cv2.imshow('tf', image_tf)
        cv2.imshow('cv', image_cv)
        k = cv2.waitKey(10) & 0xFF
        if k == 27:
            break
def compareLearningRate(name, img1: ImageWithPoints, img2: ImageWithPoints):
    pts_match_1, pts_match_2, match_good = ho.matchKeypoints(img1.kp, img1.des, img2.kp, img2.des)
    H, mask = ho.findHomography(pts_match_1, pts_match_2, 4, epochs=0)
    # H is refined cumulatively across the calls below; each label gives the
    # learning rate of that step and the total number of epochs so far
    H, _ = ho.findHomography(pts_match_1, pts_match_2, 4, epochs=50, method=0, H=H, mask=mask, learning_rate=0.5)
    printResults(name + "|lr=0.5|50", H, pts_match_1, pts_match_2, mask)
    H, _ = ho.findHomography(pts_match_1, pts_match_2, 4, epochs=300, method=0, H=H, mask=mask, learning_rate=0.5)
    printResults(name + "|lr=0.5|350", H, pts_match_1, pts_match_2, mask)
    H, _ = ho.findHomography(pts_match_1, pts_match_2, 4, epochs=50, method=0, H=H, mask=mask, learning_rate=0.8)
    printResults(name + "|lr=0.8|400", H, pts_match_1, pts_match_2, mask)
    H, _ = ho.findHomography(pts_match_1, pts_match_2, 4, epochs=300, method=0, H=H, mask=mask, learning_rate=0.8)
    printResults(name + "|lr=0.8|700", H, pts_match_1, pts_match_2, mask)
    H, _ = ho.findHomography(pts_match_1, pts_match_2, 4, epochs=50, method=0, H=H, mask=mask, learning_rate=1)
    printResults(name + "|lr=1|750", H, pts_match_1, pts_match_2, mask)
    H, _ = ho.findHomography(pts_match_1, pts_match_2, 4, epochs=300, method=0, H=H, mask=mask, learning_rate=1)
    printResults(name + "|lr=1|1050", H, pts_match_1, pts_match_2, mask)
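# The comparison functions above take ImageWithPoints arguments; the class itself
# is defined elsewhere in the project. Judging from the attribute accesses, it
# bundles an image with its keypoints, descriptors and a set of reference points.
# A minimal sketch of such a container (attribute meanings are inferred from the
# call sites; the real class may do more, e.g. detect the keypoints itself):
class ImageWithPoints:
    def __init__(self, image, kp, des, points):
        self.image = image    # BGR image
        self.kp = kp          # detected keypoints
        self.des = des        # descriptors belonging to the keypoints
        self.points = points  # reference points, e.g. the corners of the planar target

# Hypothetical driver code for the experiments above (names and files are made up):
# img_ref = ImageWithPoints(reference_image, kp_ref, des_ref, corners_ref)
# img_test = ImageWithPoints(test_image, kp_test, des_test, corners_test)
# compareEpochs("graffiti", img_ref, img_test)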
import numpy as np
import cv2
import modules.homography as ho

image_test = cv2.imread("res/graffiti/11.png")
image_reference = cv2.imread("res/graffiti/ref.png")
corners = np.float32([[54, 26], [2672, 54], [2679, 1905], [21, 1900]])

# resize the reference image
image_reference = cv2.resize(image_reference, (0, 0), fx=0.25, fy=0.25)
corners /= 4

pts_match_1, pts_match_2, _ = ho.match(image_reference, image_test)

H = np.identity(3)
_, mask = ho.findHomography(pts_match_1, pts_match_2)

# wait
cv2.imshow('frame', image_test)
k = cv2.waitKey(1000) & 0xFF

with ho.Graph() as graph:
    iter = 0
    for i in range(0, 140):
        if i < 50:
            iter += 1
            # 50 times 1 epoch
            H, mask = ho.findHomography(pts_match_1, pts_match_2, 2, epochs=1, learning_rate=0.3,
                                        H=H, mask=mask, method=0, graph=graph, normalization=1)
        error = ho.distanceError(H, pts_match_1, pts_match_2, mask).mean()
        image = ho.polyline(ho.project(H, corners), image_test, color=(0, 0, 255))  # red
        image = cv2.putText(image, "iteration " + str(iter) + ", error: " + str(error), (10, 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
"""Finds keypoints between two images and calculates a homography. Must be run from the parent directory, e.g.: .../Mehrkamera/src> python -m examples.example1 """ import numpy as np import cv2 import modules.homography as ho image_test = cv2.imread("res/graffiti/1.png") image_reference = cv2.imread("res/graffiti/ref.png") corners = np.float32([[54, 26], [2672, 54], [2679, 1905], [21, 1900]]) # resize the reference image image_reference = cv2.resize(image_reference, (0, 0), fx=0.25, fy=0.25) corners /= 4 points_reference, points_test, _ = ho.match(image_reference, image_test) matrix, _ = ho.findHomography(points_reference, points_test) corners_projected = ho.project(matrix, corners) test_with_corners = ho.polyline(corners_projected, image_test) cv2.imshow('test', test_with_corners) cv2.waitKey(0) cv2.destroyAllWindows()
""" This example combines two images into a larger image using a homography projection. """ import cv2 import modules.homography as ho import numpy as np image_1 = cv2.imread("res/graffiti/9.png") image_2 = cv2.imread("res/graffiti/10.png") pts_1, pts_2, all_keypoints = ho.match(image_1, image_2) H, mask = ho.findHomography(pts_1, pts_2) image_keypoints = ho.drawMatches(all_keypoints, image_1, image_2, mask) cv2.imshow('keypoints', image_keypoints) image_1_warp = cv2.warpPerspective(image_1, H, (1000, 800)) image_2_extended = np.zeros((800, 1000, 3), np.uint8) image_2_extended[:480, :640, :] = image_2 image_blend = image_1_warp / 256 * 0.5 + image_2_extended / 256 * 0.5 cv2.imshow('blend', image_blend) cv2.waitKey(0) cv2.destroyAllWindows()
    # (fragment: continuation of a per-frame loop; frame1/frame2, ref2, diff1 and
    # the graph used below are set up earlier in the script)
    _, diff1 = cv2.threshold(diff1, 16, 255, cv2.THRESH_BINARY)
    cv2.imshow('diff1', diff1)

    diff2 = cv2.absdiff(ref2, frame2)
    diff2 = cv2.cvtColor(diff2, cv2.COLOR_BGR2GRAY)
    diff2 = cv2.GaussianBlur(diff2, (9, 9), 0)
    _, diff2 = cv2.threshold(diff2, 16, 255, cv2.THRESH_BINARY)
    cv2.imshow('diff2', diff2)

    kp_1, des_1 = ho.findKeypoints(frame1, diff1)
    kp_2, des_2 = ho.findKeypoints(frame2, diff2)

    if len(kp_1) > 20 and len(kp_2) > 20:
        pts_1, pts_2, good = ho.matchKeypoints(kp_1, des_1, kp_2, des_2)
        if pts_1.shape[0] >= 10:
            H, mask = ho.findHomography(pts_1, pts_2, 3, graph=graph)
            if mask.sum() >= 10:
                frame1Warp = cv2.warpPerspective(frame1, H, (frame1.shape[1], frame1.shape[0]))
                frameBlend = frame1Warp / 256 * 0.5 + frame2 / 256 * 0.5
                cv2.imshow('frame4', frameBlend)

    cv2.imshow('frame1', frame1)
    cv2.imshow('frame2', frame2)

    k = cv2.waitKey(1) & 0xFF
    if k == 27:
        break

cv2.destroyAllWindows()
exit()