def get_krt(im1, im2):
    """Estimate calibration K and relative pose [R|t] from two views of a plane.

    Matches SIFT features between im1 (frontal view) and im2 (perspective
    view), fits a homography with RANSAC, builds the second camera matrix
    from a canonical first camera, and pickles K and inv(K)*P2 (= [R|t])
    to '<im2 basename>.pkl'.

    im1, im2 -- image file paths; assumes a 3-character extension
                (e.g. '.jpg') when deriving the .sift file names.
    """
    ims = [im1, im2]
    # 'frame.jpg' -> 'frame.sift' (keeps the dot, swaps the extension)
    sifts = [name[:-3] + "sift" for name in ims]

    # compute and load SIFT features for both images
    sift.process_image(ims[0], sifts[0])
    l0, d0 = sift.read_features_from_file(sifts[0])
    sift.process_image(ims[1], sifts[1])
    l1, d1 = sift.read_features_from_file(sifts[1])

    # match features and estimate homography with RANSAC
    matches = sift.match_twosided(d0, d1)
    ndx = matches.nonzero()[0]
    fp = homography.make_homog(l0[ndx, :2].T)
    ndx2 = [int(matches[i]) for i in ndx]
    print(len(ndx2))
    tp = homography.make_homog(l1[ndx2, :2].T)
    model = homography.RansacModel()
    H = homography.H_from_ransac(fp, tp, model)[0]

    # camera calibration from the second image's pixel size
    K = camera.my_calibration(Image.open(im2).size)

    # 3D cube with side 0.2 resting on the plane z=0
    box = cube.cube_points([0, 0, 0.1], 0.1)

    # first camera: canonical K [I | -e3]
    cam1 = camera.Camera(hstack((K, dot(K, array([[0], [0], [-1]])))))
    # the first five points are the bottom square of the cube
    box_cam1 = cam1.project(homography.make_homog(box[:, :5]))

    # use H to transfer the bottom square into the second image
    print(dot(H, box_cam1))
    box_trans = homography.normalize(dot(H, box_cam1))

    # second camera: P2 = H * P1, then re-orthogonalize the rotation part
    cam2 = camera.Camera(dot(H, cam1.P))
    A = dot(linalg.inv(K), cam2.P[:, :3])
    # rebuild the third column as the cross product of the first two so
    # that A is (approximately) a rotation matrix
    A = array([A[:, 0], A[:, 1], cross(A[:, 0], A[:, 1])]).T
    cam2.P[:, :3] = dot(K, A)

    # project the full cube with the second camera
    # NOTE(review): box_trans/box_cam2 are unused here — presumably kept
    # for debugging/plotting elsewhere; confirm before removing.
    box_cam2 = cam2.project(homography.make_homog(box))

    # sanity check: a point on z=0 must map identically via H*P1 and cam2
    point = array([1, 1, 0, 1]).T
    print(homography.normalize(dot(dot(H, cam1.P), point)))
    print(cam2.project(point))

    import pickle
    # binary mode is required for pickle portability (was 'w')
    with open('%s.pkl' % ims[1][:-4], 'wb') as f:
        pickle.dump(K, f)
        pickle.dump(dot(linalg.inv(K), cam2.P), f)
    sys.stderr.write("K and Rt dumped to %s.pkl\n" % ims[1][:-4])
def get_H(im0_path, im1_path):
    """ Get the Homography matrix. """
    # extract SIFT features for both images
    sift.process_image(im0_path, 'im0.sift')
    sift.process_image(im1_path, 'im1.sift')
    locs0, desc0 = sift.read_features_from_file('im0.sift')
    locs1, desc1 = sift.read_features_from_file('im1.sift')

    # two-sided matching, then collect the matched point coordinates
    matches = sift.match_twosided(desc0, desc1)
    idx0 = matches.nonzero()[0]
    idx1 = [int(matches[i]) for i in idx0]
    src = homography.make_homog(locs0[idx0, :2].T)
    dst = homography.make_homog(locs1[idx1, :2].T)

    # robust homography estimate (RANSAC); keep only the matrix
    return homography.H_from_ransac(src, dst, homography.RansacModel())[0]
def compute_homography(im1='../../data/mag_front.jpg',
                       im2='../../data/mag_perspective.jpg'):
    """Estimate the homography mapping points in *im1* onto *im2*.

    The corresponding '<name>.sift' feature files must already exist
    next to the images — they are read here, not recomputed.

    im1, im2 -- image file paths (default: the magazine test pair);
                assumes a 4-character extension (e.g. '.jpg').
    Returns the 3x3 homography matrix H.
    """
    ims = [im1, im2]
    # 'image.jpg' -> 'image.sift'
    sifts = [path[:-4] + ".sift" for path in ims]
    l0, d0 = sift.read_features_from_file(sifts[0])
    l1, d1 = sift.read_features_from_file(sifts[1])

    # match features and estimate homography with RANSAC
    matches = sift.match_twosided(d0, d1)
    ndx = matches.nonzero()[0]
    fp = homography.make_homog(l0[ndx, :2].T)
    ndx2 = [int(matches[i]) for i in ndx]
    tp = homography.make_homog(l1[ndx2, :2].T)
    model = homography.RansacModel()
    H = homography.H_from_ransac(fp, tp, model)[0]
    return H
#matches[i] = sift.match_twosided(d[i + 1], d[i]) pickle.dump(matches, open('out_ch03_pano.pickle', 'wb')) matches = pickle.load(open('out_ch03_pano.pickle', 'rb')) tic.k('matched') def convert_points(j): ndx = matches[j].nonzero()[0] fp = homography.make_homog(l[j + 1][ndx, :2].T) ndx2 = [int(matches[j][i]) for i in ndx] tp = homography.make_homog(l[j][ndx2, :2].T) return fp, tp model = homography.RansacModel() fp, tp = convert_points(1) H_12 = homography.H_from_ransac(fp, tp, model)[0] tic.k('12 homogd') fp, tp = convert_points(0) H_01 = homography.H_from_ransac(fp, tp, model)[0] tic.k('01 homogd') tp, fp = convert_points(2) # Note: Reversed. H_32 = homography.H_from_ransac(fp, tp, model)[0] tic.k('32 homogd') tp, fp = convert_points(3) # Note: Reversed. H_43 = homography.H_from_ransac(fp, tp, model)[0]
m_ORB = cv2.ORB_create() kp1, des1 = m_ORB.detectAndCompute(G_img1, None) kp2, des2 = m_ORB.detectAndCompute(G_img2, None) bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True) matches = bf.match(des1, des2) matches = sorted( matches, key=lambda x: x.distance) # Sort them in the order of their distance. obj1, obj2 = Cli.Manage_data(matches, kp1, kp2) model = h**o.RansacModel() H = h**o.H_from_ransac(obj1, obj2, model) # # Draw first 60 matches. # img3 = img2 # img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:60], img3) # cv2.imshow('result', img3) # cv2.imwrite('result.jpg', img3) # cv2.waitKey() # 位于边长为0.2, z = 0平面上底部的正方形 Box = cube_points([0, 0, 0.1], 0.1) cam1 = Cli.Camera(