l2, d2 = sift.read_features_from_file('im2.sift') # match features matches = sift.match_twosided(d1, d2) ndx = matches.nonzero()[0] # make homogeneous and normalize with inv(K) x1 = homography.make_homog(l1[ndx, :2].T) ndx2 = [int(matches[i]) for i in ndx] x2 = homography.make_homog(l2[ndx2, :2].T) x1n = np.dot(np.linalg.inv(K), x1) x2n = np.dot(np.linalg.inv(K), x2) # estimate E with RANSAC model = sfm.RansacModel() E, inliers = sfm.F_from_ransac(x1n, x2n, model) # compute camera matrices (P2 will be list of four solutions) P1 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]) P2 = sfm.compute_P_from_essential(E) # pick the solution with points in front of cameras ind = 0 maxres = 0 for i in range(4): # triangulate inliers and compute depth for each camera X = sfm.triangulate(x1n[:, inliers], x2n[:, inliers], P1, P2[i]) d1 = np.dot(P1, X)[2] d2 = np.dot(P2[i], X)[2] if np.sum(d1 > 0) + np.sum(d2 > 0) > maxres:
def draw3d_reconstracted_estP(self):
    """Estimate the second camera pose from projected points and draw the
    resulting 3-D reconstruction on ``self.ax_rcn_est``.

    Pipeline: project the known world points ``self.Xpt`` through both
    cameras, normalize with ``inv(K)``, estimate the essential matrix E,
    decompose it into the four candidate camera matrices, pick the one
    that places the most points in front of both cameras, re-triangulate,
    and plot the wireframe (edges from ``self.ll``) plus any points that
    ended up behind a camera.

    Reads: self.K, self.P1, self.P2, self.Xpt, self.ll, self.ax_rcn_est.
    Side effects: clears and redraws ``self.ax_rcn_est``; prints debug
    information about E and the decomposed rotations to stdout.
    """
    self.ax_rcn_est.cla()

    # Full camera matrices (intrinsic * extrinsic) for the ground-truth poses.
    M1 = self.K.dot(self.P1)
    M2 = self.K.dot(self.P2)

    # Image coordinates (s*x, s*y, s), then normalize by s -> (x, y, 1).
    x1p = M1.dot(self.Xpt)
    x2p = M2.dot(self.Xpt)
    for i in range(3):
        x1p[i] /= x1p[2]
        x2p[i] /= x2p[2]

    # Normalized image coordinates (intrinsics removed).
    x1n = np.dot(inv(self.K), x1p)
    x2n = np.dot(inv(self.K), x2p)

    # Estimate E: either robustly with RANSAC or directly with the
    # 8-point algorithm (current setting: direct).
    Method_EstE = ""
    # BUGFIX: was `Method_EstE is "RAN"` — identity comparison against a
    # string literal is implementation-dependent (SyntaxWarning on
    # CPython >= 3.8); use equality.
    if Method_EstE == "RAN":
        model = sfm.RansacModel()
        E, inliers = sfm.F_from_ransac(x1n, x2n, model)
    else:
        E = sfm.compute_fundamental(x1n, x2n)
        # Without RANSAC every correspondence counts as an inlier.
        inliers = [True for i in range(len(x1n[0, :]))]
    print("E: ", E)

    # Compute camera matrices (P2 is a list of the four candidate solutions).
    P1 = array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
    P2, S, R1, R2, U, V = sfm.compute_P_from_essential_mod(E)
    print("S: ", S)
    print("S fro: ", np.linalg.norm(S, "fro"))
    print("R1: ", R1)
    print("R2: ", R2)
    print("R1 deg: ", calcRmatDeg(R1))
    print("R2 deg: ", calcRmatDeg(R2))
    P2_R = self.P2[:, 0:3]
    print("P2_R: ", P2_R)

    # Pick the solution that puts the most points in front of both cameras.
    ind = 0
    maxres = 0
    for i in range(4):
        # Triangulate the inliers and compute the depth for each camera.
        X = sfm.triangulate(x1n[:, inliers], x2n[:, inliers], P1, P2[i])
        d1 = np.dot(P1, X)[2]
        d2 = np.dot(P2[i], X)[2]
        if sum(d1 > 0) + sum(d2 > 0) > maxres:
            maxres = sum(d1 > 0) + sum(d2 > 0)
            ind = i
            infront = (d1 > 0) & (d2 > 0)
    # NOTE(review): if no candidate yields any point with positive depth,
    # `infront` is never bound and the plotting below raises NameError —
    # confirm whether that degenerate case can occur for this data.

    # Re-triangulate ALL points with the chosen solution for display.
    M1 = self.K.dot(P1)
    M2_est = self.K.dot(P2[ind])
    X = sfm.triangulate(x1p, x2p, M1, M2_est)

    self.ax_rcn_est.set_xlabel("x_wld_rcn")
    self.ax_rcn_est.set_ylabel("y_wld_rcn")
    self.ax_rcn_est.set_zlabel("z_wld_rcn")
    self.ax_rcn_est.set_title("obj wld rcn est")

    # Draw each wireframe edge (self.ll holds index pairs into X).
    for l in self.ll:
        x = np.array([X[0][l[0]], X[0][l[1]]])
        y = np.array([X[1][l[0]], X[1][l[1]]])
        z = np.array([X[2][l[0]], X[2][l[1]]])
        self.ax_rcn_est.plot(x, y, z, marker='.', markersize=5, color='red')

    # Points estimated to lie behind one of the cameras, drawn in blue.
    x = X[0, [not val for val in infront]]
    y = X[1, [not val for val in infront]]
    z = X[2, [not val for val in infront]]
    self.ax_rcn_est.plot(x, y, z, marker='.', markersize=5,
                         color='blue', linestyle="")