def sim3_transform_map(result, nrCameras, nrPoints):
    """Fit a similarity transform to the BA result and map it into the desired world frame."""
    # Similarity transform
    s_poses = []
    s_points = []
    _sim3 = sim3.Similarity3()

    # Collect the estimated camera poses and landmarks from the BA result.
    for i in range(nrCameras):
        pose_i = result.atPose3(symbol(ord('x'), i))
        s_poses.append(pose_i)

    for j in range(nrPoints):
        point_j = result.atPoint3(symbol(ord('p'), j))
        s_points.append(point_j)

    # Hard-coded destination poses: a shared camera rotation wRc and camera
    # centres at multiples of 1.58 m along the heading direction, 1.2 m up.
    theta = np.radians(30)
    wRc = gtsam.Rot3(np.array([[-math.sin(theta), 0, math.cos(theta)],
                               [-math.cos(theta), 0, -math.sin(theta)],
                               [0, 1, 0]]))
    d_pose1 = gtsam.Pose3(
        wRc, gtsam.Point3(-math.sin(theta) * 1.58, -math.cos(theta) * 1.58, 1.2))
    d_pose2 = gtsam.Pose3(
        wRc, gtsam.Point3(-math.sin(theta) * 1.58 * 3, -math.cos(theta) * 1.58 * 3, 1.2))
    pose_pairs = [[s_poses[2], d_pose2], [s_poses[0], d_pose1]]

    # Estimate the similarity transform from the two pose pairs.
    _sim3.align_pose(pose_pairs)
    print('R', _sim3._R)
    print('t', _sim3._t)
    print('s', _sim3._s)

    # Transform every pose and landmark of the estimated map.
    s_map = (s_poses, s_points)
    d_map = _sim3.map_transform(s_map)

    return _sim3, d_map
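# --- Illustration (not part of the original module) ---
# A minimal numpy sketch of the math that sim3.Similarity3.align_pose and
# map_transform are assumed to implement (the sim3 module itself is not shown
# in this file). Given two (source, destination) pose pairs, a similarity
# transform p_d = s * R @ p_s + t has a closed-form solution. All names and
# conventions below are assumptions made for illustration only.
import numpy as np


def align_two_pose_pairs(source, dest):
    """Estimate (R, t, s) mapping the source frame onto the destination frame.

    source, dest: lists of two (rotation, centre) tuples, where rotation is a
    3x3 world-from-camera matrix and centre is a length-3 camera position.
    """
    (Rs1, cs1), (Rs2, cs2) = source
    (Rd1, cd1), (Rd2, cd2) = dest
    # Rotation: relative rotation taken from the first pair (with exact data
    # both pairs agree, so no averaging is done in this sketch).
    R = Rd1 @ Rs1.T
    # Scale: rotation preserves length, so s is the ratio of centre distances.
    s = np.linalg.norm(cd2 - cd1) / np.linalg.norm(cs2 - cs1)
    # Translation: cd1 = s * R @ cs1 + t.
    t = cd1 - s * R @ cs1
    return R, t, s


def transform_map(R, t, s, poses, points):
    """Apply the similarity to camera poses (R_i, c_i) and to 3D landmarks."""
    new_poses = [(R @ Ri, s * R @ ci + t) for Ri, ci in poses]
    new_points = [s * R @ p + t for p in points]
    return new_poses, new_points
# --- End illustration ---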
def test_bundle_adjustment_with_error(self):
    """Test Structure from Motion solution with ill estimated prior value."""
    # Create the set of actual points and poses
    actual_points = []
    actual_poses = []

    # Create the set of ground-truth landmarks
    points = sfm_data.create_points()

    # Create the set of ground-truth poses
    poses = sfm_data.create_poses()

    # Create the nrCameras*nrPoints feature point data input for atrium_sfm()
    feature_data = sfm_data.Data(self.nrCameras, self.nrPoints)

    # Project points back to the camera to generate feature points
    for i, pose in enumerate(poses):
        for j, point in enumerate(points):
            feature_data.J[i][j] = j
            camera = gtsam.PinholeCameraCal3_S2(pose, self.calibration)
            feature_data.Z[i][j] = camera.project(point)

    result = self.sfm.bundle_adjustment(
        feature_data, 2.5, self.rotation_error, self.translation_error)

    # Similarity transform
    s_poses = []
    s_points = []
    _sim3 = sim3.Similarity3()

    for i in range(len(poses)):
        pose_i = result.atPose3(symbol(ord('x'), i))
        s_poses.append(pose_i)

    for j in range(len(points)):
        point_j = result.atPoint3(symbol(ord('p'), j))
        s_points.append(point_j)

    pose_pairs = [[s_poses[2], poses[2]],
                  [s_poses[1], poses[1]],
                  [s_poses[0], poses[0]]]

    _sim3.sim3_pose(pose_pairs)

    s_map = (s_poses, s_points)
    actual_poses, actual_points = _sim3.map_transform(s_map)

    # Compare output poses with ground truth poses
    for i, pose in enumerate(actual_poses):
        self.assert_gtsam_equals(pose, poses[i], 1e-2)

    # Compare output points with ground truth points
    for i, point in enumerate(actual_points):
        self.assert_gtsam_equals(point, points[i], 1e-2)
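# --- Illustration (not part of the original test file) ---
# A hedged sketch of the assert_gtsam_equals helper the test above relies on;
# it is assumed to delegate to GTSAM's own equals() with the given tolerance,
# falling back to numpy for point types returned as plain arrays. The class
# name and the fallback behaviour are illustrative assumptions.
import unittest

import numpy as np


class GtsamTestCase(unittest.TestCase):
    def assert_gtsam_equals(self, actual, expected, tol=1e-6):
        """Assert that two GTSAM objects (or point arrays) agree within tol."""
        if hasattr(actual, 'equals'):
            self.assertTrue(actual.equals(expected, tol),
                            'Not equal:\n{}\nvs\n{}'.format(actual, expected))
        else:
            # Newer GTSAM wrappers return Point3 as a plain numpy array.
            np.testing.assert_allclose(
                np.asarray(actual), np.asarray(expected), atol=tol)
# --- End illustration ---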
def sim3_transform_map(self, result, img_pose_id_list, landmark_id_list):
    """Fit a similarity transform to the BA result and map it into the desired world frame."""
    # Similarity transform
    s_poses = []
    s_points = []
    _sim3 = sim3.Similarity3()

    # Hard-coded destination rotation wRc shared by all expected camera poses.
    theta = np.radians(30)
    wRc = gtsam.Rot3(
        np.array([[-math.sin(theta), 0, math.cos(theta)],
                  [-math.cos(theta), 0, -math.sin(theta)],
                  [0, 1, 0]]))

    # Collect the estimated poses; the first two are paired with the expected
    # poses on the hard-coded trajectory to constrain the similarity transform.
    pose_pairs = []
    for i, idx in enumerate(img_pose_id_list):
        pose_i = result.atPose3(symbol(ord('x'), idx))
        s_poses.append(pose_i)
        if i < 2:
            d_pose = gtsam.Pose3(
                wRc,
                gtsam.Point3(-math.sin(theta) * 1.58 * idx,
                             -math.cos(theta) * 1.58 * idx, 1.2))
            pose_pairs.append([s_poses[i], d_pose])

    # Collect the estimated landmarks.
    for idx in landmark_id_list:
        point_j = result.atPoint3(symbol(ord('p'), idx))
        s_points.append(point_j)

    # Estimate the similarity transform from the two pose pairs.
    _sim3.align_pose(pose_pairs)
    print('R', _sim3._R)
    print('t', _sim3._t)
    print('s', _sim3._s)

    # Transform every pose and landmark of the estimated map.
    s_map = (s_poses, s_points)
    d_map = _sim3.map_transform(s_map)

    return _sim3, d_map
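# --- Illustration (not part of the original module) ---
# A small numpy check of the hard-coded destination trajectory used above: the
# expected camera centres sit 1.58 m apart along the heading
# (-sin(theta), -cos(theta), 0) at a constant height of 1.2 m, one step per
# pose id. This is only a sanity check of the construction, not pipeline code.
import math

import numpy as np

theta = np.radians(30)
centres = [np.array([-math.sin(theta) * 1.58 * idx,
                     -math.cos(theta) * 1.58 * idx,
                     1.2]) for idx in (0, 1, 2)]

steps = [np.linalg.norm(b - a) for a, b in zip(centres, centres[1:])]
assert np.allclose(steps, 1.58)  # uniform 1.58 m spacing between camera centres
# --- End illustration ---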