Example #1
def calc_vec_errors(vec, point2d):
    # vec packs a camera pose and a 3D point: rvec (3), tvec (3), point3d (3)
    r, t = vec[0:3], vec[3:6]
    point3d = vec[6:9]
    view_mat = rodrigues_and_translation_to_view_mat3x4(r.reshape(3, 1),
                                                        t.reshape(3, 1))
    return compute_reprojection_errors(point3d.reshape(1, -1),
                                       point2d.reshape(1, -1),
                                       intrinsic_mat @ view_mat)[0]
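All of these examples treat compute_reprojection_errors(points3d, points2d, proj_mat) as a helper that projects 3D points with a 3x4 projection matrix and returns per-point pixel errors. As a reference, here is a minimal sketch of what such a helper could look like, inferred only from how it is called in these examples; the actual implementation in the original project may differ.

import numpy as np

def compute_reprojection_errors_sketch(points3d, points2d, proj_mat):
    # points3d: (N, 3) world points, points2d: (N, 2) observed pixels,
    # proj_mat: (3, 4) projection matrix, typically intrinsic_mat @ view_mat.
    points3d_hom = np.hstack((points3d, np.ones((len(points3d), 1))))
    projected = points3d_hom @ proj_mat.T            # (N, 3) homogeneous pixels
    projected = projected[:, :2] / projected[:, 2:]  # perspective divide
    return np.linalg.norm(projected - points2d, axis=1)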
Example #2
 def retriangulate_point(self, point_id):
     coords = []
     frames = []
     v_mats = []
     for i, frame in enumerate(self.corner_storage):
         if point_id in frame.ids and i in self.view_mats:
             idx = np.where(frame.ids == point_id)[0][0]
             coords.append(frame.points[idx])
             frames.append(i)
             v_mats.append(self.view_mats[i])
     if len(coords) < 3:
         return None, None
     if len(coords) > self.retriangulate_frames:
         idxs = np.random.choice(len(coords), size=self.retriangulate_frames, replace=False)
         coords = np.array(coords)[idxs]
         frames = np.array(frames)[idxs]
         v_mats = np.array(v_mats)[idxs]
     best_coords = None
     best_cnt = 0
     for _ in range(4):
         # pick two distinct observations; with replace=True the same corner
         # could be drawn twice, giving a degenerate triangulation pair
         i, j = np.random.choice(len(coords), 2, replace=False)
         corrs = Correspondences(np.array([point_id]), np.array([coords[i]]), np.array([coords[j]]))
         point3d, _, _ = triangulate_correspondences(corrs, v_mats[i], v_mats[j], self.intrinsic_mat,
                                                     self.triangulation_parameters)
         if len(point3d) == 0:
             continue
         errors = np.array([compute_reprojection_errors(point3d, np.array([p]), self.intrinsic_mat @ m)
                            for m, p in zip(v_mats, coords)])
         cnt = np.sum(errors < self.max_repr_error)
         if best_coords is None or best_cnt < cnt:
             best_cnt = cnt
             best_coords = point3d
     if best_cnt == 0:
         return None, None
     return best_coords, best_cnt
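A method like retriangulate_point is usually driven by a loop that periodically refreshes cloud points. The loop below is only an illustrative sketch; the tracker object, its point_cloud_builder, and the add_points call are assumptions about the surrounding class, not code from this example.

import numpy as np

def refresh_points(tracker, ids_to_refresh):
    # Hypothetical driver: re-triangulate selected points and keep the ones
    # that were supported by at least one inlier frame.
    new_ids, new_points = [], []
    for point_id in ids_to_refresh:
        coords, inlier_cnt = tracker.retriangulate_point(point_id)
        if coords is None:
            continue
        new_ids.append(point_id)
        new_points.append(coords.reshape(3))
    if new_ids:
        tracker.point_cloud_builder.add_points(np.array(new_ids),
                                               np.array(new_points))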
Example #3
def build_view_mat(
    common_obj: np.ndarray, common_img: np.ndarray, common_ids: np.ndarray,
    intrinsic_mat: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, float]:
    _, rvec, tvec, inliers = cv2.solvePnPRansac(
        common_obj,
        common_img,
        intrinsic_mat,
        None,
        #flags=cv2.SOLVEPNP_EPNP,
        reprojectionError=RANSAC_REPROJECTION_ERROR,
        iterationsCount=100)
    mat = rodrigues_and_translation_to_view_mat3x4(rvec, tvec)

    inliers = np.array(inliers).reshape(-1)
    inlier_points = common_obj[inliers]
    inlier_img = common_img[inliers]
    inlier_ids = common_ids[inliers]
    reprojection_errors = compute_reprojection_errors(inlier_points,
                                                      inlier_img,
                                                      intrinsic_mat @ mat)
    return mat, inlier_points, inlier_ids, reprojection_errors.mean()
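build_view_mat expects already matched 3D-2D correspondences. The snippet below is a hedged sketch of how they might be assembled for one frame with np.intersect1d; the names corners, cloud_ids, and cloud_points are illustrative assumptions, not part of the original code.

import numpy as np

def solve_frame_pose(corners, cloud_ids, cloud_points, intrinsic_mat):
    # Match this frame's corner ids against the point-cloud ids, then hand
    # the matched 3D/2D arrays to build_view_mat.
    common_ids, corner_idx, cloud_idx = np.intersect1d(
        corners.ids.flatten(), cloud_ids.flatten(), return_indices=True)
    return build_view_mat(cloud_points[cloud_idx],
                          corners.points[corner_idx],
                          common_ids,
                          intrinsic_mat)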
Example #4
 def pnp(self, cloud: PointCloudBuilder, camera):
     ids1, ids2 = build_index_intersection(cloud.ids, self.corners.ids)
     try:
         ret, rvec, tvec, inliers = cv2.solvePnPRansac(
             cloud.points[ids1],
             self.corners.points[ids2],
             camera,
             None,
             reprojectionError=MAX_REPROJECTION_ERROR)
     except cv2.error:  # solvePnPRansac raises cv2.error on degenerate input
         return self.last_inliners
     if not ret or len(inliers) < FrameTrack.MIN_INLIERS or np.linalg.norm(
             tvec) > MAX_TRANSLATION:
         return self.last_inliners
     potential_new_mtx = rodrigues_and_translation_to_view_mat3x4(
         rvec, tvec)
     potential_reprojection_error = compute_reprojection_errors(
         cloud.points[ids1], self.corners.points[ids2],
         camera @ potential_new_mtx)
     mean_error = potential_reprojection_error.mean()
     if self.current_reproject_error is None or mean_error < self.current_reproject_error:
         self.mtx = potential_new_mtx
         self.current_reproject_error = mean_error
         self.last_inliners = cloud.ids[ids1][np.array(inliers)]
     return self.last_inliners
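Several examples turn the solvePnPRansac output into a 3x4 view matrix with rodrigues_and_translation_to_view_mat3x4. That helper is not shown on this page; a minimal sketch of what it plausibly does (Rodrigues rotation vector to rotation matrix, stacked with the translation) is given below as an assumption, not the project's actual code.

import cv2
import numpy as np

def rodrigues_and_translation_to_view_mat3x4_sketch(rvec, tvec):
    # Convert the Rodrigues rotation vector into a 3x3 rotation matrix,
    # then build the 3x4 view matrix [R | t].
    rot_mat, _ = cv2.Rodrigues(rvec)
    return np.hstack((rot_mat, tvec.reshape(3, 1)))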
Example #5
def get_residuals(pc_builder, point_id_to_cloud, list_of_corners,
                  intrinsic_mat, matches, view_mats):
    return np.square([
        compute_reprojection_errors(
            pc_builder.points[point_id_to_cloud[point_id]:
                              point_id_to_cloud[point_id] + 1],
            list_of_corners[frame_id].points[point_index:point_index + 1],
            intrinsic_mat @ view_mats[frame_id])[0]
        for point_id, frame_id, point_index in matches
    ])
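get_residuals expects matches to be (point_id, frame_id, point_index) triples and point_id_to_cloud to map a point id to its row in the cloud. Those helpers are not shown here; the loop below is a hypothetical sketch of how such triples could be collected, assuming point_id_to_cloud behaves like a dict.

def collect_matches(list_of_corners, point_id_to_cloud, view_mats):
    # Hypothetical helper: gather (point_id, frame_id, point_index) triples
    # for every observation of a cloud point on a frame with a known pose.
    matches = []
    for frame_id, corners in enumerate(list_of_corners):
        if view_mats[frame_id] is None:
            continue
        for point_index, point_id in enumerate(corners.ids.flatten()):
            if point_id in point_id_to_cloud:
                matches.append((point_id, frame_id, point_index))
    return matches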
Example #6
 def try_restore_view(self, frame_idx: int) -> Tuple[Optional[np.ndarray], int, np.float64]:
     corners = self.corner_storage[frame_idx]
     _, (corner_ids, cloud_ids) = snp.intersect(corners.ids.flatten(),
                                                np.sort(self.point_cloud_builder.ids.flatten()), indices=True)
     points_3d = self.point_cloud_builder.points[cloud_ids]
     points_2d = corners.points[corner_ids]
     result = self.try_solve_pnp(points_3d, points_2d)
     if result is None:
         return None, 0, np.inf
     view, inlier_ids = result
     inlier_ids = inlier_ids.flatten()
     reprojection_errors = compute_reprojection_errors(points_3d[inlier_ids], points_2d[inlier_ids],
                                                       self.intrinsic_mat @ view)
     return view, len(inlier_ids), reprojection_errors.mean()
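try_solve_pnp is not defined on this page. A plausible shape for it, sketched here as an assumption (a thin wrapper around cv2.solvePnPRansac that returns None on failure; max_reprojection_error is an assumed attribute), would be:

import cv2

def try_solve_pnp(self, points_3d, points_2d):
    # Hypothetical helper: solve PnP with RANSAC and return
    # (view matrix, inlier indices), or None if no pose could be found.
    if len(points_3d) < 4:
        return None
    ok, rvec, tvec, inliers = cv2.solvePnPRansac(
        points_3d, points_2d, self.intrinsic_mat, None,
        reprojectionError=self.max_reprojection_error)
    if not ok or inliers is None:
        return None
    return rodrigues_and_translation_to_view_mat3x4(rvec, tvec), inliers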
Example #7
    def _retriangulate(self, point_id, max_pairs=5):
        frames, corners, poses = [], [], []
        for frame, index_on_frame in self.corner_pos_in_frames[point_id]:
            if self.tracked_poses[frame] is not None:
                frames.append(frame)
                corners.append(
                    self.corner_storage[frame].points[index_on_frame])
                poses.append(self.tracked_poses[frame].pos)

        if len(frames) < 2:
            return None
        if len(frames) == 2:
            cloud_pts, _ = self._triangulate(frames[0], frames[1])
            if len(cloud_pts) == 0:
                return None
            return cloud_pts[0], 2

        frames, corners, poses = np.array(frames), np.array(corners), np.array(
            poses)

        best_pos, best_inliers = None, None
        for _ in range(max_pairs):
            frame_1, frame_2 = np.random.choice(len(frames), 2, replace=False)
            corner_pos_1, corner_pos_2 = corners[frame_1], corners[frame_2]
            cloud_pts, _, _ = triangulate_correspondences(
                Correspondences(np.zeros(1), np.array([corner_pos_1]),
                                np.array([corner_pos_2])), poses[frame_1],
                poses[frame_2], self.intrinsic_mat,
                TriangulationParameters(self.MAX_REPROJ_ERR, 2.5, 0.0))
            if len(cloud_pts) == 0:
                continue

            inliers = 0
            for frame, corner in zip(frames, corners):
                repr_errors = compute_reprojection_errors(
                    cloud_pts, np.array([corner]),
                    self.intrinsic_mat @ self.tracked_poses[frame].pos)
                inliers += np.sum(repr_errors <= self.MAX_REPROJ_ERR)

            if best_pos is None or best_inliers < inliers:
                best_pos = cloud_pts[0]
                best_inliers = inliers

        if best_pos is None:
            return None
        return best_pos, best_inliers
Example #8
 def update_reproj_error(self, cloud: PointCloudBuilder, camera):
     ids1, ids2 = build_index_intersection(cloud.ids, self.corners.ids)
     self.current_reproject_error = compute_reprojection_errors(
         cloud.points[ids1], self.corners.points[ids2],
         camera @ self.mtx).mean()

    def get_first_cam_pose(frame_0, frame_1):
        corrs = build_correspondences(corner_storage[frame_0],
                                      corner_storage[frame_1])
        e_mat, _ = cv2.findEssentialMat(corrs.points_1, corrs.points_2,
                                        intrinsic_mat, cv2.RANSAC, 0.999, 2.0)

        def essential_mat_to_camera_view_matrix(e_mat):
            U, S, VH = np.linalg.svd(e_mat, full_matrices=True)
            W = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]], dtype=float)
            u3 = U[:, 2]
            possible_R = [U @ W @ VH, U @ W.T @ VH]
            possible_t = [u3, -u3]
            best_R = None
            best_t = None
            max_positive_z = 0
            for R in possible_R:
                for t in possible_t:
                    view_mat0 = np.hstack((np.eye(3), np.zeros((3, 1))))
                    view_mat1 = np.hstack((R, t.reshape((3, 1))))
                    view_mats[frame_0] = view_mat0
                    view_mats[frame_1] = view_mat1
                    points3d, ids, median_cos = triangulate(frame_0, frame_1)
                    cur_positive_z = 0
                    for pt in points3d:
                        # use 1 as the homogeneous coordinate so the view matrix's translation is applied
                        pt = np.append(pt, np.ones(1)).reshape((4, 1))
                        pt_transformed0 = (view_mat0 @ pt).flatten()
                        z0 = pt_transformed0[2]
                        pt_transformed1 = (view_mat1 @ pt).flatten()
                        z1 = pt_transformed1[2]
                        cur_positive_z += (z0 > 0.1) + (z1 > 0.1)
                    if cur_positive_z > max_positive_z:
                        max_positive_z = cur_positive_z
                        best_R = R
                        best_t = t
            return best_R, best_t

        R, t = essential_mat_to_camera_view_matrix(e_mat)
        baseline = np.linalg.norm(t)

        view_mat0 = np.hstack((np.eye(3), np.zeros((3, 1))))
        view_mat1 = np.hstack((R, t.reshape((3, 1))))
        view_mats[frame_0] = view_mat0
        view_mats[frame_1] = view_mat1
        points3d, ids, median_cos = triangulate(frame_0, frame_1)
        points0 = []
        points1 = []
        for i in ids:
            for j in frames_of_corner[i]:
                if j[0] == frame_0:
                    points0.append(j[1])
                if j[0] == frame_1:
                    points1.append(j[1])
        points0 = np.array(points0)
        points1 = np.array(points1)
        sum_error = compute_reprojection_errors(
            points3d, corner_storage[frame_0].points[points0],
            intrinsic_mat @ view_mat0) + compute_reprojection_errors(
                points3d, corner_storage[frame_1].points[points1],
                intrinsic_mat @ view_mat1)
        reprojection_error = np.mean(sum_error)
        points3d_count = len(points3d)

        view_mats[frame_0] = None
        view_mats[frame_1] = None

        # check how good this frame pair is, and also verify that the matrix R is really a rotation
        if (baseline < BASELINE_THRESHOLD
                or reprojection_error > REPROJECTION_ERROR_THRESHOLD
                or points3d_count < MIN_3D_POINTS
                or not check_if_mat_is_rot_mat(np.hstack((R, t.reshape((3, 1)))))):
            return False, baseline, reprojection_error, points3d_count, None, None
        else:
            return True, baseline, reprojection_error, points3d_count, R, t
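check_if_mat_is_rot_mat is also not defined here. Since it receives the full 3x4 [R | t] matrix, a reasonable sketch (an assumption, not the original helper) is to test the left 3x3 block for orthonormality and a determinant of +1.

import numpy as np

def check_if_mat_is_rot_mat(mat, eps=1e-5):
    # Accepts either a 3x3 rotation or a 3x4 [R | t] view matrix; only the
    # left 3x3 block is checked.
    r = mat[:, :3]
    return (np.allclose(r @ r.T, np.eye(3), atol=eps)
            and abs(np.linalg.det(r) - 1.0) < eps)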