Example No. 1
def get_matrix_poses(corner_storage, intrisinc_mat):
    pairs = get_best_intersected(corner_storage)
    best_pair = -1, -1
    best_pair_result = -1

    for i, j, _ in pairs[:100]:
        ids1, ids2 = build_index_intersection(corner_storage[i].ids,
                                              corner_storage[j].ids)
        points1 = corner_storage[i].points[ids1]
        points2 = corner_storage[j].points[ids2]

        E, mask = cv2.findEssentialMat(points1,
                                       points2,
                                       focal=intrisinc_mat[0][0])
        if mask.sum() < 10:
            continue
        F, mask = cv2.findFundamentalMat(points1, points2)
        if mask.sum() < 10:
            continue
        _, R, t, mask = cv2.recoverPose(E,
                                        points1,
                                        points2,
                                        focal=intrisinc_mat[0][0])
        if mask.sum() < 10:
            continue

        corrs = build_correspondences(corner_storage[i], corner_storage[j])
        points, ids, _ = triangulate_correspondences(
            corrs, eye3x4(), np.hstack((R, t)), intrisinc_mat,
            TriangulationParameters(max_reprojection_error=5,
                                    min_triangulation_angle_deg=5.,
                                    min_depth=0.001))
        current_result = len(ids) // 20
        if current_result > best_pair_result:
            best_pair = i, j
            best_pair_result = current_result

    i, j = best_pair

    ids1, ids2 = build_index_intersection(corner_storage[i].ids,
                                          corner_storage[j].ids)
    points1 = corner_storage[i].points[ids1]
    points2 = corner_storage[j].points[ids2]

    E, mask = cv2.findEssentialMat(points1, points2, focal=intrisinc_mat[0][0])
    F, mask = cv2.findFundamentalMat(points1, points2)
    _, R, t, mask = cv2.recoverPose(E,
                                    points1,
                                    points2,
                                    focal=intrisinc_mat[0][0])

    print(f"Chosen frames {i} and {j}")

    return (i, view_mat3x4_to_pose(eye3x4())),\
           (j, view_mat3x4_to_pose(np.hstack((R, t))))
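
Example 1 builds on a build_index_intersection helper whose body is not shown. A minimal sketch of what it presumably does, assuming the corner id arrays are flat integer arrays and the returned values index into each frame's point array (Example 4 uses the same np.intersect1d pattern inline):

import numpy as np

def build_index_intersection(ids_1, ids_2):
    # Assumed helper: find the corner ids visible in both frames and return
    # the positions of those ids in each input array, so that
    # corner_storage[i].points[indices_1] and corner_storage[j].points[indices_2]
    # are matched 2D points.
    _, indices_1, indices_2 = np.intersect1d(ids_1.flatten(),
                                             ids_2.flatten(),
                                             return_indices=True)
    return indices_1, indices_2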
Example No. 2
def calculate_known_views(
    intrinsic_mat,
    corner_storage: CornerStorage,
    min_correspondencies_count=100,
    max_homography=0.7,
) -> Tuple[Tuple[int, Pose], Tuple[int, Pose]]:
    best_points_num, best_known_views = -1, ((None, None), (None, None))
    for i in range(len(corner_storage)):
        for j in range(i + 1, min(i + 40, len(corner_storage))):
            corresp = build_correspondences(corner_storage[i],
                                            corner_storage[j])
            if len(corresp[0]) < min_correspondencies_count:
                break
            E, mask = cv2.findEssentialMat(corresp.points_1,
                                           corresp.points_2,
                                           cameraMatrix=intrinsic_mat,
                                           method=cv2.RANSAC)
            if E is None:
                continue
            mask = (mask.squeeze() == 1)
            corresp = Correspondences(corresp.ids[mask],
                                      corresp.points_1[mask],
                                      corresp.points_2[mask])

            # Validate E using homography
            H, mask = cv2.findHomography(corresp.points_1,
                                         corresp.points_2,
                                         method=cv2.RANSAC)
            if np.count_nonzero(mask) / len(corresp.ids) > max_homography:
                continue

            R1, R2, T = cv2.decomposeEssentialMat(E)
            for view_mat2 in [
                    np.hstack((R, t)) for R in [R1, R2] for t in [T, -T]
            ]:
                view_mat1 = np.eye(3, 4)
                points, _, _ = triangulate_correspondences(
                    corresp, view_mat1, view_mat2, intrinsic_mat,
                    triang_params)
                print('Try frames {}, {}: {} correspondent points'.format(
                    i, j, len(points)))
                if len(points) > best_points_num:
                    best_known_views = ((i, view_mat3x4_to_pose(view_mat1)),
                                        (j, view_mat3x4_to_pose(view_mat2)))
                    best_points_num = len(points)
                    if best_points_num > 1500:
                        return best_known_views
    return best_known_views
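
Example 2 refers to a module-level triang_params constant defined elsewhere. A plausible definition, with placeholder thresholds of the same order of magnitude as those used in Examples 1 and 5 (the exact values and the import location are assumptions):

from _camtrack import TriangulationParameters  # assumed import location

triang_params = TriangulationParameters(max_reprojection_error=5.0,
                                        min_triangulation_angle_deg=1.0,
                                        min_depth=0.1)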
Example No. 3
def _test_frame_pair(intrinsic_mat: np.ndarray,
                     corners_1: FrameCorners,
                     corners_2: FrameCorners,
                     param_koeff: float = 1) -> Tuple[int, Optional[Pose]]:
    correspondences = build_correspondences(corners_1, corners_2)
    if len(correspondences.ids) < 6:
        return 0, None
    points2d_1 = correspondences.points_1
    points2d_2 = correspondences.points_2

    essential, essential_inliers = cv2.findEssentialMat(points2d_1, points2d_2, intrinsic_mat, threshold=param_koeff)
    if essential is None or essential.shape != (3, 3):
        return 0, None
    homography, homography_inliers = cv2.findHomography(points2d_1, points2d_2, method=cv2.RANSAC)
    if len(np.where(homography_inliers > 0)[0]) > len(np.where(essential_inliers > 0)[0]):
        return 0, None
    num_passed, rot, t, mask = cv2.recoverPose(essential, points2d_1, points2d_2,
                                               intrinsic_mat, mask=essential_inliers)

    outlier_ids = np.array(
        [pt_id for pt_id, mask_elem in zip(correspondences.ids, mask) if mask_elem[0] == 0],
        dtype=correspondences.ids.dtype)
    inlier_correspondences = _remove_correspondences_with_ids(correspondences, outlier_ids)
    if len(inlier_correspondences.ids) < 4:
        return 0, None
    view_matr_1 = eye3x4()
    view_matr_2 = np.hstack((rot, t))
    triangulation_params = TriangulationParameters(param_koeff, 1.0 / param_koeff, 0.0)
    pts_3d, triangulated_ids, med_cos = triangulate_correspondences(inlier_correspondences, view_matr_1, view_matr_2,
                                                                    intrinsic_mat, triangulation_params)
    if len(pts_3d) < 4:
        return 0, None
    print(len(inlier_correspondences.ids), len(pts_3d))
    return len(pts_3d), view_mat3x4_to_pose(view_matr_2)
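
Examples 3 and 6 filter matches with a _remove_correspondences_with_ids helper that is not shown. A sketch of the assumed behaviour, dropping every correspondence whose id appears in ids_to_remove:

import numpy as np

def _remove_correspondences_with_ids(correspondences, ids_to_remove):
    # Keep only the correspondences whose id is NOT listed in ids_to_remove.
    keep = ~np.isin(correspondences.ids.flatten(), ids_to_remove.flatten())
    return Correspondences(correspondences.ids[keep],
                           correspondences.points_1[keep],
                           correspondences.points_2[keep])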
Example No. 4
def camera_pose(id,
                corner_storage,
                point_cloud_builder,
                intrinsic_mat,
                dist_coef=None):
    frame_corners = corner_storage[id]
    points = frame_corners.points
    frame_ids = frame_corners.ids
    cloud_ids = point_cloud_builder.ids
    ids, frame_indexes, cloud_indexes = np.intersect1d(frame_ids,
                                                       cloud_ids,
                                                       return_indices=True)
    if ids.shape[0] < 4:
        return None

    cloud_points = point_cloud_builder.points[cloud_indexes]
    frame_points = points[frame_indexes]
    retval, rvec, tvec, inliers = cv2.solvePnPRansac(cloud_points,
                                                     frame_points,
                                                     intrinsic_mat, dist_coef)
    if not retval:
        return None
    retval, rvec, tvec = cv2.solvePnP(cloud_points[inliers],
                                      frame_points[inliers], intrinsic_mat,
                                      dist_coef)
    if not retval:
        return None
    return view_mat3x4_to_pose(
        rodrigues_and_translation_to_view_mat3x4(rvec, tvec)), np.setdiff1d(
            ids, inliers)
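
Example 4 turns the solvePnP result into a view matrix with rodrigues_and_translation_to_view_mat3x4. If that helper is unavailable, an equivalent conversion is short: turn the Rodrigues rotation vector into a 3x3 matrix and append the translation as a fourth column (a sketch, not necessarily the library's exact implementation):

import cv2
import numpy as np

def rodrigues_and_translation_to_view_mat3x4(rvec, tvec):
    # rvec, tvec come from cv2.solvePnP; the result is a 3x4
    # world-to-camera view matrix [R | t].
    r_mat, _ = cv2.Rodrigues(rvec)
    return np.hstack((r_mat, tvec.reshape(3, 1)))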
Example No. 5
def calc_known_views(corner_storage, intrinsic_mat, indent=5):
    num_frames = len(corner_storage)
    known_view_1 = (None, None)
    known_view_2 = (None, None)
    num_points = -1

    for frame_1 in range(num_frames):
        for frame_2 in range(frame_1 + indent, num_frames):
            corrs = build_correspondences(corner_storage[frame_1],
                                          corner_storage[frame_2])

            if len(corrs.ids) < 6:
                continue

            points_1 = corrs.points_1
            points_2 = corrs.points_2

            H, mask_h = cv2.findHomography(points_1,
                                           points_2,
                                           method=cv2.RANSAC)
            if mask_h is None:
                continue

            mask_h = mask_h.reshape(-1)

            E, mask_e = cv2.findEssentialMat(points_1,
                                             points_2,
                                             method=cv2.RANSAC,
                                             cameraMatrix=intrinsic_mat)

            if E is None or mask_e is None or E.shape != (3, 3):
                continue

            mask_e = mask_e.reshape(-1)

            if mask_h.sum() / mask_e.sum() > 0.5:
                continue

            corrs = Correspondences(corrs.ids[(mask_e == 1)],
                                    points_1[(mask_e == 1)],
                                    points_2[(mask_e == 1)])

            R1, R2, t = cv2.decomposeEssentialMat(E)

            for poss_pose in [
                    Pose(R1.T, R1.T @ t),
                    Pose(R1.T, R1.T @ (-t)),
                    Pose(R2.T, R2.T @ t),
                    Pose(R2.T, R2.T @ (-t))
            ]:
                points3d, _, _ = triangulate_correspondences(
                    corrs, eye3x4(), pose_to_view_mat3x4(poss_pose),
                    intrinsic_mat, TriangulationParameters(1, 2, .1))

                if len(points3d) > num_points:
                    num_points = len(points3d)
                    known_view_1 = (frame_1, view_mat3x4_to_pose(eye3x4()))
                    known_view_2 = (frame_2, poss_pose)
    return known_view_1, known_view_2
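
The four Pose(R.T, R.T @ ±t) candidates in Example 5 come from inverting the world-to-camera matrix [R | t] obtained from the decomposed essential matrix; both signs of t are tried because decomposeEssentialMat recovers the translation only up to sign. Under that convention the pose/view conversions used throughout these examples look roughly as follows (a sketch, assuming Pose stores the camera-to-world rotation and the camera centre):

import numpy as np

def view_mat3x4_to_pose(view_mat):
    # view_mat maps world points into the camera frame: x_cam = R @ x_world + t.
    r_mat = view_mat[:, :3]
    t_vec = view_mat[:, 3]
    # The pose stores the inverse transform: orientation R.T and camera
    # centre -R.T @ t expressed in world coordinates.
    return Pose(r_mat.T, r_mat.T @ -t_vec)

def pose_to_view_mat3x4(pose):
    return np.hstack((pose.r_mat.T,
                      (pose.r_mat.T @ -pose.t_vec).reshape(3, 1)))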
Example No. 6
def select_frames(frames, corner_storage, camera_matrix):
    frame1 = (0, view_mat3x4_to_pose(eye3x4()))
    frame2 = (-1, view_mat3x4_to_pose(eye3x4()))

    mx = 0

    for i in range(1, len(frames)):
        correspondences = build_correspondences(corner_storage[frame1[0]],
                                                corner_storage[i])
        if len(correspondences.ids) < 8:
            continue

        E, mask = cv2.findEssentialMat(correspondences.points_1,
                                       correspondences.points_2,
                                       camera_matrix,
                                       method=cv2.RANSAC)

        if mask is None:
            continue

        correspondences = _remove_correspondences_with_ids(
            correspondences, correspondences.ids[mask.flatten() == 0])
        if len(correspondences.ids) < 8 or not validate(correspondences, mask):
            continue

        R1, R2, t = cv2.decomposeEssentialMat(E)
        ps = [
            Pose(R1.T, R1.T @ t),
            Pose(R2.T, R2.T @ t),
            Pose(R1.T, R1.T @ (-t)),
            Pose(R2.T, R2.T @ (-t))
        ]

        for pose in ps:
            points, _, _ = triangulate_correspondences(
                correspondences, pose_to_view_mat3x4(frame1[1]),
                pose_to_view_mat3x4(pose), camera_matrix, TRIANG_PARAMS)

            if len(points) > mx:
                frame2 = (i, pose)
                mx = len(points)

    print(frame1[0], frame2[0])

    return frame1, frame2
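
Example 6 uses a validate helper and a TRIANG_PARAMS constant that are defined elsewhere. A hypothetical validate, mirroring the planar-degeneracy check that Examples 2, 3 and 5 perform inline (comparing homography inliers against essential-matrix inliers), might look like this; the 0.7 threshold and the exact signature are assumptions:

import cv2
import numpy as np

def validate(correspondences, essential_mask, max_homography_ratio=0.7):
    # Reject the frame pair if a single homography explains almost as many
    # matches as the essential matrix, i.e. the motion or scene is degenerate.
    _, homography_mask = cv2.findHomography(correspondences.points_1,
                                            correspondences.points_2,
                                            method=cv2.RANSAC)
    if homography_mask is None:
        return False
    ratio = np.count_nonzero(homography_mask) / np.count_nonzero(essential_mask)
    return ratio <= max_homography_ratio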
Example No. 7
def find_best_start_poses(frames_n, corner_storage, intrinsic_mat):
    best_frames = (0, 0)
    best_snd_pose = None
    best_pose_score = 0
    for first_frame in range(frames_n):
        for second_frame in range(first_frame + 5, frames_n):
            pose, pose_score = get_pose_with_score(first_frame, second_frame,
                                                   corner_storage,
                                                   intrinsic_mat)
            if pose_score > best_pose_score:
                best_frames = (first_frame, second_frame)
                best_snd_pose = pose
                best_pose_score = pose_score
    return best_frames, view_mat3x4_to_pose(eye3x4()), best_snd_pose
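
The get_pose_with_score helper used by Example 7 is not shown; judging from the surrounding code it returns a candidate pose for the second frame together with a quality score (for instance the number of triangulated points, as in Example 3). A minimal usage sketch for the function above, with the tuple unpacking implied by its return statement:

(frame_1, frame_2), pose_1, pose_2 = find_best_start_poses(len(corner_storage),
                                                           corner_storage,
                                                           intrinsic_mat)
known_view_1 = (frame_1, pose_1)  # identity pose for the first frame
known_view_2 = (frame_2, pose_2)  # best-scoring relative pose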
Example No. 8
def _find_pos_pair(intrinsic_mat: np.ndarray,
                   corner_storage: CornerStorage,
                   param_koeff: float = 1) -> Tuple[Tuple[int, Pose], Tuple[int, Pose]]:
    best_num_pts = 0
    best_frame = 1
    best_pose = None
    for frame_number in range(1, len(corner_storage)):
        print(frame_number)
        num_pts, pose = _test_frame_pair(intrinsic_mat, corner_storage[0],
                                         corner_storage[frame_number], param_koeff)
        if num_pts > best_num_pts:
            best_num_pts = num_pts
            best_pose = pose
            best_frame = frame_number
    if best_pose is None:
        if param_koeff < 4:
            return _find_pos_pair(intrinsic_mat, corner_storage, param_koeff * 2)
        raise TrackingError("Failed to initialize tracking")
    return (0, view_mat3x4_to_pose(eye3x4())), (best_frame, best_pose)
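
Example 8 raises a TrackingError that is not defined in the snippet; a minimal definition, assuming it is a plain exception type:

class TrackingError(Exception):
    """Raised when no valid initial frame pair can be found."""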
Example No. 9
def initial_camera_views(
        corner_storage: CornerStorage, intrinsic_mat: np.ndarray
) -> Tuple[Tuple[int, Pose], Tuple[int, Pose]]:
    max_pose = None
    max_npoints = 0
    maxj = -1
    maxi = -1

    enum_corner_storage = list(enumerate(corner_storage))
    for j, base_frame in tqdm(enum_corner_storage[::32]):
        for i, frame in enum_corner_storage[j + 1:j + 32]:
            pose, npoints = pose_by_frames(base_frame, frame, intrinsic_mat)
            print(i, j, npoints)
            if npoints > max_npoints:
                max_npoints = npoints
                maxi = i
                maxj = j
                max_pose = pose

    print(max_npoints)
    return (maxj, view_mat3x4_to_pose(eye3x4())), (maxi, max_pose)
Example No. 10
    def track(self):
        step_num = 1
        num_of_defined_poses = np.sum([
            track_pos_info is not None for track_pos_info in self.tracked_poses
        ])
        while num_of_defined_poses != self.num_of_frames:
            unsolved_frames = [
                i for i in range(self.num_of_frames)
                if self.tracked_poses[i] is None
            ]

            new_cams_info = []
            for frame, cam_info in zip(unsolved_frames,
                                       map(self._get_pos, unsolved_frames)):
                if cam_info is not None:
                    new_cams_info.append((frame, cam_info))

            if len(new_cams_info) == 0:
                print(f'Can not get more camera positions, '
                      f'{self.num_of_frames - num_of_defined_poses}'
                      f' frames left without defined camera position')
                self.tracked_poses = [
                    CameraInfo(view_mat3x4_to_pose(eye3x4()), 0)
                    if pos is None else pos for pos in self.tracked_poses
                ]
                break

            best_frame = None
            best_new_cam_info = None

            for frame, cam_info in new_cams_info:
                if best_new_cam_info is None or best_new_cam_info.inliers < cam_info.inliers:
                    best_new_cam_info = cam_info
                    best_frame = frame

            print('Added camera position for frame ', best_frame)
            print('Number of inliers: ', best_new_cam_info.inliers)

            self.tracked_poses[best_frame] = best_new_cam_info

            self._retriangulate_3d_points(best_frame)

            if step_num % 4 == 0:
                self._update_camera_poses()
            step_num += 1
            num_of_defined_poses = np.sum([
                tracked_pos_info is not None
                for tracked_pos_info in self.tracked_poses
            ])
            print(
                f'{num_of_defined_poses}/{self.num_of_frames} camera positions found, {len(self.point_cloud)} points in cloud'
            )

        self._update_camera_poses()

        ids, cloud_points = [], []
        for pt_id, cloud_pt_info in self.point_cloud.items():
            ids.append(pt_id)
            cloud_points.append(cloud_pt_info.pos)
        return list(map(lambda tracked_pos_info: tracked_pos_info.pos, self.tracked_poses)), \
               PointCloudBuilder(np.array(ids), np.array(cloud_points))
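
Example 10 keeps per-frame results in CameraInfo objects and cloud points in objects with a pos field; neither type is shown. A plausible minimal shape for both, inferred from the attribute accesses above (the names and field sets are assumptions; CloudPointInfo in particular is hypothetical):

from collections import namedtuple

# pos: camera pose of the frame, inliers: PnP inlier count used for ranking.
CameraInfo = namedtuple('CameraInfo', ('pos', 'inliers'))

# pos: triangulated 3D position of a cloud point.
CloudPointInfo = namedtuple('CloudPointInfo', ('pos',))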