Example #1
0
 def _set_initial_poses(self, known_view_1, known_view_2):
     view_mat_1 = pose_to_view_mat3x4(known_view_1[1])
     view_mat_2 = pose_to_view_mat3x4(known_view_2[1])
     self.tracked_poses[known_view_1[0]] = CameraInfo(
         view_mat_1, float('inf'))
     self.tracked_poses[known_view_2[0]] = CameraInfo(
         view_mat_2, float('inf'))
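Note: all of the examples in this section call pose_to_view_mat3x4 from the course's camtrack helpers, whose source is not reproduced here. As a point of reference only, a minimal sketch of the assumed conversion (a Pose holding a camera-to-world rotation r_mat and translation t_vec, inverted into a 3x4 world-to-camera view matrix) might look like this:

import numpy as np

def pose_to_view_mat3x4_sketch(pose):
    # Assumption: pose.r_mat / pose.t_vec give the camera-to-world transform,
    # so the view matrix is the inverse transform, stacked as [R^T | -R^T t].
    r_inv = pose.r_mat.T
    return np.hstack((r_inv, -r_inv @ pose.t_vec.reshape(3, 1)))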
Example #2
0
def track_camera(corner_storage, intrinsic_mat, known_view1, known_view2):
    n = len(corner_storage)
    point_cloud_builder = PointCloudBuilder()
    view_mat = [eye3x4()] * n

    view_mat[known_view1[0]] = pose_to_view_mat3x4(known_view1[1])
    view_mat[known_view2[0]] = pose_to_view_mat3x4(known_view2[1])

    min_angle = 1.0
    max_error = 1.0
    new_points = []
    ids = []
    correspondences = build_correspondences(corner_storage[known_view1[0]],
                                            corner_storage[known_view2[0]])
    while len(new_points) < 10:
        new_points, ids, _ = triangulate_correspondences(
            correspondences, pose_to_view_mat3x4(known_view1[1]),
            pose_to_view_mat3x4(known_view2[1]), intrinsic_mat,
            TriangulationParameters(max_error, min_angle, 0))
        min_angle -= 0.2
        max_error += 0.4
    point_cloud_builder.add_points(ids, new_points)

    for i in range(known_view1[0] - 1, -1, -1):
        solve_frame(i, corner_storage, point_cloud_builder, view_mat,
                    intrinsic_mat, 1)

    for i in range(known_view1[0] + 1, n):
        solve_frame(i, corner_storage, point_cloud_builder, view_mat,
                    intrinsic_mat, -1)
    return view_mat, point_cloud_builder
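The while loop in Example #2 relaxes the triangulation thresholds until at least 10 points survive. The same idea can be factored into a standalone helper; the sketch below reuses the triangulate_correspondences / TriangulationParameters interface exactly as called above and adds a guard so the relaxation cannot loop indefinitely (the helper name is illustrative, not part of the original code):

def triangulate_with_relaxation(correspondences, view_1, view_2, intrinsic_mat,
                                min_points=10, min_angle=1.0, max_error=1.0):
    # Relax the angle/error thresholds step by step until enough points survive,
    # but stop once the angle threshold has been driven down to zero.
    while True:
        points, ids, _ = triangulate_correspondences(
            correspondences, view_1, view_2, intrinsic_mat,
            TriangulationParameters(max_error, min_angle, 0))
        if len(points) >= min_points or min_angle <= 0:
            return points, ids
        min_angle -= 0.2
        max_error += 0.4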
Example #3
0
    def __init__(self, corner_storage: CornerStorage,
                 intrinsic_mat: np.ndarray,
                 parameters: TriangulationParameters,
                 known_view_1: Tuple[int, Pose], known_view_2: Tuple[int,
                                                                     Pose]):
        self._corner_storage = corner_storage
        self._intrinsic_mat = intrinsic_mat
        self._triangulation_parameters = parameters
        self._length = len(corner_storage)
        self._builder = PointCloudBuilder()
        self._track = [None] * self._length

        self._track[known_view_1[0]] = pose_to_view_mat3x4(known_view_1[1])
        self._track[known_view_2[0]] = pose_to_view_mat3x4(known_view_2[1])
        self._update_cloud(known_view_1[0], known_view_2[0])
Example #4
0
def triangulate(ind_1,
                ind_2,
                pose_1,
                pose_2,
                corner_storage,
                intrinsic_mat,
                ids_to_remove,
                parameters=TriangulationParameters(8.0, 0, 2)):
    frame_corners_1, frame_corners_2 = corner_storage[ind_1], corner_storage[
        ind_2]
    correspondences = build_correspondences(frame_corners_1, frame_corners_2,
                                            ids_to_remove)
    view_1, view_2 = pose_to_view_mat3x4(pose_1), pose_to_view_mat3x4(pose_2)
    return triangulate_correspondences(correspondences, view_1, view_2,
                                       intrinsic_mat, parameters)
Example #5
0
    def __init__(self, corner_storage, intrinsic_mat, view_1, view_2):
        self.corner_storage = corner_storage
        self.frame_count = len(corner_storage)
        self.intrinsic_mat = intrinsic_mat
        self.triangulation_parameters = TriangulationParameters(1, 3, .1)
        self.view_mats = {}

        if view_1 is None or view_2 is None:
            print('Finding initial poses')
            pose1_idx, pose1, pose2_idx, pose2 = self.find_best_start_poses()
            print('Initial poses found in frames {0} and {1}'.format(pose1_idx, pose2_idx))
        else:
            pose1 = pose_to_view_mat3x4(view_1[1])
            pose2 = pose_to_view_mat3x4(view_2[1])
            pose1_idx = view_1[0]
            pose2_idx = view_2[0]

        corners_1 = self.corner_storage[pose1_idx]
        corners_2 = self.corner_storage[pose2_idx]

        corrs = build_correspondences(corners_1, corners_2)

        p, ids, med = triangulate_correspondences(corrs, pose1, pose2,
                                                  self.intrinsic_mat, self.triangulation_parameters)

        self.ids = ids
        self.point_cloud_builder = PointCloudBuilder(ids, p)
        self.point_frames = {}
        self.last_retriangulated = {}
        for i in ids:
            self.point_frames[i] = [pose1_idx, pose2_idx]
            self.last_retriangulated[i] = 2

        self.used_inliers = {}
        self.view_nones = {i for i in range(self.frame_count)}
        self.view_mats[pose1_idx] = pose1
        self.used_inliers[pose1_idx] = 0
        self.view_nones.remove(pose1_idx)
        self.view_mats[pose2_idx] = pose2
        self.used_inliers[pose2_idx] = 0
        self.view_nones.remove(pose2_idx)
        self.last_added_idx = pose2_idx
        self.last_inliers = []

        self.retriangulate_frames = 3
        self.max_repr_error = 1.5
        self.used_inliers_point = {}
        self.step = 0
Example #6
0
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    if known_view_1 is None or known_view_2 is None:
        raise NotImplementedError()

    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(
        camera_parameters,
        rgb_sequence[0].shape[0]
    )

    # TODO: implement
    frame_count = len(corner_storage)
    view_mats = [pose_to_view_mat3x4(known_view_1[1])] * frame_count
    corners_0 = corner_storage[0]
    point_cloud_builder = PointCloudBuilder(corners_0.ids[:1],
                                            np.zeros((1, 3)))

    calc_point_cloud_colors(
        point_cloud_builder,
        rgb_sequence,
        view_mats,
        intrinsic_mat,
        corner_storage,
        5.0
    )
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
Example #7
0
def pose_by_frames(frame1: FrameCorners, frame2: FrameCorners,
                   intrinsic_mat: np.ndarray) -> Tuple[Pose, int]:
    correspondences = build_correspondences(frame1, frame2)
    mat, mask = cv2.findEssentialMat(correspondences.points_1,
                                     correspondences.points_2, intrinsic_mat,
                                     cv2.RANSAC, 0.99, 1)

    if mat is None or mat.shape != (3, 3):
        return None, 0

    if mask is not None:
        correspondences = _remove_correspondences_with_ids(
            correspondences, np.argwhere(mask.flatten() == 0))

    R1, R2, t = cv2.decomposeEssentialMat(mat)
    max_pose = None
    max_npoints = 0
    for mat in [R1.T, R2.T]:
        for vec in [t, -t]:
            pose = Pose(mat, mat @ vec)
            points, _, _ = triangulate_correspondences(
                correspondences, eye3x4(), pose_to_view_mat3x4(pose),
                intrinsic_mat, INITIAL_TRIANGULATION_PARAMETERS)
            if len(points) > max_npoints:
                max_pose = pose
                max_npoints = len(points)
    return max_pose, max_npoints
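Several examples (e.g. #7, #8, #12, #16) resolve the four-fold ambiguity of cv2.decomposeEssentialMat the same way: build the four candidate relative poses and keep the one that triangulates the most valid points. A compact sketch of the candidate generation, assuming the same Pose convention used in those examples (illustrative helper, not part of any submission):

def essential_pose_candidates(essential_mat):
    # decomposeEssentialMat yields two rotations and a unit-norm translation;
    # the four (R, +-t) combinations are the classical candidate relative poses.
    r1, r2, t = cv2.decomposeEssentialMat(essential_mat)
    return [Pose(r.T, r.T @ (sign * t)) for r in (r1, r2) for sign in (1.0, -1.0)]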
Example #8
0
def calc_known_views(corner_storage, intrinsic_mat, indent=5):
    num_frames = len(corner_storage)
    known_view_1 = (None, None)
    known_view_2 = (None, None)
    num_points = -1

    for frame_1 in range(num_frames):
        for frame_2 in range(frame_1 + indent, num_frames):
            corrs = build_correspondences(corner_storage[frame_1],
                                          corner_storage[frame_2])

            if len(corrs.ids) < 6:
                continue

            points_1 = corrs.points_1
            points_2 = corrs.points_2

            H, mask_h = cv2.findHomography(points_1,
                                           points_2,
                                           method=cv2.RANSAC)
            if mask_h is None:
                continue

            mask_h = mask_h.reshape(-1)

            E, mask_e = cv2.findEssentialMat(points_1,
                                             points_2,
                                             method=cv2.RANSAC,
                                             cameraMatrix=intrinsic_mat)

            if mask_e is None:
                continue

            mask_e = mask_e.reshape(-1)

            if mask_h.sum() / mask_e.sum() > 0.5:
                continue

            corrs = Correspondences(corrs.ids[(mask_e == 1)],
                                    points_1[(mask_e == 1)],
                                    points_2[(mask_e == 1)])

            R1, R2, t = cv2.decomposeEssentialMat(E)

            for poss_pose in [
                    Pose(R1.T, R1.T @ t),
                    Pose(R1.T, R1.T @ (-t)),
                    Pose(R2.T, R2.T @ t),
                    Pose(R2.T, R2.T @ (-t))
            ]:
                points3d, _, _ = triangulate_correspondences(
                    corrs, eye3x4(), pose_to_view_mat3x4(poss_pose),
                    intrinsic_mat, TriangulationParameters(1, 2, .1))

                if len(points3d) > num_points:
                    num_points = len(points3d)
                    known_view_1 = (frame_1, view_mat3x4_to_pose(eye3x4()))
                    known_view_2 = (frame_2, poss_pose)
    return known_view_1, known_view_2
Example #9
0
def select_frames(frames, corner_storage, camera_matrix):
    frame1 = (0, view_mat3x4_to_pose(eye3x4()))
    frame2 = (-1, view_mat3x4_to_pose(eye3x4()))

    mx = 0

    for i in range(1, len(frames)):
        correspondences = build_correspondences(corner_storage[frame1[0]],
                                                corner_storage[i])
        if len(correspondences.ids) < 8:
            continue

        E, mask = cv2.findEssentialMat(correspondences.points_1,
                                       correspondences.points_2,
                                       camera_matrix,
                                       method=cv2.RANSAC)

        if mask is None:
            continue

        correspondences = _remove_correspondences_with_ids(
            correspondences, np.argwhere(mask.flatten() == 0))
        if len(correspondences.ids) < 8 or not validate(correspondences, mask):
            continue

        R1, R2, t = cv2.decomposeEssentialMat(E)
        ps = [
            Pose(R1.T, R1.T @ t),
            Pose(R2.T, R2.T @ t),
            Pose(R1.T, R1.T @ (-t)),
            Pose(R2.T, R2.T @ (-t))
        ]

        for pose in ps:
            points, _, _ = triangulate_correspondences(
                correspondences, pose_to_view_mat3x4(frame1[1]),
                pose_to_view_mat3x4(pose), camera_matrix, TRIANG_PARAMS)

            if len(points) > mx:
                frame2 = (i, pose)
                mx = len(points)

    print(frame1[0], frame2[0])

    return frame1, frame2
Example #10
0
def calc_starting_points(
        intrinsic_mat: np.ndarray, corner_storage: CornerStorage,
        known_view_1: Tuple[int, Pose],
        known_view_2: Tuple[int, Pose]) -> Tuple[np.ndarray, np.ndarray]:
    id1, pose1 = known_view_1
    id2, pose2 = known_view_2

    corners1 = corner_storage[id1]
    corners2 = corner_storage[id2]

    mat1 = pose_to_view_mat3x4(pose1)
    mat2 = pose_to_view_mat3x4(pose2)

    correspondences = build_correspondences(corners1, corners2)
    points_3d, points_ids, _ = triangulate_correspondences(
        correspondences, mat1, mat2, intrinsic_mat,
        INITIAL_TRIANGULATION_PARAMETERS)
    return points_3d, points_ids
Example #11
0
    def __init__(self, intrinsic_mat, corner_storage, known_view_1, known_view_2):
        self.intrinsic_mat = intrinsic_mat
        self.corner_storage = corner_storage
        frame_num_1 = known_view_1[0]
        frame_num_2 = known_view_2[0]
        self.pc_builder = PointCloudBuilder()
        view_matr_1 = pose_to_view_mat3x4(known_view_1[1])
        view_matr_2 = pose_to_view_mat3x4(known_view_2[1])

        self.tracked_views = [eye3x4()] * len(self.corner_storage)
        self.tracked_views[frame_num_1] = view_matr_1
        self.tracked_views[frame_num_2] = view_matr_2
        self.initial_baseline = get_baseline(view_matr_1, view_matr_2)
        print("initial baseline =", self.initial_baseline)

        self._add_points_from_frame(frame_num_1, frame_num_2, initial_triangulation=True)
        self.min_init_frame = min(frame_num_1, frame_num_2)
        self.max_init_frame = max(frame_num_1, frame_num_2)
        self.outliers = set()
Example #12
0
def get_pose_with_score(frame_1, frame_2, corner_storage, intrinsic_mat):
    corners1, corners2 = corner_storage[frame_1], corner_storage[frame_2]

    correspondences = build_correspondences(corners1, corners2)

    essential_mat, mask_essential = cv2.findEssentialMat(correspondences[1],
                                                         correspondences[2],
                                                         intrinsic_mat,
                                                         method=cv2.RANSAC,
                                                         threshold=1.0)

    if essential_mat is None or mask_essential is None:
        return None, 0

    _, mask_homography = cv2.findHomography(correspondences[1],
                                            correspondences[2],
                                            method=cv2.RANSAC)

    if mask_homography is None:
        return None, 0

    essential_inliers = mask_essential.flatten().sum()
    homography_inliers = mask_homography.flatten().sum()

    if homography_inliers > essential_inliers * 0.5:
        return None, 0

    correspondences = _remove_correspondences_with_ids(
        correspondences, np.argwhere(mask_essential == 0))

    R1, R2, t = cv2.decomposeEssentialMat(essential_mat)

    candidates = [
        Pose(R1.T, R1.T @ t),
        Pose(R1.T, R1.T @ (-t)),
        Pose(R2.T, R2.T @ t),
        Pose(R2.T, R2.T @ (-t))
    ]

    best_pose_score, best_pose = 0, None

    triangulation_parameters = TriangulationParameters(1, 2, .1)
    for pose in candidates:
        points, _, _ = triangulate_correspondences(correspondences, eye3x4(),
                                                   pose_to_view_mat3x4(pose),
                                                   intrinsic_mat,
                                                   triangulation_parameters)
        if len(points) > best_pose_score:
            best_pose_score = len(points)
            best_pose = pose

    return best_pose, best_pose_score
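Examples #8, #12 and #16 also share the degeneracy heuristic of comparing homography inliers against essential-matrix inliers before accepting an initialization pair. A self-contained sketch of that check, using the same OpenCV calls as above (the function name and the 0.5 default mirror the examples but are otherwise illustrative):

def pair_is_degenerate(points_1, points_2, intrinsic_mat, ratio=0.5):
    # If a homography explains a large share of the inliers, the pair is likely
    # near-planar or has too little parallax for a reliable initialization.
    _, mask_e = cv2.findEssentialMat(points_1, points_2, intrinsic_mat,
                                     method=cv2.RANSAC, threshold=1.0)
    _, mask_h = cv2.findHomography(points_1, points_2, method=cv2.RANSAC)
    if mask_e is None or mask_h is None:
        return True
    return mask_h.sum() > ratio * mask_e.sum()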
Example #13
0
 def find_best_start_poses(self):
     best_i = 0
     best_j = 0
     best_j_pose = None
     best_pose_points = 0
     step = max(1, self.frame_count // 20)  # guard against a zero step on short sequences
     for i in range(self.frame_count):
         for j in range(i + 5, self.frame_count, step):
             pose, pose_points = self.get_pose(i, j)
             if pose_points > best_pose_points:
                 best_i = i
                 best_j = j
                 best_j_pose = pose
                 best_pose_points = pose_points
     return best_i, eye3x4(), best_j, pose_to_view_mat3x4(best_j_pose)
Example #14
0
def find_view_by_two_frames(ids_1: np.ndarray, ids_2: np.ndarray,
                            corners_1: np.ndarray, corners_2: np.ndarray,
                            intrinsic_mat: np.ndarray) \
    -> Tuple[Optional[np.ndarray], Optional[np.ndarray], int]:
    correspondences = build_correspondences(ids_1, ids_2, corners_1, corners_2)

    if len(correspondences.ids) < 7:
        return None, None, 0

    mat, mat_mask = cv2.findEssentialMat(
        correspondences.points_1,
        correspondences.points_2,
        intrinsic_mat,
        method=cv2.RANSAC,
        prob=RANSAC_P,
        threshold=MAX_EPIPOL_LINE_DIST
    )

    if mat is None or mat.shape != (3, 3) or mat_mask is None:
        return None, None, 0

    mat_mask = mat_mask.flatten()
    correspondences = remove_correspondences_with_ids(
        correspondences, np.argwhere(mat_mask == 0).astype(np.int32))

    view_1 = eye3x4()

    best_view = None
    best_count = -1

    rotation_1, rotation_2, translation = cv2.decomposeEssentialMat(mat)
    rotation_1, rotation_2 = rotation_1.T, rotation_2.T
    for r in (rotation_1, rotation_2):
        for t in (translation, -translation):
            view_2 = pose_to_view_mat3x4(Pose(r, r @ t))

            _, point_ids, _ = triangulate_correspondences(
                correspondences,
                view_1, view_2,
                intrinsic_mat,
                TRIANGULATION_PARAMETERS
            )

            if best_count < len(point_ids):
                best_view = view_2
                best_count = len(point_ids)

    return view_1, best_view, best_count
Example #15
0
 def __init__(self,
              camera_parameters: CameraParameters,
              corner_storage: CornerStorage,
              frame_sequence_path: str,
              known_view_1: Optional[Tuple[int, Pose]],
              known_view_2: Optional[Tuple[int, Pose]]
              ):
     self.rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
     self.frame_count = len(self.rgb_sequence)
     self.camera_parameters = camera_parameters
     self.corner_storage = without_short_tracks(corner_storage, CORNER_MIN_TRACK_LENGTH)
     self.intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters, self.rgb_sequence[0].shape[0])
     self.point_cloud_builder = PointCloudBuilder()
     self.known_view_mats = {}
     self.unknown_view_ids = set(range(self.frame_count))
     if known_view_1 is None or known_view_2 is None:
         self.initialize_known_views()
     else:
         for (frame_idx, pose) in (known_view_1, known_view_2):
             self.update_view(frame_idx, pose_to_view_mat3x4(pose))
Example #16
0
    def get_pose(self, frame_1, frame_2):
        p1 = self.corner_storage[frame_1]
        p2 = self.corner_storage[frame_2]

        corresp = build_correspondences(p1, p2)

        if len(corresp.ids) < 6:
            return None, 0

        E, mask_essential = cv2.findEssentialMat(corresp[1], corresp[2], self.intrinsic_mat, method=cv2.RANSAC,
                                                 threshold=1.0)

        _, mask_homography = cv2.findHomography(corresp[1], corresp[2], method=cv2.RANSAC)

        if mask_essential is None or mask_homography is None:
            return None, 0

        essential_inliers = mask_essential.flatten().sum()
        homography_inliers = mask_homography.flatten().sum()

        if homography_inliers / essential_inliers > 0.5:
            return None, 0

        corresp = _remove_correspondences_with_ids(corresp, np.argwhere(mask_essential == 0))

        R1, R2, t = cv2.decomposeEssentialMat(E)

        possible_poses = [Pose(R1.T, R1.T @ t), Pose(R1.T, R1.T @ (-t)), Pose(R2.T, R2.T @ t), Pose(R2.T, R2.T @ (-t))]

        best_pose_points = 0
        best_pose = None

        for pose in possible_poses:
            p, ids, med = triangulate_correspondences(corresp, eye3x4(), pose_to_view_mat3x4(pose), self.intrinsic_mat, self.triangulation_parameters)
            if len(p) > best_pose_points:
                best_pose_points = len(p)
                best_pose = pose

        return best_pose, best_pose_points
Example #17
0
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    if known_view_1 is None or known_view_2 is None:
        raise NotImplementedError()

    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    view_mats = np.zeros((len(corner_storage), 3, 4), dtype=np.float64)
    processed_frames = np.zeros(len(corner_storage), dtype=bool)
    points3d = np.zeros((corner_storage.max_corner_id() + 1, 3),
                        dtype=np.float64)
    added_points = np.zeros(corner_storage.max_corner_id() + 1, dtype=bool)

    print('Trying to extract 3d points from known frames...')

    view_mats[known_view_1[0]] = pose_to_view_mat3x4(known_view_1[1])
    view_mats[known_view_2[0]] = pose_to_view_mat3x4(known_view_2[1])
    processed_frames[known_view_1[0]] = processed_frames[
        known_view_2[0]] = True
    extracted_points = extract_points(corner_storage[known_view_1[0]],
                                      corner_storage[known_view_2[0]],
                                      view_mats[known_view_1[0]],
                                      view_mats[known_view_2[0]],
                                      intrinsic_mat)
    if not extracted_points:
        print(
            'Extracting 3d points from known frames failed: '
            'either there are no common points, or the triangulation angle between frames is too small.\n'
            'Try choosing other initial frames.',
            file=sys.stderr)
        exit(0)
    print('Succeeded! Trying to build point cloud...')

    added_points[extracted_points[0]] = True
    points3d[extracted_points[0]] = extracted_points[1]

    was_updated = True
    while was_updated:
        was_updated = False
        unprocessed_frames = np.arange(len(corner_storage),
                                       dtype=np.int32)[~processed_frames]
        for frame in unprocessed_frames:
            points3d_ids = np.arange(corner_storage.max_corner_id() + 1,
                                     dtype=np.int32)[added_points]
            common, (indices1, indices2) = snp.intersect(
                points3d_ids,
                corner_storage[frame].ids.flatten(),
                indices=True)
            if len(common) <= 5:
                continue

            try:
                print(f'Processing frame {frame}: ', end='')
                retval, rvec, tvec, inliers = cv2.solvePnPRansac(
                    objectPoints=points3d[common],
                    imagePoints=corner_storage[frame].points[indices2],
                    cameraMatrix=intrinsic_mat,
                    distCoeffs=None)
            except Exception:  # solvePnPRansac may raise cv2.error on degenerate input
                retval = False

            if not retval:
                print(f'failed to solve PnP RANSAC, trying another frame')
                continue

            print(f'succeeded, found {len(inliers)} inliers')
            was_updated = True
            view_mats[frame] = rodrigues_and_translation_to_view_mat3x4(
                rvec, tvec)
            for processed_frame in np.arange(len(corner_storage),
                                             dtype=np.int32)[processed_frames]:
                extracted_points = extract_points(
                    corner_storage[frame], corner_storage[processed_frame],
                    view_mats[frame], view_mats[processed_frame],
                    intrinsic_mat, points3d_ids)

                if extracted_points:
                    added_points[extracted_points[0]] = True
                    points3d[extracted_points[0]] = extracted_points[1]
            processed_frames[frame] = True
            print(f'Current point cloud contains {sum(added_points)} points')

    for _ in range(2):
        for i in range(1, len(corner_storage)):
            if not processed_frames[i]:
                processed_frames[i] = True
                view_mats[i] = view_mats[i - 1]
        processed_frames = processed_frames[::-1]
        view_mats = view_mats[::-1]

    print(
        f'Finished building point cloud, now it contains {sum(added_points)} points'
    )

    point_cloud_builder = PointCloudBuilder(
        ids=np.arange(corner_storage.max_corner_id() + 1,
                      dtype=np.int32)[added_points],
        points=points3d[added_points])

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
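Example #17 (and several examples below) convert the cv2.solvePnPRansac output into a view matrix with rodrigues_and_translation_to_view_mat3x4, another course helper whose source is not shown here. A minimal sketch of what it is assumed to do:

def rodrigues_and_translation_to_view_mat3x4_sketch(rvec, tvec):
    # solvePnPRansac returns the world-to-camera rotation as a Rodrigues vector;
    # converting it to a matrix and stacking the translation gives the 3x4 view matrix.
    r_mat, _ = cv2.Rodrigues(rvec)
    return np.hstack((r_mat, tvec.reshape(3, 1)))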
Example #18
0
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    if known_view_1 is None or known_view_2 is None:
        raise NotImplementedError()

    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    triang_params = TriangulationParameters(
        max_reprojection_error=MAX_PROJ_ERROR,
        min_triangulation_angle_deg=MIN_TRIANG_ANG,
        min_depth=MIN_DEPTH)

    correspondences = build_correspondences(corner_storage[known_view_1[0]],
                                            corner_storage[known_view_2[0]])
    view_mat_1 = pose_to_view_mat3x4(known_view_1[1])
    view_mat_2 = pose_to_view_mat3x4(known_view_2[1])
    points, ids, median_cos = triangulate_correspondences(
        correspondences, view_mat_1, view_mat_2, intrinsic_mat, triang_params)
    if len(points) < 10:
        print("\rPlease, try other frames")
        exit(0)

    view_mats = [None] * len(rgb_sequence)
    view_mats[known_view_1[0]] = view_mat_1
    view_mats[known_view_2[0]] = view_mat_2

    point_cloud_builder = PointCloudBuilder(ids, points)
    was_update = True
    while was_update:
        was_update = False
        for i, (frame, corners) in enumerate(zip(rgb_sequence,
                                                 corner_storage)):
            if view_mats[i] is not None:
                continue
            _, (idx_1,
                idx_2) = snp.intersect(point_cloud_builder.ids.flatten(),
                                       corners.ids.flatten(),
                                       indices=True)
            try:
                retval, rvec, tvec, inliers = cv2.solvePnPRansac(
                    point_cloud_builder.points[idx_1],
                    corners.points[idx_2],
                    intrinsic_mat,
                    distCoeffs=None)
                inliers = np.array(inliers, dtype=int)
                if len(inliers) > 0:
                    view_mats[i] = rodrigues_and_translation_to_view_mat3x4(
                        rvec, tvec)
                    was_update = True
                print(
                    f"\rFrame {i} out of {len(rgb_sequence)}, inliners: {len(inliers)}",
                    end="")
            except Exception:
                print(f"\rFrame {i} out of {len(rgb_sequence)}, inliners: {0}",
                      end="")
            if view_mats[i] is None:
                continue
            cur_corner = filter_frame_corners(corner_storage[i],
                                              inliers.flatten())

            for j in range(len(rgb_sequence)):
                if view_mats[j] is None:
                    continue
                correspondences = build_correspondences(
                    corner_storage[j], cur_corner)
                if len(correspondences.ids) == 0:
                    continue
                points, ids, median_cos = triangulate_correspondences(
                    correspondences, view_mats[j], view_mats[i], intrinsic_mat,
                    triang_params)
                point_cloud_builder.add_points(ids, points)

        print()

    first_mat = next((mat for mat in view_mats if mat is not None), None)
    if first_mat is None:
        print("\rFail")
        exit(0)

    view_mats[0] = first_mat
    for i in range(1, len(view_mats)):
        if view_mats[i] is None:
            view_mats[i] = view_mats[i - 1]
    view_mats = np.array(view_mats)

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
Example #19
0
def track_and_calc_colors(
    camera_parameters: CameraParameters,
    corner_storage: CornerStorage,
    frame_sequence_path: str,
    known_view_1: Optional[Tuple[int, Pose]] = None,
    known_view_2: Optional[Tuple[int, Pose]] = None
) -> Tuple[List[Pose], PointCloud]:
    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])
    if known_view_1 is None or known_view_2 is None:
        known_view_1, known_view_2 = initial_camera_views(
            corner_storage, intrinsic_mat)
    print(known_view_1[1])
    print(known_view_2[1])

    print(known_view_1[0], known_view_2[0])

    print(intrinsic_mat)

    points_3d, points_ids = calc_starting_points(intrinsic_mat, corner_storage,
                                                 known_view_1, known_view_2)

    #initial_points_3d, initial_points_ids = points_3d.copy(), points_ids.copy()

    n_points = corner_storage.max_corner_id() + 1
    res_points_3d = np.full((n_points, 3), None)
    #res_points_3d[points_ids] = points_3d

    view_mats = [np.full((3, 4), None) for _ in range(len(corner_storage))]
    view_mats[known_view_1[0]] = pose_to_view_mat3x4(known_view_1[1])
    view_mats[known_view_2[0]] = pose_to_view_mat3x4(known_view_2[1])

    print(f'len(corner_storage):{len(corner_storage)}')

    id1 = known_view_1[0]
    for n_frame, corners in enumerate(corner_storage[id1 + 1:], id1 + 1):
        common_obj, common_img, common_ids = get_common_points(
            corners, points_ids, points_3d)

        mat, points_3d, points_ids, reprojection_error = build_view_mat(
            common_obj, common_img, common_ids, intrinsic_mat)
        points_ids = np.array(points_ids)
        view_mats[n_frame] = mat

        is_null = np.array([(x[0] is None) for x in res_points_3d[points_ids]])
        #res_points_3d[points_ids[is_null]] = points_3d[is_null]

        n_inliers = len(points_3d)

        points_3d, points_ids = calc_new_points(n_frame, intrinsic_mat,
                                                corner_storage, view_mats,
                                                points_3d, points_ids)

        print(
            f"Processing frame #{n_frame}. Number of inliers: {n_inliers}. "
            f"Reprojection error: {reprojection_error}. Tracking points: {len(common_img)}"
        )

    for n_frame, corners in list(enumerate(corner_storage))[::-1]:
        common_obj, common_img, common_ids = get_common_points(
            corners, points_ids, points_3d)

        mat, points_3d, points_ids, reprojection_error = build_view_mat(
            common_obj, common_img, common_ids, intrinsic_mat)
        view_mats[n_frame] = mat

        is_null = np.array([(x[0] is None) for x in res_points_3d[points_ids]])
        #res_points_3d[points_ids[is_null]] = points_3d[is_null]

        n_inliers = len(points_3d)

        points_3d, points_ids = calc_new_points(n_frame, intrinsic_mat,
                                                corner_storage, view_mats,
                                                points_3d, points_ids)

        print(
            f"Processing frame #{n_frame}. Number of inliers: {n_inliers}. "
            f"Reprojection error: {reprojection_error}. Tracking points: {len(common_img)}"
        )

    # Approximating

    n_iter = 1
    for iter in range(n_iter):
        for n_frame, corners in enumerate(corner_storage):
            common_obj, common_img, common_ids = get_common_points(
                corners, points_ids, points_3d)

            mat, points_3d, points_ids, reprojection_error = build_view_mat(
                common_obj, common_img, common_ids, intrinsic_mat)

            view_mats[n_frame] = mat
            if iter == n_iter - 1:
                res_points_3d[points_ids] = points_3d

            n_inliers = len(points_3d)

            points_3d, points_ids = calc_new_points(n_frame, intrinsic_mat,
                                                    corner_storage, view_mats,
                                                    points_3d, points_ids)

            print(
                f"Processing frame #{n_frame}. Number of inliers: {n_inliers}. "
                f"Reprojection error: {reprojection_error}. Tracking points: {len(common_img)}"
            )

        for n_frame, corners in list(enumerate(corner_storage))[::-1]:
            common_obj, common_img, common_ids = get_common_points(
                corners, points_ids, points_3d)

            mat, points_3d, points_ids, reprojection_error = build_view_mat(
                common_obj, common_img, common_ids, intrinsic_mat)

            view_mats[n_frame] = mat
            if iter == n_iter - 1:
                res_points_3d[points_ids] = points_3d

            n_inliers = len(points_3d)

            points_3d, points_ids = calc_new_points(n_frame, intrinsic_mat,
                                                    corner_storage, view_mats,
                                                    points_3d, points_ids)

            print(
                f"Processing frame #{n_frame}. Number of inliers: {n_inliers}. "
                f"Reprojection error: {reprojection_error}. Tracking points: {len(common_img)}"
            )

    res_points_ids = np.array(
        [i for i, x in enumerate(res_points_3d) if x[0] is not None])
    res_points_3d = np.array(res_points_3d[res_points_ids], dtype=float)

    point_cloud_builder = PointCloudBuilder(ids=res_points_ids,
                                            points=res_points_3d)

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)

    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
Example #20
0
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    if known_view_1 is None or known_view_2 is None:
        raise NotImplementedError()
    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])
    i_1 = known_view_1[0]
    i_2 = known_view_2[0]
    print('Known frames are', i_1, 'and', i_2)
    global INLIERS_MIN_SIZE, DELTA, MIN_SIZE, TRIANG_PARAMS
    view_mat3x4_1 = pose_to_view_mat3x4(known_view_1[1])
    view_mat3x4_2 = pose_to_view_mat3x4(known_view_2[1])
    correspondences = build_correspondences(corner_storage[i_1],
                                            corner_storage[i_2])
    print(len(correspondences.ids))
    points3d, corr_ids, median_cos = triangulate_correspondences(
        correspondences, view_mat3x4_1, view_mat3x4_2, intrinsic_mat,
        TRIANG_PARAMS)
    view_mats = [view_mat3x4_1 for _ in corner_storage]
    point_cloud_builder = PointCloudBuilder(corr_ids.astype(np.int64), points3d)
    err_indexes = set(range(len(corner_storage)))
    err_indexes.remove(i_1)
    err_indexes.remove(i_2)
    res_code, rodrig, inliers_1, cloud_points = get_ransac(
        point_cloud_builder, corner_storage[i_1], intrinsic_mat)
    view_mats[i_1] = rodrig
    res_code, rodrig, inliers_2, cloud_points = get_ransac(
        point_cloud_builder, corner_storage[i_2], intrinsic_mat)
    view_mats[i_2] = rodrig
    INLIERS_MIN_SIZE = min(len(inliers_1), len(inliers_2)) - 20 * max(
        len(corner_storage) // 100, 1)
    descr_template = 'Point cloud calc - {} inliers, {} points found, cloud size is {}'

    tqdm_iterator = tqdm(range(len(corner_storage) - 2),
                         total=len(corner_storage),
                         desc=descr_template.format('?', '?', '?'))

    params = [
        corner_storage, view_mats, point_cloud_builder, intrinsic_mat,
        err_indexes, descr_template, tqdm_iterator
    ]
    DELTA = 5 * max(len(corner_storage) // 100, 1)
    MIN_SIZE = 20 * max(len(corner_storage) // 100, 1)
    TRIANG_PARAMS = TriangulationParameters(2, 1e-2, 1e-2)
    for i in range(i_1 + 1, i_2):
        update(i, *params)
    print('Points between handled')
    DELTA = 20 * max(len(corner_storage) // 100, 1)
    MIN_SIZE = 20 * max(len(corner_storage) // 100, 1)
    TRIANG_PARAMS = TriangulationParameters(4, 1e-2, 1e-2)
    for i in range(i_1, -1, -1):
        if i in err_indexes:
            update(i, *params)
    for i in range(i_2, len(corner_storage)):
        if i in err_indexes:
            update(i, *params)

    for i in range(i_1, -1, -1):
        if i in err_indexes:
            view_mats[i] = view_mats[i + 1]

    for i in range(i_2, len(corner_storage)):
        if i in err_indexes:
            view_mats[i] = view_mats[i - 1]
    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
Example #21
0
def track_and_find_point_cloud(intrinsic_mat, corner_storage, known_view_1, known_view_2):
    num_frames = len(corner_storage)

    view_mats = [None] * num_frames
    point_cloud_builder = PointCloudBuilder()

    print(f'Frames with known views: {known_view_1[0]} and {known_view_2[0]}')
    view_mats[known_view_1[0]] = pose_to_view_mat3x4(known_view_1[1])
    view_mats[known_view_2[0]] = pose_to_view_mat3x4(known_view_2[1])

    triang_params = TriangulationParameters(max_reprojection_error=MAX_REPR_ERR,
                                            min_triangulation_angle_deg=MIN_TRIANG_ANGLE_DEG,
                                            min_depth=MIN_DEPTH)

    add_points_to_cloud(point_cloud_builder,
                        corner_storage[known_view_1[0]],
                        corner_storage[known_view_2[0]],
                        view_mats[known_view_1[0]],
                        view_mats[known_view_2[0]],
                        intrinsic_mat,
                        triang_params)

    while True:
        was_updated = False

        for i in range(num_frames):
            if view_mats[i] is not None:
                continue

            print(f"\nCurrent frame: {i}")

            # find intersection of current point cloud and current frame
            corners = corner_storage[i]
            corner_ids = []
            points_3d = []
            points_2d = []

            for id, corner in zip(corners.ids, corners.points):
                if id not in point_cloud_builder.ids:
                    continue
                ind_in_builder, _ = np.nonzero(point_cloud_builder.ids == id)
                corner_ids.append(id)
                points_3d.append(point_cloud_builder.points[ind_in_builder[0]])
                points_2d.append(corner)

            print(f"Size of intersection of current point cloud and current frame: {len(corner_ids)}")

            if len(corner_ids) < 5:
                continue

            points_3d = np.array(points_3d)
            points_2d = np.array(points_2d)

            retval, rvec, tvec, inliers = cv2.solvePnPRansac(points_3d.reshape((-1, 1, 3)),
                                                             points_2d.reshape((-1, 1, 2)),
                                                             cameraMatrix=intrinsic_mat,
                                                             distCoeffs=None,
                                                             reprojectionError=PNP_REPROJ_ERROR)

            if not retval:
                print("Unsuccessful solution of PnP")
                continue

            print(f"Position found by {len(inliers)} inliers")

            view_mat = rodrigues_and_translation_to_view_mat3x4(rvec, tvec)
            view_mats[i] = view_mat

            for j in range(num_frames):
                if i != j and view_mats[j] is not None:
                    points_added = add_points_to_cloud(point_cloud_builder,
                                        corner_storage[i],
                                        corner_storage[j],
                                        view_mats[i],
                                        view_mats[j],
                                        intrinsic_mat,
                                        triang_params)
                    if points_added > 0:
                        was_updated = True

            print(f"Current size of point cloud: {point_cloud_builder.points.size}")

        if was_updated is False:
            break

    for i in range(num_frames):
        if view_mats[i] is None:
            view_mats[i] = view_mats[i - 1]

    return view_mats, point_cloud_builder
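Example #21 matches 2D corners to the 3D cloud one corner at a time with np.nonzero inside a Python loop. The same intersection can be done in a single vectorised call, as Example #22 does with np.intersect1d; a sketch of a drop-in replacement for that inner loop (same variable names as in Example #21 assumed):

_, idx_cloud, idx_frame = np.intersect1d(point_cloud_builder.ids.flatten(),
                                         corners.ids.flatten(),
                                         return_indices=True)
points_3d = point_cloud_builder.points[idx_cloud]
points_2d = corners.points[idx_frame]
corner_ids = corners.ids.flatten()[idx_frame]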
Example #22
0
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:

    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    frames_num = len(rgb_sequence)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    if known_view_1 is None or known_view_2 is None:
        known_view_1, known_view_2 = select_frames(rgb_sequence,
                                                   corner_storage,
                                                   intrinsic_mat)
        if known_view_2[0] == -1:
            print("Failed to find good starting frames")
            exit(0)

    correspondences = build_correspondences(corner_storage[known_view_1[0]],
                                            corner_storage[known_view_2[0]])
    view_mat1 = pose_to_view_mat3x4(known_view_1[1])
    view_mat2 = pose_to_view_mat3x4(known_view_2[1])

    points, ids, _ = triangulate_correspondences(correspondences, view_mat1,
                                                 view_mat2, intrinsic_mat,
                                                 TRIANG_PARAMS)

    if len(ids) < 8:
        print("Bad frames: too few correspondences!")
        exit(0)

    view_mats = [None] * frames_num
    point_cloud_builder = PointCloudBuilder(ids, points)
    view_mats[known_view_1[0]] = view_mat1
    view_mats[known_view_2[0]] = view_mat2

    updated = True
    pass_num = 0
    while updated:
        updated = False
        pass_num += 1

        for i in range(frames_num):
            if view_mats[i] is not None:
                continue

            corners = corner_storage[i]

            _, ind1, ind2 = np.intersect1d(point_cloud_builder.ids,
                                           corners.ids.flatten(),
                                           return_indices=True)
            try:
                _, rvec, tvec, inliers = cv2.solvePnPRansac(
                    point_cloud_builder.points[ind1],
                    corners.points[ind2],
                    intrinsic_mat,
                    distCoeffs=None)

                if inliers is None:
                    print(f"Pass {pass_num}, frame {i}. No inliers!")
                    continue

                print(
                    f"Pass {pass_num}, frame {i}. Number of inliers == {len(inliers)}"
                )

                view_mats[i] = rodrigues_and_translation_to_view_mat3x4(
                    rvec, tvec)
            except Exception:
                continue

            updated = True

            cloud_changed = add_points(point_cloud_builder, corner_storage, i,
                                       view_mats, intrinsic_mat)
            if cloud_changed:
                print(f"Size of point cloud == {len(point_cloud_builder.ids)}")

    first_not_none = next(item for item in view_mats if item is not None)
    if view_mats[0] is None:
        view_mats[0] = first_not_none

    for i in range(frames_num):
        if view_mats[i] is None:
            view_mats[i] = view_mats[i - 1]

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)

    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
Example #23
0
def track_and_calc_colors(
    camera_parameters: CameraParameters,
    corner_storage: CornerStorage,
    frame_sequence_path: str,
    known_view_1: Optional[Tuple[int, Pose]] = None,
    known_view_2: Optional[Tuple[int, Pose]] = None
) -> Tuple[List[Pose], PointCloud]:
    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    if known_view_1 is None or known_view_2 is None:
        known_view_1, known_view_2 = calculate_known_views(
            intrinsic_mat, corner_storage)
        print('Calculated known views: {} and {}'.format(
            known_view_1[0], known_view_2[0]))

    random.seed(239)

    view_mats = np.full((len(rgb_sequence), ), None)
    view_mats[known_view_1[0]] = pose_to_view_mat3x4(known_view_1[1])
    view_mats[known_view_2[0]] = pose_to_view_mat3x4(known_view_2[1])

    point_cloud_builder = PointCloudBuilder()

    def triangulate_and_add_points(corners1, corners2, view_mat1, view_mat2):
        points3d, ids, median_cos = triangulate_correspondences(
            build_correspondences(corners1, corners2), view_mat1, view_mat2,
            intrinsic_mat, triang_params)
        point_cloud_builder.add_points(ids, points3d)

    triangulate_and_add_points(corner_storage[known_view_1[0]],
                               corner_storage[known_view_2[0]],
                               view_mats[known_view_1[0]],
                               view_mats[known_view_2[0]])

    inliers_points3d, inliers_points2d = None, None

    def find_errs(rtvecs):
        rvecs, tvecs = rtvecs[:3], rtvecs[3:]
        rmat, _ = cv2.Rodrigues(np.expand_dims(rvecs, axis=1))
        rmat = np.concatenate((rmat, np.expand_dims(tvecs, axis=1)), axis=1)
        return (project_points(inliers_points3d, intrinsic_mat @ rmat) -
                inliers_points2d).flatten()

    while True:
        random_ids = list(range(len(view_mats)))
        random.shuffle(random_ids)
        found_new_view_mat = False
        for i in random_ids:
            if view_mats[i] is not None:
                continue
            common, (ids1,
                     ids2) = snp.intersect(point_cloud_builder.ids.flatten(),
                                           corner_storage[i].ids.flatten(),
                                           indices=True)
            if len(common) <= 10:
                continue

            points3d = point_cloud_builder.points[ids1]
            points2d = corner_storage[i].points[ids2]
            retval, rvecs, tvecs, inliers = cv2.solvePnPRansac(
                points3d,
                points2d,
                intrinsic_mat,
                iterationsCount=108,
                reprojectionError=triang_params.max_reprojection_error,
                distCoeffs=None,
                confidence=0.999,
                flags=cv2.SOLVEPNP_EPNP)
            if retval:
                # M-estimators: robust (Huber) refinement of the PnP solution
                inliers_points3d = points3d[inliers.flatten()]
                inliers_points2d = points2d[inliers.flatten()]
                rtvecs = least_squares(find_errs,
                                       x0=np.concatenate(
                                           (rvecs, tvecs)).flatten(),
                                       loss='huber',
                                       method='trf').x
                rvecs = rtvecs[:3].reshape((-1, 1))
                tvecs = rtvecs[3:].reshape((-1, 1))
            if not retval:
                continue

            print(
                'Iteration {}/{}, processing {}th frame: {} inliers, {} points in point cloud'
                .format(
                    len([v for v in view_mats if v is not None]) - 1,
                    len(rgb_sequence) - 2, i, len(inliers),
                    len(point_cloud_builder.points)))

            view_mats[i] = rodrigues_and_translation_to_view_mat3x4(
                rvecs, tvecs)
            found_new_view_mat = True

            # Add new points
            inliers = np.array(inliers).astype(int).flatten()
            inlier_corners = FrameCorners(
                *[c[inliers] for c in corner_storage[i]])
            for j in range(len(view_mats)):
                if view_mats[j] is not None:
                    triangulate_and_add_points(corner_storage[j],
                                               inlier_corners, view_mats[j],
                                               view_mats[i])

            sys.stdout.flush()

        if not found_new_view_mat:
            break

    for i in range(0, len(view_mats)):
        if view_mats[i] is None:
            print('Tracking failed: not all frames were processed :(')
            exit(1)

    if len(view_mats) < 100:  # otherwise it takes too long
        view_mats = bundle_adjustment(view_mats, point_cloud_builder,
                                      intrinsic_mat, corner_storage)

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
Example #24
0
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    if known_view_1 is None or known_view_2 is None:
        known_view_1, known_view_2 = get_matrix_poses(corner_storage,
                                                      intrinsic_mat)

    video_size = len(rgb_sequence)
    frame_trackers = [
        FrameTrack(i, corner_storage[i]) for i in range(video_size)
    ]
    known_tracker_creator = lambda x: FrameTrack(x[0], corner_storage[x[0]],
                                                 pose_to_view_mat3x4(x[1]))

    frame_trackers[known_view_1[0]] = known_tracker_creator(known_view_1)
    frame_trackers[known_view_2[0]] = known_tracker_creator(known_view_2)

    init_params = triang_params

    for angle in range(90, 0, -2):
        params = TriangulationParameters(
            max_reprojection_error=MAX_REPROJECTION_ERROR,
            min_triangulation_angle_deg=angle,
            min_depth=0.001)
        _, points = triangulate_trackers(frame_trackers[known_view_1[0]],
                                         frame_trackers[known_view_2[0]],
                                         intrinsic_mat, params)
        if len(points) > 100:
            print(f"Chosen init angle: {angle}")
            init_params = params
            break

    ids, points = triangulate_trackers(frame_trackers[known_view_1[0]],
                                       frame_trackers[known_view_2[0]],
                                       intrinsic_mat, init_params)

    point_cloud_builder = PointCloudBuilder(ids, points)
    if len(points) < MIN_STARTING_POINTS:
        print(
            f"Not enough starting points ({len(points)}), please choose another initial frames pair"
            f"\n0, 20 is a good pair for short fox, ")
        return [], PointCloudBuilder()

    frame_trackers[known_view_1[0]].update_reproj_error(
        point_cloud_builder, intrinsic_mat)
    frame_trackers[known_view_2[0]].update_reproj_error(
        point_cloud_builder, intrinsic_mat)

    point_cloud_builder = track(ITERATIONS, frame_trackers,
                                point_cloud_builder, intrinsic_mat)

    view_mats = [x.mtx for x in frame_trackers]
    for i in range(1, len(view_mats)):
        if view_mats[i] is None:
            view_mats[i] = view_mats[i - 1]
    for i in range(len(view_mats) - 2, -1, -1):
        if view_mats[i] is None:
            view_mats[i] = view_mats[i + 1]

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
Example #25
0
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                            frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:

    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    if known_view_1 is None or known_view_2 is None:
        known_view_1, known_view_2 = calc_known_views(corner_storage,
                                                      intrinsic_mat)

    # TODO: implement
    num_frames_retr = 10
    triang_params = TriangulationParameters(8.0, 1.0, .1)

    num_frames = len(corner_storage)

    frame_1 = known_view_1[0]
    frame_2 = known_view_2[0]

    print(f'{frame_1}, {frame_2}')

    tvecs = [None] * num_frames
    tvecs[frame_1] = known_view_1[1].t_vec
    tvecs[frame_2] = known_view_2[1].t_vec

    view_mat_1 = pose_to_view_mat3x4(known_view_1[1])
    view_mat_2 = pose_to_view_mat3x4(known_view_2[1])

    view_mats = [None] * num_frames
    view_mats[frame_1] = view_mat_1
    view_mats[frame_2] = view_mat_2

    cloud = {}

    corrs = build_correspondences(corner_storage[frame_1],
                                  corner_storage[frame_2])
    points3d, ids, _ = triangulate_correspondences(corrs, view_mat_1,
                                                   view_mat_2, intrinsic_mat,
                                                   triang_params)
    for point3d, id in zip(points3d, ids):
        cloud[id] = point3d

    current_corners_occurences = {}
    corner_to_frames = {}

    for i, corners in enumerate(corner_storage):
        for j, id in enumerate(corners.ids.flatten()):
            current_corners_occurences[id] = 0
            if id not in corner_to_frames.keys():
                corner_to_frames[id] = [[i, j]]
            else:
                corner_to_frames[id].append([i, j])

    while len(untracked_frames(view_mats)) > 0:
        untr_frames = untracked_frames(view_mats)

        max_num_inl = -1
        best_frame = -1
        best_rvec = None
        best_tvec = None

        for frame in untr_frames:
            rvec, tvec, num_inl = calc_camera_pose(frame, corner_storage,
                                                   cloud, intrinsic_mat)
            if rvec is not None:
                if num_inl > max_num_inl:
                    best_frame = frame
                    max_num_inl = num_inl
                    best_rvec = rvec
                    best_tvec = tvec

        if max_num_inl == -1:
            break

        corners_to_add = []
        for id in corner_storage[best_frame].ids.flatten():
            if id in current_corners_occurences.keys():
                current_corners_occurences[id] += 1
                if current_corners_occurences[id] >= 2:
                    corners_to_add.append(id)

        for corner in corners_to_add:
            add_new_point(corner, corner_to_frames, view_mats, tvecs,
                          corner_storage, cloud, current_corners_occurences,
                          intrinsic_mat)

        print(f"Frame: {best_frame}, Inliers: {max_num_inl}")
        print(f"Cloud size: {len(cloud)}")
        view_mats[best_frame] = rodrigues_and_translation_to_view_mat3x4(
            best_rvec, best_tvec)
        tvecs[best_frame] = best_tvec

    last_mat = None
    for frame in range(num_frames):
        if view_mats[frame] is None:
            view_mats[frame] = last_mat
        else:
            last_mat = view_mats[frame]

    ids = []
    points = []

    for id in cloud.keys():
        ids.append(id)
        points.append(cloud[id])

    point_cloud_builder = PointCloudBuilder(np.array(ids, dtype=np.int64),
                                            np.array(points))

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
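
Example #25 relies on several helpers that are not shown in the snippet (untracked_frames, calc_camera_pose, add_new_point, calc_known_views). Below is only a minimal sketch of the first two, written to be consistent with how they are called above; the names and exact behaviour of the real helpers are assumptions.

import cv2
import numpy as np


def untracked_frames(view_mats):
    # Hypothetical helper: indices of frames whose pose is still unknown.
    return [frame for frame, mat in enumerate(view_mats) if mat is None]


def calc_camera_pose(frame, corner_storage, cloud, intrinsic_mat):
    # Hypothetical helper: PnP-RANSAC on the corners of `frame` that already
    # have a 3D point in `cloud`; returns (rvec, tvec, inlier count), or
    # (None, None, None) when the pose cannot be estimated.
    corners = corner_storage[frame]
    ids = corners.ids.flatten()
    known = [i for i, corner_id in enumerate(ids) if corner_id in cloud]
    if len(known) < 6:
        return None, None, None
    points_3d = np.array([cloud[ids[i]] for i in known])
    points_2d = np.array([corners.points[i] for i in known])
    retval, rvec, tvec, inliers = cv2.solvePnPRansac(points_3d, points_2d,
                                                     intrinsic_mat, None)
    if not retval or inliers is None:
        return None, None, None
    return rvec, tvec, len(inliers)
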
Example #26
0
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    if known_view_1 is None or known_view_2 is None:
        raise NotImplementedError()

    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    frames_cnt = len(rgb_sequence)
    unused = np.array([
        i for i in range(0, frames_cnt)
        if i not in [known_view_1[0], known_view_2[0]]
    ])
    used = np.array([known_view_1[0], known_view_2[0]])
    used_pose = [known_view_1[1], known_view_2[1]]

    points, ids, cos = triangulate(known_view_1[0], known_view_2[0],
                                   known_view_1[1], known_view_2[1],
                                   corner_storage, intrinsic_mat, None)
    point_cloud_builder = PointCloudBuilder(ids, points)
    # print("Add frames", *known_ids)

    while unused.shape[0] > 0:
        added = []
        for i in range(len(unused)):
            pose_i, ids_to_remove = camera_pose(unused[i], corner_storage,
                                                point_cloud_builder,
                                                intrinsic_mat)
            if pose_i is None:
                continue

            for j in range(len(used)):
                points, ids, cos = triangulate(unused[i], used[j], pose_i,
                                               used_pose[j], corner_storage,
                                               intrinsic_mat, ids_to_remove)
                point_cloud_builder.add_points(ids, points)

            used = np.append(used, [unused[i]])
            used_pose.append(pose_i)
            added.append(unused[i])
            # print("Frame", unused[i], "done!!")

        if len(added) == 0:
            break
        unused = np.setdiff1d(unused, added)

        used_pose = recalculate_poses(used, point_cloud_builder,
                                      corner_storage, intrinsic_mat)

    view_mats = [None for i in range(frames_cnt)]
    for i in range(len(used)):
        view_mats[used[i]] = pose_to_view_mat3x4(used_pose[i])

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
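
Example #26 calls a camera_pose helper that is not shown. Assuming it runs PnP-RANSAC against the current point cloud and also reports the RANSAC outlier ids (so they can be excluded from retriangulation), a sketch could look like the following; rodrigues_and_translation_to_view_mat3x4 and view_mat3x4_to_pose are assumed to come from the same utility module the examples import.

import cv2
import numpy as np


def camera_pose(frame, corner_storage, point_cloud_builder, intrinsic_mat):
    # Hypothetical reconstruction, not the original helper.
    corners = corner_storage[frame]
    ids, idx_corners, idx_cloud = np.intersect1d(
        corners.ids.flatten(), point_cloud_builder.ids.flatten(),
        return_indices=True)
    if len(ids) < 6:
        return None, None
    retval, rvec, tvec, inliers = cv2.solvePnPRansac(
        point_cloud_builder.points[idx_cloud], corners.points[idx_corners],
        intrinsic_mat, None)
    if not retval or inliers is None:
        return None, None
    # Corner ids rejected by RANSAC; the caller removes them before
    # triangulating new points.
    outlier_ids = np.delete(ids, inliers.flatten())
    view = rodrigues_and_translation_to_view_mat3x4(rvec, tvec)
    return view_mat3x4_to_pose(view), outlier_ids
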
Example #27
0
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    if known_view_1 is None or known_view_2 is None:
        raise NotImplementedError()

    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    view_mats_tmp = [None for _ in range(len(corner_storage))]
    all_points = {}
    not_none_mats = [known_view_1[0], known_view_2[0]]
    none_mats = set(
        [ind for ind in range(len(view_mats_tmp)) if ind not in not_none_mats])

    view_mats_tmp[known_view_1[0]] = pose_to_view_mat3x4(known_view_1[1])
    view_mats_tmp[known_view_2[0]] = pose_to_view_mat3x4(known_view_2[1])
    add_new_points(corner_storage, view_mats_tmp, known_view_1[0],
                   known_view_2[0], all_points, intrinsic_mat)

    old_len = 2
    for _ in range(len(view_mats_tmp)):
        for ind in none_mats:
            corner_ids = list(corner_storage[ind].ids.reshape(-1))
            intersection_inds = [
                point_ind for point_ind in corner_ids if
                point_ind in all_points and all_points[point_ind] is not None
            ]
            # An assert requires at least 5 points, so skip frames with fewer.
            if len(intersection_inds) < 5:
                continue
            # print(ind, len(corner_storage), len(corner_storage[ind].points), np.max(intersection_inds))
            corner_points = {
                i: p
                for i, p in zip(corner_ids, corner_storage[ind].points)
            }
            intersection_points_c = np.array(
                [corner_points[i] for i in intersection_inds])
            intersection_points_f = np.array(
                [all_points[i] for i in intersection_inds])
            # print(intersection_points_f)
            # print(intersection_points_f.shape, intersection_points_c.shape, intrinsic_mat)
            retval, rvec, tvec, inliers = cv2.solvePnPRansac(
                intersection_points_f, intersection_points_c, intrinsic_mat,
                None)

            if not retval:
                continue

            print(f'Processing frame: {ind}')

            # solvePnPRansac returns row indices into its input arrays, so
            # map them back to corner ids before marking outliers as None.
            inlier_ids = set(np.array(intersection_inds)[inliers.flatten()])
            newly_none_number = 0
            for i in intersection_inds:
                if i not in inlier_ids:
                    newly_none_number += 1
                    all_points[i] = None

            print(
                f'{newly_none_number} points marked as None, '
                f'number of inliers: {inliers.shape[0]}'
            )
            print(
                f'Points still in the cloud: '
                f'{len([p for p in all_points if all_points[p] is not None])}'
            )

            view_mats_tmp[ind] = rodrigues_and_translation_to_view_mat3x4(
                rvec, tvec)

            for not_none_ind in not_none_mats:
                add_new_points(corner_storage, view_mats_tmp, ind,
                               not_none_ind, all_points, intrinsic_mat)

            not_none_mats.append(ind)
            none_mats.remove(ind)
            break

        if len(not_none_mats) == old_len:
            break
        old_len = len(not_none_mats)

    view_mats = [None for _ in range(len(corner_storage))]
    for view_mat_ind in not_none_mats:
        view_mats[view_mat_ind] = view_mats_tmp[view_mat_ind]
    last_ind = 0
    for i in range(len(view_mats)):
        if view_mats[i] is None:
            view_mats[i] = view_mats[last_ind]
        else:
            last_ind = i
    all_points = {k: v for k, v in all_points.items() if v is not None}
    point_cloud_builder = PointCloudBuilder(
        np.array(list(all_points.keys()), dtype=np.int64),
        np.array(list(all_points.values())))

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
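
Example #27 also depends on an add_new_points helper. A sketch matching its call sites (triangulating between two frames whose poses are known and storing the results in the all_points dictionary) might be the following; the triangulation thresholds are placeholders and build_correspondences / triangulate_correspondences are the same utilities used throughout these examples.

def add_new_points(corner_storage, view_mats, frame_1, frame_2,
                   all_points, intrinsic_mat,
                   parameters=TriangulationParameters(1.0, 1.0, 0)):
    # Hypothetical helper: triangulate correspondences between two posed
    # frames and record any new 3D points, keyed by corner id.
    correspondences = build_correspondences(corner_storage[frame_1],
                                            corner_storage[frame_2])
    points, ids, _ = triangulate_correspondences(
        correspondences, view_mats[frame_1], view_mats[frame_2],
        intrinsic_mat, parameters)
    for point_id, point in zip(ids, points):
        # Do not overwrite existing entries (including outliers marked None).
        if point_id not in all_points:
            all_points[point_id] = point
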
Example #28
0
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])
    frame_count = len(corner_storage)
    view_mats = [None] * frame_count

    if known_view_1 is None or known_view_2 is None:
        known_view_1, known_view_2 = init_first_camera_positions(
            intrinsic_mat, corner_storage)
        id_1, view_mat_1 = known_view_1
        id_2, view_mat_2 = known_view_2
        view_mats[id_1], view_mats[id_2] = view_mat_1, view_mat_2
    else:
        id_1, pose_1 = known_view_1
        id_2, pose_2 = known_view_2
        view_mats[id_1] = pose_to_view_mat3x4(pose_1)
        view_mats[id_2] = pose_to_view_mat3x4(pose_2)
    params = TriangulationParameters(max_reprojection_error=2,
                                     min_triangulation_angle_deg=1,
                                     min_depth=0.5)
    correspondences = build_correspondences(corner_storage[id_1],
                                            corner_storage[id_2])
    points, ids, _ = triangulate_correspondences(correspondences,
                                                 view_mats[id_1],
                                                 view_mats[id_2],
                                                 intrinsic_mat, params)

    point_cloud_builder = PointCloudBuilder(ids.reshape(-1), points)

    while True:
        update = False
        for i in range(frame_count):
            if view_mats[i] is not None:
                continue
            corners = corner_storage[i]
            ids, ids1, ids2 = np.intersect1d(corners.ids,
                                             point_cloud_builder.ids,
                                             return_indices=True)
            points_2d, points_3d = corners.points[
                ids1], point_cloud_builder.points[ids2]
            if len(ids) < 6:
                continue
            retval, rvec, tvec, inliers = solvePnPRansac(points_3d,
                                                         points_2d,
                                                         intrinsic_mat,
                                                         distCoeffs=None)
            if not retval:
                continue
            retval, rvec, tvec = solvePnP(points_3d[inliers],
                                          points_2d[inliers],
                                          intrinsic_mat,
                                          distCoeffs=None,
                                          rvec=rvec,
                                          tvec=tvec,
                                          useExtrinsicGuess=True)
            if not retval:
                continue
            view_mats[i] = rodrigues_and_translation_to_view_mat3x4(rvec, tvec)
            update = True
            outliers = np.delete(ids, inliers)
            for j in range(i):
                if view_mats[j] is None:
                    continue
                correspondences = build_correspondences(corners,
                                                        corner_storage[j],
                                                        ids_to_remove=outliers)
                points, ids, _ = triangulate_correspondences(
                    correspondences, view_mats[i], view_mats[j], intrinsic_mat,
                    params)
                point_cloud_builder.add_points(ids.reshape(-1), points)
            click.echo("Process frame %d/%d. %d 3D points found. inliners=%d" %
                       (i + 1, frame_count, len(
                           point_cloud_builder.points), len(inliers)))
        if not update:
            break

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
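
Example #28 falls back to an init_first_camera_positions helper when no known views are given. A simplified sketch of such a helper is shown below: it pairs frame 0 with the frame that shares the most corners and recovers their relative pose from the essential matrix. This is an assumption about the original; in particular it does not check baseline quality, and eye3x4 is assumed to come from the same utility module the examples use.

import cv2
import numpy as np


def init_first_camera_positions(intrinsic_mat, corner_storage):
    # Hypothetical sketch: returns ((frame_id_1, view_mat_1),
    # (frame_id_2, view_mat_2)) with the first camera fixed at the identity.
    first = corner_storage[0]
    best = None
    for i in range(1, len(corner_storage)):
        ids, idx_1, idx_2 = np.intersect1d(first.ids.flatten(),
                                           corner_storage[i].ids.flatten(),
                                           return_indices=True)
        if best is None or len(ids) > best[1]:
            best = (i, len(ids), idx_1, idx_2)
    frame_2, _, idx_1, idx_2 = best
    pts_1 = first.points[idx_1]
    pts_2 = corner_storage[frame_2].points[idx_2]
    essential, _ = cv2.findEssentialMat(pts_1, pts_2, intrinsic_mat,
                                        method=cv2.RANSAC, threshold=1.0)
    _, r, t, _ = cv2.recoverPose(essential, pts_1, pts_2, intrinsic_mat)
    return (0, eye3x4()), (frame_2, np.hstack((r, t)))
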
Example #29
0
def track(intrinsic_mat: np.ndarray,
          corner_storage: CornerStorage,
          known_view_1: Optional[Tuple[int, Pose]],
          known_view_2: Optional[Tuple[int, Pose]]) \
        -> Tuple[np.ndarray, np.ndarray, np.ndarray, CornerStorage]:
    assert (known_view_1 is None) == (known_view_2 is None)

    n_frames = len(corner_storage)
    point_storage = PointStorage(corner_storage)
    view_mats = np.zeros((0, 3, 4), dtype=np.float64)

    if known_view_1 is None:
        (frame_num_1, view_1), (frame_num_2, view_2) = find_init_views(
            point_storage, intrinsic_mat)
    else:
        if known_view_1[0] > known_view_2[0]:
            known_view_1, known_view_2 = known_view_2, known_view_1

        frame_num_1, pose_1 = known_view_1
        frame_num_2, pose_2 = known_view_2

        view_1 = pose_to_view_mat3x4(pose_1)
        view_2 = pose_to_view_mat3x4(pose_2)

    print(f'Initial frames: {frame_num_1} and {frame_num_2}')

    find_init_points(frame_num_1, frame_num_2,
                     view_1, view_2,
                     point_storage,
                     intrinsic_mat)
    point_storage.process_all_events()
    view_mats = np.concatenate((view_mats, np.expand_dims(view_1, axis=0)))

    print(f'{1} of {n_frames}   {point_storage.point_cloud_size()} inliers   '
          f'{point_storage.point_cloud_size()} triangulated   '
          f'{point_storage.point_cloud_size()} in cloud')

    first_point_storage = point_storage.get_prefix(frame_num_1 + 1)
    first_point_storage.reverse()

    for frame in range(1, frame_num_1 + 1):
        view, inliers_cnt = find_view(frame, first_point_storage, intrinsic_mat)
        view_mats = np.concatenate((view_mats, np.expand_dims(view, axis=0)))

        triangulated_cnt = find_new_points(frame, view_mats, first_point_storage, intrinsic_mat)

        print(f'{frame + 1} of {n_frames}   {inliers_cnt} inliers   '
              f'{triangulated_cnt} triangulated   '
              f'{first_point_storage.point_cloud_size()} in cloud')

    first_point_storage.reverse()
    point_storage.set_prefix(first_point_storage, frame_num_1 + 1)
    view_mats = view_mats[::-1]

    for frame in range(frame_num_1 + 1, n_frames):
        view, inliers_cnt = find_view(frame, point_storage, intrinsic_mat)
        view_mats = np.concatenate((view_mats, np.expand_dims(view, axis=0)))

        triangulated_cnt = find_new_points(frame, view_mats, point_storage, intrinsic_mat)

        print(f'{frame + 1} of {n_frames}   {inliers_cnt} inliers   '
              f'{triangulated_cnt} triangulated   '
              f'{point_storage.point_cloud_size()} in cloud')

    point_storage.process_all_events()
    point_storage.truncate_point_cloud(MAX_POINT_CLOUD_SIZE)
    view_mats = optimize(view_mats, point_storage, intrinsic_mat)

    return (view_mats, *point_storage.point_cloud(), point_storage.build_corner_storage())
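
Example #29 returns raw arrays rather than the (poses, point_cloud) pair produced by the other examples. Assuming point_storage.point_cloud() yields an (ids, points) pair, the result could be repackaged in the same style with a fragment like this (intrinsic_mat, corner_storage and the known views are taken from the enclosing scope):

view_mats, point_ids, points_3d, refined_corners = track(
    intrinsic_mat, corner_storage, known_view_1, known_view_2)
point_cloud_builder = PointCloudBuilder(point_ids, points_3d)
poses = [view_mat3x4_to_pose(view_mat) for view_mat in view_mats]
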
Example #30
0
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    if known_view_1 is None or known_view_2 is None:
        raise NotImplementedError()

    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(
        camera_parameters,
        rgb_sequence[0].shape[0]
    )

    # TODO: implement
    frame_count = len(corner_storage)
    view_mats = [None] * frame_count
    view_1_id, view_1 = known_view_1[0], pose_to_view_mat3x4(known_view_1[1])
    view_2_id, view_2 = known_view_2[0], pose_to_view_mat3x4(known_view_2[1])

    view_mats[view_1_id] = view_1
    view_mats[view_2_id] = view_2

    correspondences = build_correspondences(corner_storage[view_1_id], corner_storage[view_2_id], None)
    triangulation_parameters = TriangulationParameters(1, 5, 0)
    points, ids, _ = triangulate_correspondences(correspondences, view_1, view_2,
                                                 intrinsic_mat, TriangulationParameters(0.1, 0.1, 0))

    #corners_0 = corner_storage[0]
    point_cloud_builder = PointCloudBuilder(ids, points)

    unknown_frames = frame_count - 2
    known_frames = {view_1_id, view_2_id}

    while unknown_frames > 0:
        for i in range(frame_count):
            if view_mats[i] is None:
                corners = corner_storage[i]
                interesting_ids, in_corners, in_cloud = np.intersect1d(
                    corners.ids.flatten(), point_cloud_builder.ids.flatten(),
                    return_indices=True)
                points_2d = corners.points[in_corners]
                points_3d = point_cloud_builder.points[in_cloud]

                if len(interesting_ids) < 3:
                    continue

                method = cv2.SOLVEPNP_EPNP
                if len(interesting_ids) == 3:
                    method = cv2.SOLVEPNP_P3P
                retval, rvec, tvec, inliers = cv2.solvePnPRansac(points_3d, points_2d, intrinsic_mat, None,
                                                                 flags=method)
                if not retval:
                    continue
                retval, rvec, tvec = cv2.solvePnP(points_3d[inliers], points_2d[inliers], intrinsic_mat, None,
                                                  rvec=rvec, tvec=tvec, useExtrinsicGuess=True)
                if not retval:
                    continue

                view_mats[i] = rodrigues_and_translation_to_view_mat3x4(rvec, tvec)
                unknown_frames -= 1
                known_frames.add(i)

                outliers = np.delete(interesting_ids, inliers)
                for frame in known_frames:
                    if frame == i:
                        continue
                    correspondences = build_correspondences(
                        corners, corner_storage[frame], outliers)
                    points_3d, corner_ids, _ = triangulate_correspondences(
                        correspondences, view_mats[i], view_mats[frame], intrinsic_mat, triangulation_parameters)
                    point_cloud_builder.add_points(corner_ids, points_3d)

    calc_point_cloud_colors(
        point_cloud_builder,
        rgb_sequence,
        view_mats,
        intrinsic_mat,
        corner_storage,
        5.0
    )
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
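
Most of the examples above construct TriangulationParameters positionally, which is easy to misread. Example #28 spells out the field names, so as a readability suggestion (using the values from Example #30's triangulation_parameters) the keyword form would be:

triangulation_parameters = TriangulationParameters(
    max_reprojection_error=1,
    min_triangulation_angle_deg=5,
    min_depth=0)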