Example 1
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    if known_view_1 is None or known_view_2 is None:
        raise NotImplementedError()

    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(
        camera_parameters,
        rgb_sequence[0].shape[0]
    )

    # TODO: implement
    frame_count = len(corner_storage)
    view_mats = [pose_to_view_mat3x4(known_view_1[1])] * frame_count
    corners_0 = corner_storage[0]
    point_cloud_builder = PointCloudBuilder(corners_0.ids[:1],
                                            np.zeros((1, 3)))

    calc_point_cloud_colors(
        point_cloud_builder,
        rgb_sequence,
        view_mats,
        intrinsic_mat,
        corner_storage,
        5.0
    )
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
Example 2
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(
        camera_parameters,
        rgb_sequence[0].shape[0]
    )

    view_mats, point_ids, points3d, corner_storage = track(intrinsic_mat, corner_storage,
                                                            known_view_1, known_view_2)

    point_cloud_builder = PointCloudBuilder(point_ids, points3d)

    calc_point_cloud_colors(
        point_cloud_builder,
        rgb_sequence,
        view_mats,
        intrinsic_mat,
        corner_storage,
        5.0
    )
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
Example 3
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])
    frame_count = len(corner_storage)
    view_mats = [None] * frame_count

    if known_view_1 is None or known_view_2 is None:
        known_view_1, known_view_2 = init_first_camera_positions(
            intrinsic_mat, corner_storage)
        id_1, view_mat_1 = known_view_1
        id_2, view_mat_2 = known_view_2
        view_mats[id_1], view_mats[id_2] = view_mat_1, view_mat_2
    else:
        id_1, pose_1 = known_view_1
        id_2, pose_2 = known_view_2
        view_mats[id_1] = pose_to_view_mat3x4(pose_1)
        view_mats[id_2] = pose_to_view_mat3x4(pose_2)
    params = TriangulationParameters(max_reprojection_error=2,
                                     min_triangulation_angle_deg=1,
                                     min_depth=0.5)
    correspondences = build_correspondences(corner_storage[id_1],
                                            corner_storage[id_2])
    points, ids, _ = triangulate_correspondences(correspondences,
                                                 view_mats[id_1],
                                                 view_mats[id_2],
                                                 intrinsic_mat, params)

    point_cloud_builder = PointCloudBuilder(ids.reshape(-1), points)

    while True:
        update = False
        for i in range(frame_count):
            if view_mats[i] is not None:
                continue
            corners = corner_storage[i]
            ids, ids1, ids2 = np.intersect1d(corners.ids,
                                             point_cloud_builder.ids,
                                             return_indices=True)
            points_2d = corners.points[ids1]
            points_3d = point_cloud_builder.points[ids2]
            if len(ids) < 6:
                continue
            retval, rvec, tvec, inliers = solvePnPRansac(points_3d,
                                                         points_2d,
                                                         intrinsic_mat,
                                                         distCoeffs=None)
            if not retval:
                continue
            retval, rvec, tvec = solvePnP(points_3d[inliers],
                                          points_2d[inliers],
                                          intrinsic_mat,
                                          distCoeffs=None,
                                          rvec=rvec,
                                          tvec=tvec,
                                          useExtrinsicGuess=True)
            if not retval:
                continue
            view_mats[i] = rodrigues_and_translation_to_view_mat3x4(rvec, tvec)
            update = True
            outliers = np.delete(ids, inliers)
            for j in range(i):
                if view_mats[j] is None:
                    continue
                correspondences = build_correspondences(corners,
                                                        corner_storage[j],
                                                        ids_to_remove=outliers)
                points, ids, _ = triangulate_correspondences(
                    correspondences, view_mats[i], view_mats[j], intrinsic_mat,
                    params)
                point_cloud_builder.add_points(ids.reshape(-1), points)
            click.echo("Process frame %d/%d. %d 3D points found. inliners=%d" %
                       (i + 1, frame_count, len(
                           point_cloud_builder.points), len(inliers)))
        if not update:
            break

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
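
Note: Example 3 (and nearly every example below) relies on a support-library helper rodrigues_and_translation_to_view_mat3x4 whose body is not shown. A minimal sketch of what such a helper presumably does, using only cv2.Rodrigues:

import cv2
import numpy as np


def rodrigues_and_translation_to_view_mat3x4(rvec, tvec):
    # cv2.Rodrigues converts the rotation vector returned by solvePnP into a
    # 3x3 rotation matrix; stacking the translation column next to it yields
    # the 3x4 [R|t] view matrix used throughout these examples.
    rot_mat, _ = cv2.Rodrigues(rvec)
    return np.hstack((rot_mat, tvec.reshape(3, 1)))
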
Example 4
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    if known_view_1 is None or known_view_2 is None:
        raise NotImplementedError()

    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    view_mats = np.zeros((len(corner_storage), 3, 4), dtype=np.float64)
    processed_frames = np.zeros(len(corner_storage), dtype=bool)
    points3d = np.zeros((corner_storage.max_corner_id() + 1, 3),
                        dtype=np.float64)
    added_points = np.zeros(corner_storage.max_corner_id() + 1, dtype=bool)

    print('Trying to extract 3d points from known frames...')

    view_mats[known_view_1[0]] = pose_to_view_mat3x4(known_view_1[1])
    view_mats[known_view_2[0]] = pose_to_view_mat3x4(known_view_2[1])
    processed_frames[known_view_1[0]] = processed_frames[
        known_view_2[0]] = True
    extracted_points = extract_points(corner_storage[known_view_1[0]],
                                      corner_storage[known_view_2[0]],
                                      view_mats[known_view_1[0]],
                                      view_mats[known_view_2[0]],
                                      intrinsic_mat)
    if not extracted_points:
        print(
            'Extracting 3d points from known frames failed: '
            'either there are no common points, or the triangulation angle between the frames is too small.\n'
            'Try choosing different initial frames.',
            file=sys.stderr)
        exit(0)
    print('Succeeded! Trying to build point cloud...')

    added_points[extracted_points[0]] = True
    points3d[extracted_points[0]] = extracted_points[1]

    was_updated = True
    while was_updated:
        was_updated = False
        unprocessed_frames = np.arange(len(corner_storage),
                                       dtype=np.int32)[~processed_frames]
        for frame in unprocessed_frames:
            points3d_ids = np.arange(corner_storage.max_corner_id() + 1,
                                     dtype=np.int32)[added_points]
            common, (indices1, indices2) = snp.intersect(
                points3d_ids,
                corner_storage[frame].ids.flatten(),
                indices=True)
            if len(common) <= 5:
                continue

            try:
                print(f'Processing frame {frame}: ', end='')
                retval, rvec, tvec, inliers = cv2.solvePnPRansac(
                    objectPoints=points3d[common],
                    imagePoints=corner_storage[frame].points[indices2],
                    cameraMatrix=intrinsic_mat,
                    distCoeffs=None)
            except cv2.error:
                retval = False

            if not retval:
                print('failed to solve PnP RANSAC, trying another frame')
                continue

            print(f'succeeded, found {len(inliers)} inliers')
            was_updated = True
            view_mats[frame] = rodrigues_and_translation_to_view_mat3x4(
                rvec, tvec)
            for processed_frame in np.arange(len(corner_storage),
                                             dtype=np.int32)[processed_frames]:
                extracted_points = extract_points(
                    corner_storage[frame], corner_storage[processed_frame],
                    view_mats[frame], view_mats[processed_frame],
                    intrinsic_mat, points3d_ids)

                if extracted_points:
                    added_points[extracted_points[0]] = True
                    points3d[extracted_points[0]] = extracted_points[1]
            processed_frames[frame] = True
            print(f'Current point cloud contains {sum(added_points)} points')

    for _ in range(2):
        for i in range(1, len(corner_storage)):
            if not processed_frames[i]:
                processed_frames[i] = True
                view_mats[i] = view_mats[i - 1]
        processed_frames = processed_frames[::-1]
        view_mats = view_mats[::-1]

    print(
        f'Finished building point cloud, now it contains {sum(added_points)} points'
    )

    point_cloud_builder = PointCloudBuilder(ids=np.arange(
        corner_storage.max_corner_id() + 1, dtype=np.int32)[added_points],
                                            points=points3d[added_points])

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
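
Note: Example 4 assumes an extract_points helper that is not shown. A hypothetical sketch of it, built from the same support-library functions the examples already import (build_correspondences, triangulate_correspondences, TriangulationParameters) and with made-up thresholds:

def extract_points(corners_1, corners_2, view_mat_1, view_mat_2,
                   intrinsic_mat, ids_to_remove=None):
    # Triangulate the corners shared by two solved frames and return
    # (ids, points3d), or None when nothing could be triangulated.
    correspondences = build_correspondences(corners_1, corners_2,
                                            ids_to_remove=ids_to_remove)
    if len(correspondences.ids) == 0:
        return None
    points3d, ids, _ = triangulate_correspondences(
        correspondences, view_mat_1, view_mat_2, intrinsic_mat,
        TriangulationParameters(max_reprojection_error=2.0,
                                min_triangulation_angle_deg=1.0,
                                min_depth=0.1))
    if len(ids) == 0:
        return None
    return ids, points3d
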
Example 5
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    if known_view_1 is None or known_view_2 is None:
        known_view_1, known_view_2 = get_matrix_poses(corner_storage,
                                                      intrinsic_mat)

    video_size = len(rgb_sequence)
    frame_trackers = [
        FrameTrack(i, corner_storage[i]) for i in range(video_size)
    ]
    known_tracker_creator = lambda x: FrameTrack(x[0], corner_storage[x[0]],
                                                 pose_to_view_mat3x4(x[1]))

    frame_trackers[known_view_1[0]] = known_tracker_creator(known_view_1)
    frame_trackers[known_view_2[0]] = known_tracker_creator(known_view_2)

    init_params = triang_params

    for angle in range(90, 0, -2):
        params = TriangulationParameters(
            max_reprojection_error=MAX_REPROJECTION_ERROR,
            min_triangulation_angle_deg=angle,
            min_depth=0.001)
        _, points = triangulate_trackers(frame_trackers[known_view_1[0]],
                                         frame_trackers[known_view_2[0]],
                                         intrinsic_mat, params)
        if len(points) > 100:
            print(f"Chosen init angle: {angle}")
            init_params = params
            break

    ids, points = triangulate_trackers(frame_trackers[known_view_1[0]],
                                       frame_trackers[known_view_2[0]],
                                       intrinsic_mat, init_params)

    point_cloud_builder = PointCloudBuilder(ids, points)
    if len(points) < MIN_STARTING_POINTS:
        print(
            f"Not enough starting points ({len(points)}), please choose a different pair of initial frames"
            "\n(0, 20 is a good pair for the short fox sequence)")
        return [], PointCloudBuilder().build_point_cloud()

    frame_trackers[known_view_1[0]].update_reproj_error(
        point_cloud_builder, intrinsic_mat)
    frame_trackers[known_view_2[0]].update_reproj_error(
        point_cloud_builder, intrinsic_mat)

    point_cloud_builder = track(ITERATIONS, frame_trackers,
                                point_cloud_builder, intrinsic_mat)

    view_mats = [x.mtx for x in frame_trackers]
    for i in range(1, len(view_mats)):
        if view_mats[i] is None:
            view_mats[i] = view_mats[i - 1]
    for i in range(len(view_mats) - 2, -1, -1):
        if view_mats[i] is None:
            view_mats[i] = view_mats[i + 1]

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
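
Note: Example 5 depends on a FrameTrack class and a triangulate_trackers helper that are not shown. A hypothetical minimal container, consistent with how the example uses .mtx and update_reproj_error (the reprojection-error formula here is a guess):

from typing import Optional

import numpy as np


class FrameTrack:
    # One tracked frame: its index, its corners, and the solved 3x4 view
    # matrix (None until the frame's pose has been recovered).
    def __init__(self, frame_id, corners, mtx: Optional[np.ndarray] = None):
        self.frame_id = frame_id
        self.corners = corners
        self.mtx = mtx
        self.reproj_error = float('inf')

    def update_reproj_error(self, point_cloud_builder, intrinsic_mat):
        # Mean reprojection error of the frame's corners that already have a
        # 3D point in the cloud.
        if self.mtx is None:
            return
        ids, idx_corners, idx_cloud = np.intersect1d(
            self.corners.ids.flatten(), point_cloud_builder.ids.flatten(),
            return_indices=True)
        if len(ids) == 0:
            return
        points3d = point_cloud_builder.points[idx_cloud]
        proj = intrinsic_mat @ self.mtx @ np.vstack(
            (points3d.T, np.ones(len(points3d))))
        proj = (proj[:2] / proj[2]).T
        self.reproj_error = float(np.mean(np.linalg.norm(
            proj - self.corners.points[idx_corners], axis=1)))
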
Example 6
def track_and_calc_colors(
    camera_parameters: CameraParameters,
    corner_storage: CornerStorage,
    frame_sequence_path: str,
    known_view_1: Optional[Tuple[int, Pose]] = None,
    known_view_2: Optional[Tuple[int, Pose]] = None
) -> Tuple[List[Pose], PointCloud]:
    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    if known_view_1 is None or known_view_2 is None:
        known_view_1, known_view_2 = calculate_known_views(
            intrinsic_mat, corner_storage)
        print('Calculated known views: {} and {}'.format(
            known_view_1[0], known_view_2[0]))

    random.seed(239)

    view_mats = np.full((len(rgb_sequence), ), None)
    view_mats[known_view_1[0]] = pose_to_view_mat3x4(known_view_1[1])
    view_mats[known_view_2[0]] = pose_to_view_mat3x4(known_view_2[1])

    point_cloud_builder = PointCloudBuilder()

    def triangulate_and_add_points(corners1, corners2, view_mat1, view_mat2):
        points3d, ids, median_cos = triangulate_correspondences(
            build_correspondences(corners1, corners2), view_mat1, view_mat2,
            intrinsic_mat, triang_params)
        point_cloud_builder.add_points(ids, points3d)

    triangulate_and_add_points(corner_storage[known_view_1[0]],
                               corner_storage[known_view_2[0]],
                               view_mats[known_view_1[0]],
                               view_mats[known_view_2[0]])

    inliers_points3d, inliers_points2d = None, None

    def find_errs(rtvecs):
        rvecs, tvecs = rtvecs[:3], rtvecs[3:]
        rmat, _ = cv2.Rodrigues(np.expand_dims(rvecs, axis=1))
        rmat = np.concatenate((rmat, np.expand_dims(tvecs, axis=1)), axis=1)
        return (project_points(inliers_points3d, intrinsic_mat @ rmat) -
                inliers_points2d).flatten()

    while True:
        random_ids = list(range(len(view_mats)))
        random.shuffle(random_ids)
        found_new_view_mat = False
        for i in random_ids:
            if view_mats[i] is not None:
                continue
            common, (ids1,
                     ids2) = snp.intersect(point_cloud_builder.ids.flatten(),
                                           corner_storage[i].ids.flatten(),
                                           indices=True)
            if len(common) <= 10:
                continue

            points3d = point_cloud_builder.points[ids1]
            points2d = corner_storage[i].points[ids2]
            retval, rvecs, tvecs, inliers = cv2.solvePnPRansac(
                points3d,
                points2d,
                intrinsic_mat,
                iterationsCount=108,
                reprojectionError=triang_params.max_reprojection_error,
                distCoeffs=None,
                confidence=0.999,
                flags=cv2.SOLVEPNP_EPNP)
            if retval:
                # M-estimates: robust refinement of (rvec, tvec) with a Huber loss
                inliers_points3d = points3d[inliers.flatten()]
                inliers_points2d = points2d[inliers.flatten()]
                rtvecs = least_squares(find_errs,
                                       x0=np.concatenate(
                                           (rvecs, tvecs)).flatten(),
                                       loss='huber',
                                       method='trf').x
                rvecs = rtvecs[:3].reshape((-1, 1))
                tvecs = rtvecs[3:].reshape((-1, 1))
            if not retval:
                continue

            print(
                'Iteration {}/{}, processing {}th frame: {} inliers, {} points in point cloud'
                .format(
                    len([v for v in view_mats if v is not None]) - 1,
                    len(rgb_sequence) - 2, i, len(inliers),
                    len(point_cloud_builder.points)))

            view_mats[i] = rodrigues_and_translation_to_view_mat3x4(
                rvecs, tvecs)
            found_new_view_mat = True

            # Add new points
            inliers = np.array(inliers).astype(int).flatten()
            inlier_corners = FrameCorners(
                *[c[inliers] for c in corner_storage[i]])
            for j in range(len(view_mats)):
                if view_mats[j] is not None:
                    triangulate_and_add_points(corner_storage[j],
                                               inlier_corners, view_mats[j],
                                               view_mats[i])

            sys.stdout.flush()

        if not found_new_view_mat:
            break

    for i in range(0, len(view_mats)):
        if view_mats[i] is None:
            print('Giving up: not all frames were processed :(')
            exit(1)

    if len(view_mats) < 100:  # otherwise bundle adjustment takes too long
        view_mats = bundle_adjustment(view_mats, point_cloud_builder,
                                      intrinsic_mat, corner_storage)

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
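
Note: the Huber refinement in Example 6 assumes a project_points helper. A plausible sketch, assuming proj_mat is the full 3x4 projection matrix with the intrinsics already multiplied in (as in intrinsic_mat @ [R|t]):

import numpy as np


def project_points(points3d, proj_mat):
    # Project Nx3 world points with a 3x4 projection matrix and return the
    # Nx2 pixel coordinates after perspective division.
    points_hom = np.hstack((points3d, np.ones((len(points3d), 1))))
    projected = points_hom @ proj_mat.T
    return projected[:, :2] / projected[:, 2:3]
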
Example 7
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:

    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    if known_view_1 is None or known_view_2 is None:
        known_view_1, known_view_2 = calc_known_views(corner_storage,
                                                      intrinsic_mat)

    # TODO: implement
    num_frames_retr = 10
    triang_params = TriangulationParameters(8.0, 1.0, .1)

    num_frames = len(corner_storage)

    frame_1 = known_view_1[0]
    frame_2 = known_view_2[0]

    print(f'{frame_1}, {frame_2}')

    tvecs = [None] * num_frames
    tvecs[frame_1] = known_view_1[1].t_vec
    tvecs[frame_2] = known_view_2[1].t_vec

    view_mat_1 = pose_to_view_mat3x4(known_view_1[1])
    view_mat_2 = pose_to_view_mat3x4(known_view_2[1])

    view_mats = [None] * num_frames
    view_mats[frame_1] = view_mat_1
    view_mats[frame_2] = view_mat_2

    cloud = {}

    corrs = build_correspondences(corner_storage[frame_1],
                                  corner_storage[frame_2])
    points3d, ids, _ = triangulate_correspondences(corrs, view_mat_1,
                                                   view_mat_2, intrinsic_mat,
                                                   triang_params)
    for point3d, id in zip(points3d, ids):
        cloud[id] = point3d

    current_corners_occurences = {}
    corner_to_frames = {}

    for i, corners in enumerate(corner_storage):
        for j, id in enumerate(corners.ids.flatten()):
            current_corners_occurences[id] = 0
            if id not in corner_to_frames.keys():
                corner_to_frames[id] = [[i, j]]
            else:
                corner_to_frames[id].append([i, j])

    while len(untracked_frames(view_mats)) > 0:
        untr_frames = untracked_frames(view_mats)

        max_num_inl = -1
        best_frame = -1
        best_rvec = None
        best_tvec = None

        for frame in untr_frames:
            rvec, tvec, num_inl = calc_camera_pose(frame, corner_storage,
                                                   cloud, intrinsic_mat)
            if rvec is not None:
                if num_inl > max_num_inl:
                    best_frame = frame
                    max_num_inl = num_inl
                    best_rvec = rvec
                    best_tvec = tvec

        if max_num_inl == -1:
            break

        corners_to_add = []
        for id in corner_storage[best_frame].ids.flatten():
            if id in current_corners_occurences.keys():
                current_corners_occurences[id] += 1
                if current_corners_occurences[id] >= 2:
                    corners_to_add.append(id)

        for corner in corners_to_add:
            add_new_point(corner, corner_to_frames, view_mats, tvecs,
                          corner_storage, cloud, current_corners_occurences,
                          intrinsic_mat)

        print(f"Frame: {best_frame}, Inliers: {max_num_inl}")
        print(f"Cloud size: {len(cloud)}")
        view_mats[best_frame] = rodrigues_and_translation_to_view_mat3x4(
            best_rvec, best_tvec)
        tvecs[best_frame] = best_tvec

    last_mat = None
    for frame in range(num_frames):
        if view_mats[frame] is None:
            view_mats[frame] = last_mat
        else:
            last_mat = view_mats[frame]

    ids = []
    points = []

    for id in cloud.keys():
        ids.append(id)
        points.append(cloud[id])

    point_cloud_builder = PointCloudBuilder(np.array(ids, dtype=np.int64),
                                            np.array(points))

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
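
Note: Example 7 relies on untracked_frames, calc_camera_pose and add_new_point helpers that are not shown. Hypothetical sketches of the first two, under the assumption that cloud is the {corner id: 3D point} dict built above:

import cv2
import numpy as np


def untracked_frames(view_mats):
    # Indices of frames whose camera pose has not been recovered yet.
    return [i for i, mat in enumerate(view_mats) if mat is None]


def calc_camera_pose(frame, corner_storage, cloud, intrinsic_mat):
    # Gather the frame's corners that already have a 3D point in the cloud,
    # run PnP RANSAC and return (rvec, tvec, inlier count); (None, None, 0)
    # when the pose cannot be estimated.
    corners = corner_storage[frame]
    points3d, points2d = [], []
    for corner_id, point2d in zip(corners.ids.flatten(), corners.points):
        if corner_id in cloud:
            points3d.append(cloud[corner_id])
            points2d.append(point2d)
    if len(points3d) < 6:
        return None, None, 0
    retval, rvec, tvec, inliers = cv2.solvePnPRansac(
        np.array(points3d, dtype=np.float64),
        np.array(points2d, dtype=np.float64),
        intrinsic_mat, distCoeffs=None)
    if not retval or inliers is None:
        return None, None, 0
    return rvec, tvec, len(inliers)
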
Example 8
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:

    # helper to check that a matrix really is a rotation matrix
    def check_if_mat_is_rot_mat(mat):
        mat = mat[:, :3]
        det = np.linalg.det(mat)
        # the determinant must equal 1
        diff1 = abs(1 - det)
        x = mat @ mat.T
        # the inverse must be the transpose
        diff2 = abs(np.sum(np.eye(3) - x))
        eps = 0.001
        return diff1 < eps and diff2 < eps

    MAX_REPROJECTION_ERROR = 1.6
    MIN_DIST_TRIANGULATE = 0.01

    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    frame_count = len(corner_storage)
    t_vecs = [None] * frame_count
    view_mats = [None] * frame_count  # camera poses for every frame

    # for every corner, find all frames it appears in and its index within each of them
    cur_corners_occurencies = {}
    frames_of_corner = {}
    for i, corners in enumerate(corner_storage):
        for id_in_list, j in enumerate(corners.ids.flatten()):
            cur_corners_occurencies[j] = 0
            if j not in frames_of_corner.keys():
                frames_of_corner[j] = [[i, id_in_list]]
            else:
                frames_of_corner[j].append([i, id_in_list])

    # find the points shared by two frames and recover their 3D positions
    def triangulate(frame_0, frame_1, params=TriangulationParameters(2, 1e-3, 1e-4), ids_to_remove=None) \
            -> Tuple[np.ndarray, np.ndarray, float]:
        corrs = build_correspondences(corner_storage[frame_0],
                                      corner_storage[frame_1])
        return triangulate_correspondences(corrs, view_mats[frame_0],
                                           view_mats[frame_1], intrinsic_mat,
                                           params)

    # initialization constants
    BASELINE_THRESHOLD = 0.9
    REPROJECTION_ERROR_THRESHOLD = 1.4
    MIN_3D_POINTS = 10
    frame_steps = [9, 30, 40]

    # estimate the relative motion between two given frames
    def get_first_cam_pose(frame_0, frame_1):
        corrs = build_correspondences(corner_storage[frame_0],
                                      corner_storage[frame_1])
        e_mat, _ = cv2.findEssentialMat(corrs.points_1, corrs.points_2,
                                        intrinsic_mat, cv2.RANSAC, 0.999, 2.0)

        def essential_mat_to_camera_view_matrix(e_mat):
            U, S, VH = np.linalg.svd(e_mat, full_matrices=True)
            W = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]], dtype=np.float64)
            u3 = U[:, 2]
            possible_R = [U @ W @ VH, U @ W.T @ VH]
            possible_t = [u3, -u3]
            best_R = None
            best_t = None
            max_positive_z = 0
            for R in possible_R:
                for t in possible_t:
                    view_mat0 = np.hstack((np.eye(3), np.zeros((3, 1))))
                    view_mat1 = np.hstack((R, t.reshape((3, 1))))
                    view_mats[frame_0] = view_mat0
                    view_mats[frame_1] = view_mat1
                    points3d, ids, median_cos = triangulate(frame_0, frame_1)
                    cur_positive_z = 0
                    for pt in points3d:
                        pt = np.append(pt, 1.0).reshape((4, 1))  # homogeneous coordinate
                        pt_transformed0 = (view_mat0 @ pt).flatten()
                        z0 = pt_transformed0[2]
                        pt_transformed1 = (view_mat1 @ pt).flatten()
                        z1 = pt_transformed1[2]
                        cur_positive_z += (z0 > 0.1) + (z1 > 0.1)
                    if cur_positive_z > max_positive_z:
                        max_positive_z = cur_positive_z
                        best_R = R
                        best_t = t
            return best_R, best_t

        R, t = essential_mat_to_camera_view_matrix(e_mat)
        baseline = np.linalg.norm(t)

        view_mat0 = np.hstack((np.eye(3), np.zeros((3, 1))))
        view_mat1 = np.hstack((R, t.reshape((3, 1))))
        view_mats[frame_0] = view_mat0
        view_mats[frame_1] = view_mat1
        points3d, ids, median_cos = triangulate(frame_0, frame_1)
        points0 = []
        points1 = []
        for i in ids:
            for j in frames_of_corner[i]:
                if j[0] == frame_0:
                    points0.append(j[1])
                if j[0] == frame_1:
                    points1.append(j[1])
        points0 = np.array(points0)
        points1 = np.array(points1)
        sum_error = compute_reprojection_errors(
            points3d, corner_storage[frame_0].points[points0],
            intrinsic_mat @ view_mat0) + compute_reprojection_errors(
                points3d, corner_storage[frame_1].points[points1],
                intrinsic_mat @ view_mat1)
        reprojection_error = np.mean(sum_error)
        points3d_count = len(points3d)

        view_mats[frame_0] = None
        view_mats[frame_1] = None

        # check how good this frame pair is, and also verify that R really is a rotation matrix
        if baseline < BASELINE_THRESHOLD or reprojection_error > REPROJECTION_ERROR_THRESHOLD or points3d_count < MIN_3D_POINTS or not check_if_mat_is_rot_mat(
                np.hstack((R, t.reshape((3, 1))))):
            return False, baseline, reprojection_error, points3d_count, None, None
        else:
            return True, baseline, reprojection_error, points3d_count, R, t

    # try different frame pairs for initialization
    INF = 1e9
    best_frame_step = 5
    best = [-INF, INF, -INF]
    ind = None
    for frame_step in frame_steps:
        for i in range(frame_count // 2):
            if i + frame_step < frame_count * 0.85:
                ok, baseline, reprojection_error, points3d_count, Rx, tx = get_first_cam_pose(
                    i, i + frame_step)
                if ok:
                    best = [baseline, reprojection_error, points3d_count]
                    ind = i
                    best_frame_step = frame_step
                    if frame_count > 100:
                        break

    # give up if initialization failed for every frame pair
    if ind is None:
        raise NotImplementedError()

    # initialize from the best pair found
    frame_0 = ind
    frame_1 = ind + best_frame_step
    ok, baseline, reprojection_error, points3d_count, R, t = get_first_cam_pose(
        frame_0, frame_1)
    print('INITIALIZATION RESULTS:')
    print('FRAMES: frame_0={}, frame_1={}, total_frames={}'.format(
        frame_0, frame_1, frame_count))
    print(baseline, reprojection_error, points3d_count, R, t)
    view_mat0 = np.hstack((np.eye(3), np.zeros((3, 1))))
    view_mat1 = np.hstack((R, t.reshape((3, 1))))
    view_mats[frame_0] = view_mat0
    view_mats[frame_1] = view_mat1
    t_vecs[frame_0] = np.zeros((3), dtype=np.float64)
    t_vecs[frame_1] = t

    # NOTE: why store info about pending points mixed in with the rest of the cloud points?
    # 'id': coords [., ., .] 3d coordinates, rays [(frame, coords)] - for new points addition
    points_cloud = {}

    # initialize the point cloud from the two known camera poses
    points3d, ids, median_cos = triangulate(frame_0, frame_1)
    for id, point3d in zip(ids, points3d):
        points_cloud[id] = point3d

    def find_camera_pose(frame_id):
        # first find the inliers with RANSAC,
        # then run PnP with iterative least squares on the inliers
        corners = corner_storage[frame_id]
        points3d = []
        points2d = []
        for id, point2d in zip(corners.ids.flatten(), corners.points):
            if id in points_cloud.keys():
                points3d.append(points_cloud[id])
                points2d.append(point2d)
        # so that EPnP works
        points3d = np.array(points3d, dtype=np.float64)
        points2d = np.array(points2d, dtype=np.float64)
        if len(points3d) < 4:
            return None

        # pick a reference sample of 4 points, estimate R and t from it,
        # then keep the sample for which the largest number of other points
        # projects correctly
        success, R, t, inliers = cv2.solvePnPRansac(
            objectPoints=points3d,
            imagePoints=points2d,
            cameraMatrix=intrinsic_mat,
            distCoeffs=None,
            flags=cv2.SOLVEPNP_EPNP,
            confidence=0.9995,
            reprojectionError=MAX_REPROJECTION_ERROR)

        if not success:
            return None

        inliers = np.asarray(inliers, dtype=np.int32).flatten()
        points3d = np.array(points3d)
        points2d = np.array(points2d)
        points3d = points3d[inliers]
        points2d = points2d[inliers]
        retval, R, t = cv2.solvePnP(objectPoints=points3d,
                                    imagePoints=points2d,
                                    cameraMatrix=intrinsic_mat,
                                    distCoeffs=None,
                                    flags=cv2.SOLVEPNP_ITERATIVE,
                                    useExtrinsicGuess=True,
                                    rvec=R,
                                    tvec=t)
        return R, t, len(inliers)

    # could be sped up
    def frames_without_pose():
        frames = []
        for ind, mat in enumerate(view_mats):
            if mat is None:
                frames.append(ind)
        return frames

    def try_add_new_point_to_cloud(corner_id):
        # all frames that contain this corner and already have a view_mat
        # (it would be enough to store only the frames where the corner is an inlier)
        frames = []
        for frame in frames_of_corner[corner_id]:
            if view_mats[frame[0]] is not None:
                frames.append(frame)
        if len(frames) < 2:
            return

        max_dist = 0
        best_frames = [None, None]
        best_ids = [None, None]

        # could be sped up at least twofold
        for frame_1 in frames:
            for frame_2 in frames:
                if frame_1 == frame_2:
                    continue
                # this should be impossible: whenever view_mat is set, t_vec is set too
                if t_vecs[frame_1[0]] is None or t_vecs[frame_2[0]] is None:
                    continue
                t_vec_1 = t_vecs[frame_1[0]]
                t_vec_2 = t_vecs[frame_2[0]]
                dist = np.linalg.norm(t_vec_1 - t_vec_2)
                if max_dist < dist:
                    max_dist = dist
                    best_frames = [frame_1[0], frame_2[0]]
                    best_ids = [frame_1[1], frame_2[1]]

        if max_dist > MIN_DIST_TRIANGULATE:
            ids = np.array([corner_id])
            points1 = corner_storage[best_frames[0]].points[np.array(
                [best_ids[0]])]
            points2 = corner_storage[best_frames[1]].points[np.array(
                [best_ids[1]])]
            corrs = Correspondences(ids, points1, points2)
            points3d, ids, med_cos = triangulate_correspondences(
                corrs,
                view_mats[best_frames[0]],
                view_mats[best_frames[1]],
                intrinsic_mat,
                parameters=TriangulationParameters(2, 1e-3, 1e-4))
            # why the extra check, and why the pop?
            if len(points3d) > 0:
                points_cloud[ids[0]] = points3d[0]
                cur_corners_occurencies.pop(ids[0], None)

    # as a bonus, this function returns the points seen at least twice,
    # and it also bumps the occurrence counters of points that are already in the cloud
    def add_corners_occurencies(frame_id):
        two_and_greater = []
        for id in corner_storage[frame_id].ids.flatten():
            if id in cur_corners_occurencies.keys():
                cur_corners_occurencies[id] += 1
                if cur_corners_occurencies[id] >= 2:
                    two_and_greater.append(id)
        return two_and_greater

    # not the most efficient approach
    while len(frames_without_pose()) > 0:
        # while there are frames without a camera pose, pick the best one to recover next,
        # i.e. the one with the most inliers after PnP RANSAC
        wanted_frames = frames_without_pose()
        # inliers_amount = []  (purpose of this list is unclear)
        max_col = -1
        best_frame_id = -1
        best_R = None
        best_t = None
        for frame_id in wanted_frames:
            result = find_camera_pose(frame_id)
            if result is not None:
                R, t, col = result
                if col > max_col:
                    max_col = col
                    best_frame_id = frame_id
                    best_R = R
                    best_t = t
        if max_col == -1:
            # no more camera poses can be recovered
            break

        view_mats[best_frame_id] = rodrigues_and_translation_to_view_mat3x4(
            best_R, best_t)
        t_vecs[best_frame_id] = best_t

        try_add = add_corners_occurencies(best_frame_id)
        for corner in try_add:
            try_add_new_point_to_cloud(corner)
        print(
            'Now we add camera pose on {}-th frame, camera pose was calculated by {} inliers'
            .format(best_frame_id, max_col))
        ttl_poses_calculated = np.sum([mat is not None for mat in view_mats])
        percent = "{:.0f}".format(ttl_poses_calculated / frame_count * 100.0)
        print('{}% poses calculated'.format(percent))

        print('{} points in 3D points cloud'.format(len(points_cloud)))

    last_view_mat = None
    for i in range(frame_count):
        if view_mats[i] is None:
            view_mats[i] = last_view_mat
        else:
            last_view_mat = view_mats[i]

    #check that all matrices are rotation matrices
    for ind, i in enumerate(view_mats):
        if not check_if_mat_is_rot_mat(i):
            print(
                'In frame {}/{} the view matrix is not a rotation matrix :('.
                format(ind + 1, frame_count))
            raise NotImplementedError()

    ids = []
    points = []
    for id in points_cloud.keys():
        ids.append(id)
        points.append(points_cloud[id])

    point_cloud_builder = PointCloudBuilder(np.array(ids, dtype=np.int32),
                                            np.array(points))

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
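
Note: the manual SVD decomposition in essential_mat_to_camera_view_matrix above resolves the four-fold (R, t) ambiguity by counting points with positive depth. OpenCV's cv2.recoverPose performs the same cheirality check internally; a hedged alternative sketch (not the author's code), where points_1 and points_2 are the matched 2D points:

import cv2


def essential_to_relative_pose(e_mat, points_1, points_2, intrinsic_mat):
    # recoverPose enumerates the four (R, t) candidates of the essential
    # matrix and returns the one that places the most points in front of
    # both cameras.
    _, rot, t, _ = cv2.recoverPose(e_mat, points_1, points_2, intrinsic_mat)
    return rot, t.flatten()
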
Example 9
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    if known_view_1 is None or known_view_2 is None:
        raise NotImplementedError()

    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    view_mats_tmp = [None for _ in range(len(corner_storage))]
    all_points = {}
    not_none_mats = [known_view_1[0], known_view_2[0]]
    none_mats = set(
        [ind for ind in range(len(view_mats_tmp)) if ind not in not_none_mats])

    view_mats_tmp[known_view_1[0]] = pose_to_view_mat3x4(known_view_1[1])
    view_mats_tmp[known_view_2[0]] = pose_to_view_mat3x4(known_view_2[1])
    add_new_points(corner_storage, view_mats_tmp, known_view_1[0],
                   known_view_2[0], all_points, intrinsic_mat)

    old_len = 2
    for _ in range(len(view_mats_tmp)):
        for ind in none_mats:
            corner_ids = list(corner_storage[ind].ids.reshape(-1))
            intersection_inds = [
                point_ind for point_ind in corner_ids if
                point_ind in all_points and all_points[point_ind] is not None
            ]
            # solvePnPRansac asserts internally, so we need at least 5 points
            if len(intersection_inds) < 5:
                continue
            # print(ind, len(corner_storage), len(corner_storage[ind].points), np.max(intersection_inds))
            corner_points = {
                i: p
                for i, p in zip(corner_ids, corner_storage[ind].points)
            }
            intersection_points_c = np.array(
                [corner_points[i] for i in intersection_inds])
            intersection_points_f = np.array(
                [all_points[i] for i in intersection_inds])
            # print(intersection_points_f)
            # print(intersection_points_f.shape, intersection_points_c.shape, intrinsic_mat)
            retval, rvec, tvec, inliers = cv2.solvePnPRansac(
                intersection_points_f, intersection_points_c, intrinsic_mat,
                None)

            if not retval:
                continue

            print(f'Processing frame: {ind}')

            # solvePnPRansac returns row indices into the arrays it was given,
            # so map them back to corner ids before marking outliers as None
            inlier_ids = {intersection_inds[j] for j in inliers.flatten()}
            newly_none_number = 0
            for i in intersection_inds:
                if i not in inlier_ids:
                    newly_none_number += 1
                    all_points[i] = None

            print(
                f'{newly_none_number} points filled as None, len of inliers: {inliers.shape[0]}'
            )
            print(
                f'Number of non-None points: {len([p for p in all_points.keys() if all_points[p] is not None])}'
            )

            view_mats_tmp[ind] = rodrigues_and_translation_to_view_mat3x4(
                rvec, tvec)

            for not_none_ind in not_none_mats:
                add_new_points(corner_storage, view_mats_tmp, ind,
                               not_none_ind, all_points, intrinsic_mat)

            not_none_mats.append(ind)
            none_mats.remove(ind)
            break

        if len(not_none_mats) == old_len:
            break
        old_len = len(not_none_mats)

    view_mats = [None for _ in range(len(corner_storage))]
    for view_mat_ind in not_none_mats:
        view_mats[view_mat_ind] = view_mats_tmp[view_mat_ind]
    last_ind = 0
    for i in range(len(view_mats)):
        if view_mats[i] is None:
            view_mats[i] = view_mats[last_ind]
        else:
            last_ind = i
    all_points = {k: v for k, v in all_points.items() if v is not None}
    point_cloud_builder = PointCloudBuilder(
        np.array(list(all_points.keys()), dtype=np.int64),
        np.array(list(all_points.values())))

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
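
Note: Example 9 assumes an add_new_points helper. A hypothetical sketch built from the same support-library functions, with made-up triangulation thresholds; it only adds corner ids that the all_points dict has not seen yet:

def add_new_points(corner_storage, view_mats, frame_1, frame_2,
                   all_points, intrinsic_mat):
    # Triangulate the corners shared by two solved frames and record any
    # points that are not in the dictionary yet.
    correspondences = build_correspondences(corner_storage[frame_1],
                                            corner_storage[frame_2])
    if len(correspondences.ids) == 0:
        return
    points3d, ids, _ = triangulate_correspondences(
        correspondences, view_mats[frame_1], view_mats[frame_2],
        intrinsic_mat,
        TriangulationParameters(max_reprojection_error=2.0,
                                min_triangulation_angle_deg=1.0,
                                min_depth=0.1))
    for point_id, point in zip(ids, points3d):
        if point_id not in all_points:
            all_points[point_id] = point
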
Example 10
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    if known_view_1 is None or known_view_2 is None:
        raise NotImplementedError()

    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(
        camera_parameters,
        rgb_sequence[0].shape[0]
    )

    # TODO: implement
    frame_count = len(corner_storage)
    view_mats = [None] * frame_count
    view_1_id, view_1 = known_view_1[0], pose_to_view_mat3x4(known_view_1[1])
    view_2_id, view_2 = known_view_2[0], pose_to_view_mat3x4(known_view_2[1])

    view_mats[view_1_id] = view_1
    view_mats[view_2_id] = view_2

    correspondences = build_correspondences(corner_storage[view_1_id], corner_storage[view_2_id], None)
    triangulation_parameters = TriangulationParameters(1, 5, 0)
    points, ids, _ = triangulate_correspondences(correspondences, view_1, view_2,
                                                 intrinsic_mat, TriangulationParameters(0.1, 0.1, 0))

    #corners_0 = corner_storage[0]
    point_cloud_builder = PointCloudBuilder(ids, points)

    unknown_frames = frame_count - 2
    known_frames = {view_1_id, view_2_id}

    while unknown_frames > 0:
        for i in range(frame_count):
            if view_mats[i] is None:
                corners = corner_storage[i]
                interesting_ids, in_corners, in_cloud = np.intersect1d(
                    corners.ids.flatten(),
                    point_cloud_builder.ids.flatten(),
                    return_indices=True)
                points_2d = corners.points[in_corners]
                points_3d = point_cloud_builder.points[in_cloud]

                if len(interesting_ids) < 3:
                    continue

                method = cv2.SOLVEPNP_EPNP
                if len(interesting_ids) == 3:
                    method = cv2.SOLVEPNP_P3P
                retval, rvec, tvec, inliers = cv2.solvePnPRansac(points_3d, points_2d, intrinsic_mat, None,
                                                                 flags=method)
                if not retval:
                    continue
                retval, rvec, tvec = cv2.solvePnP(points_3d[inliers], points_2d[inliers], intrinsic_mat, None,
                                                  rvec=rvec, tvec=tvec, useExtrinsicGuess=True)
                if not retval:
                    continue

                view_mats[i] = rodrigues_and_translation_to_view_mat3x4(rvec, tvec)
                unknown_frames -= 1
                known_frames.add(i)

                outliers = np.delete(interesting_ids, inliers)
                for frame in known_frames:
                    correspondences = build_correspondences(corners, corner_storage[frame], outliers)
                    points_3d, corner_ids, _ = triangulate_correspondences(
                        correspondences, view_mats[i], view_mats[frame], intrinsic_mat, triangulation_parameters)
                    point_cloud_builder.add_points(corner_ids, points_3d)

    calc_point_cloud_colors(
        point_cloud_builder,
        rgb_sequence,
        view_mats,
        intrinsic_mat,
        corner_storage,
        5.0
    )
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
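
Note: most of the examples call build_correspondences from the unshown support library. A sketch of what it presumably does, given that Example 8 constructs Correspondences(ids, points_1, points_2) directly (the real library already defines this type, so the namedtuple here is only for self-containment):

from collections import namedtuple

import numpy as np

Correspondences = namedtuple('Correspondences', ('ids', 'points_1', 'points_2'))


def build_correspondences(corners_1, corners_2, ids_to_remove=None):
    # Intersect the corner ids of two frames, optionally drop known outlier
    # ids, and return the matched 2D points of both frames.
    common, idx_1, idx_2 = np.intersect1d(corners_1.ids.flatten(),
                                          corners_2.ids.flatten(),
                                          return_indices=True)
    if ids_to_remove is not None and len(ids_to_remove) > 0:
        keep = ~np.isin(common, ids_to_remove)
        common, idx_1, idx_2 = common[keep], idx_1[keep], idx_2[keep]
    return Correspondences(common, corners_1.points[idx_1],
                           corners_2.points[idx_2])
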
Example 11
def track_and_calc_colors(
    camera_parameters: CameraParameters,
    corner_storage: CornerStorage,
    frame_sequence_path: str,
    known_view_1: Optional[Tuple[int, Pose]] = None,
    known_view_2: Optional[Tuple[int, Pose]] = None
) -> Tuple[List[Pose], PointCloud]:
    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])
    if known_view_1 is None or known_view_2 is None:
        known_view_1, known_view_2 = initial_camera_views(
            corner_storage, intrinsic_mat)
    print(known_view_1[1])
    print(known_view_2[1])

    print(known_view_1[0], known_view_2[0])

    print(intrinsic_mat)

    points_3d, points_ids = calc_starting_points(intrinsic_mat, corner_storage,
                                                 known_view_1, known_view_2)

    #initial_points_3d, initial_points_ids = points_3d.copy(), points_ids.copy()

    n_points = corner_storage.max_corner_id() + 1
    res_points_3d = np.full((n_points, 3), None)
    #res_points_3d[points_ids] = points_3d

    view_mats = [np.full((3, 4), None) for _ in range(len(corner_storage))]
    view_mats[known_view_1[0]], view_mats[
        known_view_2[0]] = pose_to_view_mat3x4(
            known_view_1[1]), pose_to_view_mat3x4(known_view_2[1])

    print(f'len(corner_storage):{len(corner_storage)}')

    id1 = known_view_1[0]
    for n_frame, corners in enumerate(corner_storage[id1 + 1:], id1 + 1):
        common_obj, common_img, common_ids = get_common_points(
            corners, points_ids, points_3d)

        mat, points_3d, points_ids, reprojection_error = build_view_mat(
            common_obj, common_img, common_ids, intrinsic_mat)
        points_ids = np.array(points_ids)
        view_mats[n_frame] = mat

        is_null = np.array([(x[0] is None) for x in res_points_3d[points_ids]])
        #res_points_3d[points_ids[is_null]] = points_3d[is_null]

        n_inliers = len(points_3d)

        points_3d, points_ids = calc_new_points(n_frame, intrinsic_mat,
                                                corner_storage, view_mats,
                                                points_3d, points_ids)

        print(
            f"Processing frame #{n_frame}. Number of inliers: {n_inliers}. "
            f"Reprojection error: {reprojection_error}. Tracking points: {len(common_img)}"
        )

    for n_frame, corners in list(enumerate(corner_storage))[::-1]:
        common_obj, common_img, common_ids = get_common_points(
            corners, points_ids, points_3d)

        mat, points_3d, points_ids, reprojection_error = build_view_mat(
            common_obj, common_img, common_ids, intrinsic_mat)
        view_mats[n_frame] = mat

        is_null = np.array([(x[0] is None) for x in res_points_3d[points_ids]])
        #res_points_3d[points_ids[is_null]] = points_3d[is_null]

        n_inliers = len(points_3d)

        points_3d, points_ids = calc_new_points(n_frame, intrinsic_mat,
                                                corner_storage, view_mats,
                                                points_3d, points_ids)

        print(
            f"Processing frame #{n_frame}. Number of inliers: {n_inliers}. "
            f"Reprojection error: {reprojection_error}. Tracking points: {len(common_img)}"
        )

    # Approximating

    n_iter = 1
    for iter in range(n_iter):
        for n_frame, corners in enumerate(corner_storage):
            common_obj, common_img, common_ids = get_common_points(
                corners, points_ids, points_3d)

            mat, points_3d, points_ids, reprojection_error = build_view_mat(
                common_obj, common_img, common_ids, intrinsic_mat)

            view_mats[n_frame] = mat
            if iter == n_iter - 1:
                res_points_3d[points_ids] = points_3d

            n_inliers = len(points_3d)

            points_3d, points_ids = calc_new_points(n_frame, intrinsic_mat,
                                                    corner_storage, view_mats,
                                                    points_3d, points_ids)

            print(
                f"Processing frame #{n_frame}. Number of inliers: {n_inliers}. "
                f"Reprojection error: {reprojection_error}. Tracking points: {len(common_img)}"
            )

        for n_frame, corners in list(enumerate(corner_storage))[::-1]:
            common_obj, common_img, common_ids = get_common_points(
                corners, points_ids, points_3d)

            mat, points_3d, points_ids, reprojection_error = build_view_mat(
                common_obj, common_img, common_ids, intrinsic_mat)

            view_mats[n_frame] = mat
            if iter == n_iter - 1:
                res_points_3d[points_ids] = points_3d

            n_inliers = len(points_3d)

            points_3d, points_ids = calc_new_points(n_frame, intrinsic_mat,
                                                    corner_storage, view_mats,
                                                    points_3d, points_ids)

            print(
                f"Processing frame #{n_frame}. Number of inliers: {n_inliers}. "
                f"Reprojection error: {reprojection_error}. Tracking points: {len(common_img)}"
            )

    res_points_ids = np.array(
        [i for i, x in enumerate(res_points_3d) if x[0] is not None])
    res_points_3d = np.array(res_points_3d[res_points_ids], dtype=float)

    point_cloud_builder = PointCloudBuilder(ids=res_points_ids,
                                            points=res_points_3d)

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)

    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
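
Note: Example 11 leans on get_common_points, build_view_mat and calc_new_points helpers that are not shown. A hypothetical sketch of the first one, returning aligned object points, image points and ids for the corners already present in the cloud:

import numpy as np


def get_common_points(corners, points_ids, points_3d):
    # Match the frame's corners against the current cloud ids and return the
    # corresponding 3D points, 2D points and corner ids.
    common_ids, idx_cloud, idx_corners = np.intersect1d(
        np.asarray(points_ids).flatten(), corners.ids.flatten(),
        return_indices=True)
    common_obj = np.asarray(points_3d)[idx_cloud]
    common_img = corners.points[idx_corners]
    return common_obj, common_img, common_ids
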
Example 12
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    if known_view_1 is None or known_view_2 is None:
        raise NotImplementedError()

    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    triang_params = TriangulationParameters(
        max_reprojection_error=MAX_PROJ_ERROR,
        min_triangulation_angle_deg=MIN_TRIANG_ANG,
        min_depth=MIN_DEPTH)

    correspondences = build_correspondences(corner_storage[known_view_1[0]],
                                            corner_storage[known_view_2[0]])
    view_mat_1 = pose_to_view_mat3x4(known_view_1[1])
    view_mat_2 = pose_to_view_mat3x4(known_view_2[1])
    points, ids, median_cos = triangulate_correspondences(
        correspondences, view_mat_1, view_mat_2, intrinsic_mat, triang_params)
    if len(points) < 10:
        print("\rPlease, try other frames")
        exit(0)

    view_mats = [None] * len(rgb_sequence)
    view_mats[known_view_1[0]] = view_mat_1
    view_mats[known_view_2[0]] = view_mat_2

    point_cloud_builder = PointCloudBuilder(ids, points)
    was_update = True
    while was_update:
        was_update = False
        for i, (frame, corners) in enumerate(zip(rgb_sequence,
                                                 corner_storage)):
            if view_mats[i] is not None:
                continue
            _, (idx_1,
                idx_2) = snp.intersect(point_cloud_builder.ids.flatten(),
                                       corners.ids.flatten(),
                                       indices=True)
            try:
                retval, rvec, tvec, inliers = cv2.solvePnPRansac(
                    point_cloud_builder.points[idx_1],
                    corners.points[idx_2],
                    intrinsic_mat,
                    distCoeffs=None)
                inliers = np.array(inliers, dtype=int)
                if len(inliers) > 0:
                    view_mats[i] = rodrigues_and_translation_to_view_mat3x4(
                        rvec, tvec)
                    was_update = True
                print(
                    f"\rFrame {i} out of {len(rgb_sequence)}, inliners: {len(inliers)}",
                    end="")
            except Exception:
                print(f"\rFrame {i} out of {len(rgb_sequence)}, inliners: {0}",
                      end="")
            if view_mats[i] is None:
                continue
            cur_corner = filter_frame_corners(corner_storage[i],
                                              inliers.flatten())

            for j in range(len(rgb_sequence)):
                if view_mats[j] is None:
                    continue
                correspondences = build_correspondences(
                    corner_storage[j], cur_corner)
                if len(correspondences.ids) == 0:
                    continue
                points, ids, median_cos = triangulate_correspondences(
                    correspondences, view_mats[j], view_mats[i], intrinsic_mat,
                    triang_params)
                point_cloud_builder.add_points(ids, points)

        print()

    first_mat = next((mat for mat in view_mats if mat is not None), None)
    if first_mat is None:
        print("\rFail")
        exit(0)

    view_mats[0] = first_mat
    for i in range(1, len(view_mats)):
        if view_mats[i] is None:
            view_mats[i] = view_mats[i - 1]
    view_mats = np.array(view_mats)

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
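
Note: Example 12 uses a filter_frame_corners helper that is not shown. A hypothetical sketch, mirroring the FrameCorners(*[c[inliers] for c in ...]) trick from Example 6 and assuming FrameCorners is iterable over its per-corner arrays:

def filter_frame_corners(frame_corners, indices):
    # Keep only the corners at the given positions, preserving all of the
    # per-corner arrays (ids, points, sizes, ...).
    return FrameCorners(*[field[indices] for field in frame_corners])
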
Example 13
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    if known_view_1 is None or known_view_2 is None:
        raise NotImplementedError()

    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    frames_cnt = len(rgb_sequence)
    unused = np.array([
        i for i in range(0, frames_cnt)
        if i not in [known_view_1[0], known_view_2[0]]
    ])
    used = np.array([known_view_1[0], known_view_2[0]])
    used_pose = [known_view_1[1], known_view_2[1]]

    points, ids, cos = triangulate(known_view_1[0], known_view_2[0],
                                   known_view_1[1], known_view_2[1],
                                   corner_storage, intrinsic_mat, None)
    point_cloud_builder = PointCloudBuilder(ids, points)
    # print("Add frames", *known_ids)

    while unused.shape[0] > 0:
        added = []
        for i in range(len(unused)):
            pose_i, ids_to_remove = camera_pose(unused[i], corner_storage,
                                                point_cloud_builder,
                                                intrinsic_mat)
            if pose_i is None:
                continue

            for j in range(len(used)):
                points, ids, cos = triangulate(unused[i], used[j], pose_i,
                                               used_pose[j], corner_storage,
                                               intrinsic_mat, ids_to_remove)
                point_cloud_builder.add_points(ids, points)

            used = np.append(used, [unused[i]])
            used_pose.append(pose_i)
            added.append(unused[i])
            # print("Frame", unused[i], "done!!")

        if len(added) == 0:
            break
        unused = np.setdiff1d(unused, added)

        used_pose = recalculate_poses(used, point_cloud_builder,
                                      corner_storage, intrinsic_mat)

    view_mats = [None for i in range(frames_cnt)]
    for i in range(len(used)):
        view_mats[used[i]] = pose_to_view_mat3x4(used_pose[i])

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
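
Note: Example 13 assumes triangulate, camera_pose and recalculate_poses helpers. A hypothetical sketch of camera_pose, which has to return a Pose (not a view matrix, since the caller applies pose_to_view_mat3x4 later) together with the corner ids rejected as outliers; it reuses the support-library conversions shown earlier:

import cv2
import numpy as np


def camera_pose(frame, corner_storage, point_cloud_builder, intrinsic_mat):
    # Solve PnP RANSAC for one frame against the current cloud; return the
    # recovered Pose and the outlier corner ids, or (None, None) on failure.
    corners = corner_storage[frame]
    ids, idx_corners, idx_cloud = np.intersect1d(
        corners.ids.flatten(), point_cloud_builder.ids.flatten(),
        return_indices=True)
    if len(ids) < 6:
        return None, None
    retval, rvec, tvec, inliers = cv2.solvePnPRansac(
        point_cloud_builder.points[idx_cloud], corners.points[idx_corners],
        intrinsic_mat, distCoeffs=None)
    if not retval or inliers is None:
        return None, None
    view_mat = rodrigues_and_translation_to_view_mat3x4(rvec, tvec)
    outliers = np.delete(ids, inliers.flatten())
    return view_mat3x4_to_pose(view_mat), outliers
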