Example #1
0
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    """Stub tracker: repeat the first known pose for every frame and seed
    the cloud with a single dummy point at the origin."""
    if known_view_1 is None or known_view_2 is None:
        raise NotImplementedError()

    frames = frameseq.read_rgb_f32(frame_sequence_path)
    # The image height fixes the scale of the OpenCV camera matrix.
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            frames[0].shape[0])

    # TODO: implement
    total_frames = len(corner_storage)
    known_mat = pose_to_view_mat3x4(known_view_1[1])
    view_mats = [known_mat] * total_frames
    first_corners = corner_storage[0]
    # One placeholder 3D point so that the builder is non-empty.
    point_cloud_builder = PointCloudBuilder(first_corners.ids[:1],
                                            np.zeros((1, 3)))

    calc_point_cloud_colors(point_cloud_builder,
                            frames,
                            view_mats,
                            intrinsic_mat,
                            corner_storage,
                            5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = [view_mat3x4_to_pose(mat) for mat in view_mats]
    return poses, point_cloud
Example #2
0
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    """Run the tracker over the sequence and colorize the resulting cloud."""
    frames = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            frames[0].shape[0])

    # The tracker may also return a filtered corner storage.
    tracking_result = track(intrinsic_mat, corner_storage,
                            known_view_1, known_view_2)
    view_mats, point_ids, points3d, corner_storage = tracking_result

    point_cloud_builder = PointCloudBuilder(point_ids, points3d)

    calc_point_cloud_colors(point_cloud_builder,
                            frames,
                            view_mats,
                            intrinsic_mat,
                            corner_storage,
                            5.0)
    poses = [view_mat3x4_to_pose(mat) for mat in view_mats]
    return poses, point_cloud_builder.build_point_cloud()
Example #3
0
def track_camera(corner_storage, intrinsic_mat, known_view1, known_view2):
    """Estimate a 3x4 view matrix for every frame.

    Seeds the point cloud by triangulating correspondences between the two
    known views, then solves the remaining frames outwards from the first
    known frame.

    :param corner_storage: per-frame 2D corners.
    :param intrinsic_mat: 3x3 OpenCV camera matrix.
    :param known_view1: (frame index, Pose) of the first known camera.
    :param known_view2: (frame index, Pose) of the second known camera.
    :return: (list of view matrices, populated PointCloudBuilder).
    """
    n = len(corner_storage)
    point_cloud_builder = PointCloudBuilder()
    view_mat = [eye3x4()] * n

    view_mat[known_view1[0]] = pose_to_view_mat3x4(known_view1[1])
    view_mat[known_view2[0]] = pose_to_view_mat3x4(known_view2[1])

    min_angle = 1.0
    max_error = 1.0
    new_points = []
    ids = []
    correspondences = build_correspondences(corner_storage[known_view1[0]],
                                            corner_storage[known_view2[0]])
    # Progressively relax the triangulation thresholds until at least 10
    # seed points appear.  The attempt cap fixes a defect in the original
    # code: with too few correspondences the unbounded `while` never reached
    # 10 points and spun forever.
    for _ in range(30):
        if len(new_points) >= 10:
            break
        new_points, ids, _ = triangulate_correspondences(
            correspondences, pose_to_view_mat3x4(known_view1[1]),
            pose_to_view_mat3x4(known_view2[1]), intrinsic_mat,
            TriangulationParameters(max_error, min_angle, 0))
        min_angle -= 0.2
        max_error += 0.4
    point_cloud_builder.add_points(ids, new_points)

    # Solve backwards, then forwards, from the first known frame.
    for i in range(known_view1[0] - 1, -1, -1):
        solve_frame(i, corner_storage, point_cloud_builder, view_mat,
                    intrinsic_mat, 1)

    for i in range(known_view1[0] + 1, n):
        solve_frame(i, corner_storage, point_cloud_builder, view_mat,
                    intrinsic_mat, -1)
    return view_mat, point_cloud_builder
Example #4
0
def track(iters, trackers, cloud, camera):
    """Iteratively refine camera poses and the 3D point cloud.

    Each iteration triangulates new points from pairs of nearby frames,
    re-solves PnP for every tracker, and keeps only the points that are
    PnP inliers in enough frames.

    :param iters: number of refinement iterations.
    :param trackers: per-frame objects exposing .id, .mtx, .corners and
                     .pnp(cloud, camera).
    :param cloud: initial PointCloudBuilder.
    :param camera: 3x3 intrinsic camera matrix.
    :return: the final, inlier-filtered PointCloudBuilder.
    """

    def filter_by_inliers(current_cloud, iteration=None):
        # Re-run PnP for every tracker and keep only the cloud points that
        # are inliers frequently enough across frames.  When `iteration` is
        # given, per-frame progress is printed (matches the original output,
        # including the historical "inliners" spelling).
        fc = FrequencyCounter()
        for t in trackers:
            inliers = t.pnp(current_cloud, camera)
            inliers = np.array(inliers).flatten().astype(int)
            if iteration is not None:
                print("\rIteration {}/{}, PnP for frame {}/{}, {} inliners".format(
                    iteration + 1, iters, t.id, len(trackers), len(inliers)),
                      end=' ' * 20)
            if len(inliers):
                ids1, ids2 = build_index_intersection(current_cloud.ids, inliers)
                fc.add(inliers[ids2], current_cloud.points[ids1], t.corners.ids)
        good_points_ids, good_points = fc.get_freqs_above(
            INLINER_FREQUENCY_TRASHHOLD, MIN_INLINER_FRAMES)
        return PointCloudBuilder(good_points_ids, good_points)

    # 'iteration' instead of 'iter': never shadow the builtin.
    for iteration in range(iters):
        for frame_num, t1 in enumerate(trackers):
            print("\rIteration {}/{}, triangulate for frame {}/{}".format(
                iteration + 1, iters, t1.id, len(trackers)),
                  end=' ' * 20)
            if t1.mtx is None:
                continue
            # Pair the frame with neighbours inside a fixed window; the
            # range is intentionally not clamped so the stepping phase stays
            # anchored at frame_num - FRAMES_MAX_WINDOW.
            for j in range(frame_num - FRAMES_MAX_WINDOW,
                           frame_num + FRAMES_MAX_WINDOW + 1, FRAMES_STEP):
                if j < 0 or j >= len(trackers):
                    continue
                t2 = trackers[j]
                if abs(t1.id - t2.id) <= FRAMES_MIN_WINDOW or t2.mtx is None:
                    continue
                corrs = build_correspondences(t1.corners, t2.corners)
                if not len(corrs.ids):
                    continue
                points, ids, _ = triangulate_correspondences(
                    corrs, t1.mtx, t2.mtx, camera, triang_params)
                if len(points):
                    cloud.add_points(ids, points)
        cloud = filter_by_inliers(cloud, iteration)
        # Refresh every tracker's pose against the filtered cloud.
        for t in trackers:
            t.pnp(cloud, camera)

        print("\rIteration {}/{}, {} points in the cloud".format(
            iteration + 1, iters, len(cloud.ids)),
              end=' ' * 20 + "\n")

    # Final filtering pass, without per-frame progress printing.
    return filter_by_inliers(cloud)
Example #5
0
    def __init__(self, corner_storage: CornerStorage,
                 intrinsic_mat: np.ndarray,
                 parameters: TriangulationParameters,
                 known_view_1: Tuple[int, Pose], known_view_2: Tuple[int,
                                                                     Pose]):
        """Store the tracking inputs and seed the cloud from the two known views."""
        self._corner_storage = corner_storage
        self._intrinsic_mat = intrinsic_mat
        self._triangulation_parameters = parameters
        self._length = len(corner_storage)
        self._builder = PointCloudBuilder()

        frame_1, pose_1 = known_view_1
        frame_2, pose_2 = known_view_2
        self._track = [None] * self._length
        self._track[frame_1] = pose_to_view_mat3x4(pose_1)
        self._track[frame_2] = pose_to_view_mat3x4(pose_2)
        # Initial triangulation between the two frames with known cameras.
        self._update_cloud(frame_1, frame_2)
Example #6
0
    def __init__(self, corner_storage, intrinsic_mat, view_1, view_2):
        """Set up tracking state and triangulate an initial point cloud."""
        self.corner_storage = corner_storage
        self.frame_count = len(corner_storage)
        self.intrinsic_mat = intrinsic_mat
        self.triangulation_parameters = TriangulationParameters(1, 3, .1)
        self.view_mats = {}

        if view_1 is not None and view_2 is not None:
            idx_1, idx_2 = view_1[0], view_2[0]
            mat_1 = pose_to_view_mat3x4(view_1[1])
            mat_2 = pose_to_view_mat3x4(view_2[1])
        else:
            print('Finding initial poses')
            idx_1, mat_1, idx_2, mat_2 = self.find_best_start_poses()
            print('Initial poses found in frames {0} and {1}'.format(idx_1, idx_2))

        # Triangulate correspondences between the two starting frames.
        corrs = build_correspondences(self.corner_storage[idx_1],
                                      self.corner_storage[idx_2])
        points, ids, med = triangulate_correspondences(
            corrs, mat_1, mat_2, self.intrinsic_mat,
            self.triangulation_parameters)

        self.ids = ids
        self.point_cloud_builder = PointCloudBuilder(ids, points)
        self.point_frames = {}
        self.last_retriangulated = {}
        for point_id in ids:
            self.point_frames[point_id] = [idx_1, idx_2]
            self.last_retriangulated[point_id] = 2

        # Bookkeeping: frames without a view matrix and inlier statistics.
        self.used_inliers = {}
        self.view_nones = set(range(self.frame_count))
        self.view_mats[idx_1] = mat_1
        self.used_inliers[idx_1] = 0
        self.view_nones.remove(idx_1)
        self.view_mats[idx_2] = mat_2
        self.used_inliers[idx_2] = 0
        self.view_nones.remove(idx_2)
        self.last_added_idx = idx_2
        self.last_inliers = []

        self.retriangulate_frames = 3
        self.max_repr_error = 1.5
        self.used_inliers_point = {}
        self.step = 0
Example #7
0
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    """Track the camera through the sequence and build a colored point cloud.

    Falls back to identity poses and an empty cloud when tracking fails.
    """
    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(
        camera_parameters,
        rgb_sequence[0].shape[0]
    )
    view_mats, point_cloud_builder = [eye3x4()] * len(corner_storage), PointCloudBuilder()

    try:
        # Bug fix: the original unconditionally recomputed the initial pair,
        # silently discarding caller-supplied known views.  Search for a
        # pair only when the caller did not provide both.
        if known_view_1 is None or known_view_2 is None:
            known_view_1, known_view_2 = _find_pos_pair(intrinsic_mat, corner_storage)
        tracker = CameraTracker(intrinsic_mat, corner_storage, known_view_1, known_view_2)
        view_mats, point_cloud_builder = tracker.track()
    except TrackingError as error:
        print(error)
        print('Poses and point cloud are not calculated')

    calc_point_cloud_colors(
        point_cloud_builder,
        rgb_sequence,
        view_mats,
        intrinsic_mat,
        corner_storage,
        5.0
    )
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
Example #8
0
    def __init__(self, intrinsic_mat, corner_storage, known_view_1, known_view_2):
        """Initialize tracking state from two frames with known camera poses."""
        self.intrinsic_mat = intrinsic_mat
        self.corner_storage = corner_storage
        self.pc_builder = PointCloudBuilder()

        frame_num_1, pose_1 = known_view_1
        frame_num_2, pose_2 = known_view_2
        view_matr_1 = pose_to_view_mat3x4(pose_1)
        view_matr_2 = pose_to_view_mat3x4(pose_2)

        # Every frame starts at the identity view; the two known frames are
        # overwritten with their real matrices.
        self.tracked_views = [eye3x4()] * len(self.corner_storage)
        self.tracked_views[frame_num_1] = view_matr_1
        self.tracked_views[frame_num_2] = view_matr_2
        self.initial_baseline = get_baseline(view_matr_1, view_matr_2)
        print("initial baseline =", self.initial_baseline)

        # Seed the cloud from the pair of known frames.
        self._add_points_from_frame(frame_num_1, frame_num_2, initial_triangulation=True)
        self.min_init_frame = min(frame_num_1, frame_num_2)
        self.max_init_frame = max(frame_num_1, frame_num_2)
        self.outliers = set()
Example #9
0
 def __init__(self,
              camera_parameters: CameraParameters,
              corner_storage: CornerStorage,
              frame_sequence_path: str,
              known_view_1: Optional[Tuple[int, Pose]],
              known_view_2: Optional[Tuple[int, Pose]]
              ):
     """Load the frame sequence and register the two starting views.

     When either known view is missing, the initial pair is searched for
     automatically via initialize_known_views().
     """
     self.rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
     self.frame_count = len(self.rgb_sequence)
     self.camera_parameters = camera_parameters
     # Drop corner tracks shorter than CORNER_MIN_TRACK_LENGTH frames.
     self.corner_storage = without_short_tracks(corner_storage, CORNER_MIN_TRACK_LENGTH)
     # The image height fixes the scale of the OpenCV camera matrix.
     self.intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters, self.rgb_sequence[0].shape[0])
     self.point_cloud_builder = PointCloudBuilder()
     # known_view_mats: frame index -> 3x4 view matrix; unknown_view_ids:
     # frames whose camera pose has not been restored yet.
     self.known_view_mats = {}
     self.unknown_view_ids = set(range(self.frame_count))
     if known_view_1 is None or known_view_2 is None:
         self.initialize_known_views()
     else:
         for (frame_idx, pose) in (known_view_1, known_view_2):
             self.update_view(frame_idx, pose_to_view_mat3x4(pose))
Example #10
0
    def track(self):
        """Greedily localize frames until every camera pose is defined.

        Each step restores poses for all still-unsolved frames, commits the
        one with the most inliers, retriangulates around it, and periodically
        refines all camera poses.
        """
        step_num = 1
        num_of_defined_poses = sum(
            info is not None for info in self.tracked_poses)
        while num_of_defined_poses != self.num_of_frames:
            unsolved_frames = [idx for idx, info in enumerate(self.tracked_poses)
                               if info is None]

            candidates = [(frame, self._get_pos(frame))
                          for frame in unsolved_frames]
            new_cams_info = [(frame, info) for frame, info in candidates
                             if info is not None]

            if not new_cams_info:
                # No frame could be solved: fill the rest with identity
                # poses and stop.
                print(f'Can not get more camera positions, '
                      f'{self.num_of_frames - num_of_defined_poses}'
                      f' frames left without defined camera position')
                self.tracked_poses = [
                    CameraInfo(view_mat3x4_to_pose(eye3x4()), 0)
                    if pos is None else pos for pos in self.tracked_poses
                ]
                break

            # Commit the candidate with the most inliers (first on ties,
            # matching the original strict-< scan).
            best_frame, best_new_cam_info = max(
                new_cams_info, key=lambda item: item[1].inliers)

            print('Added camera position for frame ', best_frame)
            print('Number of inliers: ', best_new_cam_info.inliers)

            self.tracked_poses[best_frame] = best_new_cam_info
            self._retriangulate_3d_points(best_frame)

            # Global pose refinement every fourth step.
            if step_num % 4 == 0:
                self._update_camera_poses()
            step_num += 1
            num_of_defined_poses = sum(
                info is not None for info in self.tracked_poses)
            print(
                f'{num_of_defined_poses}/{self.num_of_frames} camera positions found, {len(self.point_cloud)} points in cloud'
            )

        self._update_camera_poses()

        ids = list(self.point_cloud.keys())
        cloud_points = [info.pos for info in self.point_cloud.values()]
        poses = [info.pos for info in self.tracked_poses]
        return poses, PointCloudBuilder(np.array(ids), np.array(cloud_points))
Example #11
0
def track_and_find_point_cloud(intrinsic_mat, corner_storage, known_view_1, known_view_2):
    """Estimate camera poses for all frames and triangulate a point cloud.

    Starts from two frames with known poses, then alternates PnP pose
    estimation with triangulation until no more frames can be improved.

    :param intrinsic_mat: 3x3 OpenCV camera matrix.
    :param corner_storage: per-frame 2D corners.
    :param known_view_1: (frame index, Pose) of the first known camera.
    :param known_view_2: (frame index, Pose) of the second known camera.
    :return: (list of 3x4 view matrices, populated PointCloudBuilder).
    """
    num_frames = len(corner_storage)

    view_mats = [None] * num_frames
    point_cloud_builder = PointCloudBuilder()

    print(f'Frames with known views: {known_view_1[0]} and {known_view_2[0]}')
    view_mats[known_view_1[0]] = pose_to_view_mat3x4(known_view_1[1])
    view_mats[known_view_2[0]] = pose_to_view_mat3x4(known_view_2[1])

    triang_params = TriangulationParameters(max_reprojection_error=MAX_REPR_ERR,
                                            min_triangulation_angle_deg=MIN_TRIANG_ANGLE_DEG,
                                            min_depth=MIN_DEPTH)

    # Seed the cloud from the pair of known frames.
    add_points_to_cloud(point_cloud_builder,
                        corner_storage[known_view_1[0]],
                        corner_storage[known_view_2[0]],
                        view_mats[known_view_1[0]],
                        view_mats[known_view_2[0]],
                        intrinsic_mat,
                        triang_params)

    while True:
        was_updated = False

        for i in range(num_frames):
            if view_mats[i] is not None:
                continue

            print(f"\nCurrent frame: {i}")

            # Find the intersection of the current point cloud and the
            # current frame's corners.  'corner_id' instead of 'id' avoids
            # shadowing the builtin.
            corners = corner_storage[i]
            corner_ids = []
            points_3d = []
            points_2d = []

            for corner_id, corner in zip(corners.ids, corners.points):
                if corner_id not in point_cloud_builder.ids:
                    continue
                ind_in_builder, _ = np.nonzero(point_cloud_builder.ids == corner_id)
                corner_ids.append(corner_id)
                points_3d.append(point_cloud_builder.points[ind_in_builder[0]])
                points_2d.append(corner)

            print(f"Size of intersection of current point cloud and current frame: {len(corner_ids)}")

            # PnP needs enough 2D-3D correspondences to be stable.
            if len(corner_ids) < 5:
                continue

            points_3d = np.array(points_3d)
            points_2d = np.array(points_2d)

            retval, rvec, tvec, inliers = cv2.solvePnPRansac(points_3d.reshape((-1, 1, 3)),
                                                             points_2d.reshape((-1, 1, 2)),
                                                             cameraMatrix=intrinsic_mat,
                                                             distCoeffs=None,
                                                             reprojectionError=PNP_REPROJ_ERROR)

            if not retval:
                print("Unsuccessful solution of PnP")
                continue

            print(f"Position found by {len(inliers)} inliers")

            view_mat = rodrigues_and_translation_to_view_mat3x4(rvec, tvec)
            view_mats[i] = view_mat

            # Triangulate the new frame against every already-solved frame.
            for j in range(num_frames):
                if i != j and view_mats[j] is not None:
                    points_added = add_points_to_cloud(point_cloud_builder,
                                        corner_storage[i],
                                        corner_storage[j],
                                        view_mats[i],
                                        view_mats[j],
                                        intrinsic_mat,
                                        triang_params)
                    if points_added > 0:
                        was_updated = True

            print(f"Current size of point cloud: {point_cloud_builder.points.size}")

        if was_updated is False:
            break

    # Fill frames that were never solved.  Bug fix: the original copied
    # view_mats[i - 1], which for i == 0 wrapped around to view_mats[-1]
    # and could propagate None through the whole list.  Fill leading gaps
    # from the first solved frame, then forward-fill the rest.
    first_known = next(i for i in range(num_frames) if view_mats[i] is not None)
    for i in range(first_known):
        view_mats[i] = view_mats[first_known]
    for i in range(first_known + 1, num_frames):
        if view_mats[i] is None:
            view_mats[i] = view_mats[i - 1]

    return view_mats, point_cloud_builder
Example #12
0
class CameraTracker:
    """Restores per-frame camera poses and a 3D point cloud.

    Starts from two initial views — either supplied by the caller or found
    automatically via essential-matrix decomposition — then repeatedly
    triangulates new points and solves PnP for the remaining frames.
    """

    def __init__(self,
                 camera_parameters: CameraParameters,
                 corner_storage: CornerStorage,
                 frame_sequence_path: str,
                 known_view_1: Optional[Tuple[int, Pose]],
                 known_view_2: Optional[Tuple[int, Pose]]
                 ):
        """Load the frame sequence and register the two starting views."""
        self.rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
        self.frame_count = len(self.rgb_sequence)
        self.camera_parameters = camera_parameters
        # Drop corner tracks shorter than CORNER_MIN_TRACK_LENGTH frames.
        self.corner_storage = without_short_tracks(corner_storage, CORNER_MIN_TRACK_LENGTH)
        self.intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters, self.rgb_sequence[0].shape[0])
        self.point_cloud_builder = PointCloudBuilder()
        # known_view_mats: frame index -> 3x4 view matrix.
        self.known_view_mats = {}
        self.unknown_view_ids = set(range(self.frame_count))
        if known_view_1 is None or known_view_2 is None:
            self.initialize_known_views()
        else:
            for (frame_idx, pose) in (known_view_1, known_view_2):
                self.update_view(frame_idx, pose_to_view_mat3x4(pose))

    def update_view(self, frame_idx: int, view_mat: np.ndarray):
        """Mark frame_idx as solved with the given 3x4 view matrix."""
        self.known_view_mats[frame_idx] = view_mat
        self.unknown_view_ids.remove(frame_idx)

    def track_camera(self):
        """Restore views for all frames, enriching the cloud as poses appear.

        Raises RuntimeError when no remaining frame can be restored.
        """
        # NOTE(review): last_idx/last_view stay fixed for the whole loop, so
        # every enrichment pairs frames against this single anchor frame —
        # confirm this is intended rather than tracking the latest frame.
        last_idx, last_view = next(iter(self.known_view_mats.items()))
        while self.unknown_view_ids:
            for (idx, view) in self.known_view_mats.items():
                if idx != last_idx:
                    self.enrich_point_cloud(last_view, view, self.corner_storage[last_idx], self.corner_storage[idx],
                                            TRIANGULATION_PARAMS)
            # TODO: multi-threading here
            restored_views = map(lambda i: (i, self.try_restore_view(i)), self.unknown_view_ids)
            restored_views = list(filter(lambda val: val[1][0] is not None, restored_views))
            if len(restored_views) == 0:
                raise RuntimeError('No more views can be restored!')
            # Commit the restored view with the lowest mean reprojection error.
            new_view_tuple = min(restored_views, key=lambda val: val[1][2])
            (new_idx, (new_view, inliers_count, _)) = new_view_tuple
            self.update_view(new_idx, new_view)
            print('Restored position at frame {} with {} inliers'.format(new_idx, inliers_count))

    def enrich_point_cloud(self, view_1: np.ndarray, view_2: np.ndarray, corners_1: FrameCorners,
                           corners_2: FrameCorners,
                           triangulation_parameters: TriangulationParameters) -> None:
        """Triangulate correspondences between two solved views into the cloud."""
        correspondences = build_correspondences(corners_1, corners_2)
        points, ids, median_cos = triangulate_correspondences(
            correspondences,
            view_1,
            view_2,
            self.intrinsic_mat,
            triangulation_parameters)
        self.point_cloud_builder.add_points(ids, points)

    def try_restore_view(self, frame_idx: int) -> Tuple[Optional[np.ndarray], int, np.float64]:
        """Try to solve the frame's pose via PnP against the current cloud.

        Returns (view matrix or None, inlier count, mean reprojection error);
        on failure the error slot is +inf so min() never prefers it.
        """
        corners = self.corner_storage[frame_idx]
        # Intersect the frame's corner ids with the cloud's point ids
        # (snp.intersect requires sorted inputs, hence np.sort).
        _, (corner_ids, cloud_ids) = snp.intersect(corners.ids.flatten(),
                                                   np.sort(self.point_cloud_builder.ids.flatten()), indices=True)
        points_3d = self.point_cloud_builder.points[cloud_ids]
        points_2d = corners.points[corner_ids]
        result = self.try_solve_pnp(points_3d, points_2d)
        if result is None:
            return None, 0, np.inf
        view, inlier_ids = result
        inlier_ids = inlier_ids.flatten()
        reprojection_errors = compute_reprojection_errors(points_3d[inlier_ids], points_2d[inlier_ids],
                                                          self.intrinsic_mat @ view)
        return view, len(inlier_ids), reprojection_errors.mean()

    def try_solve_pnp(self, points_3d: np.ndarray, points_2d: np.ndarray) -> Optional[Tuple[np.ndarray, np.ndarray]]:
        """Solve PnP: RANSAC with EPnP first, then iterative refinement.

        Returns (3x4 view matrix, inlier index array), or None when there are
        too few correspondences/inliers or RANSAC fails.
        """
        if len(points_3d) < 6:
            return None
        ok, r_vec, t_vec, inliers = cv2.solvePnPRansac(
            objectPoints=points_3d,
            imagePoints=points_2d,
            cameraMatrix=self.intrinsic_mat,
            distCoeffs=None,
            confidence=PNP_RANSAC_CONFIDENCE,
            reprojectionError=PNP_RANSAC_INLIERS_MAX_REPROJECTION_ERROR,
            flags=cv2.SOLVEPNP_EPNP
        )
        if not ok:
            return None
        inliers = inliers.flatten()

        if len(inliers) < 6:
            return None

        # Refine the RANSAC estimate with the iterative solver, starting
        # from the EPnP result (useExtrinsicGuess=True).
        _, r_vec, t_vec = cv2.solvePnP(
            points_3d,
            points_2d,
            self.intrinsic_mat,
            distCoeffs=None,
            rvec=r_vec,
            tvec=t_vec,
            useExtrinsicGuess=True,
            flags=cv2.SOLVEPNP_ITERATIVE
        )

        return rodrigues_and_translation_to_view_mat3x4(r_vec, t_vec), inliers

    def initialize_known_views(self):
        """Search for the best initial frame pair, relaxing constraints on failure."""
        median_corners = np.median(np.array([len(self.corner_storage[i].ids) for i in range(self.frame_count)]))

        def score(result):
            # Favor many inliers (relative to a typical frame) and a wide
            # triangulation angle (small median cosine).
            _, _, _, inliers_count, median_cos = result
            return (inliers_count / median_corners) ** 2 + (1 - median_cos ** 2)

        leniency = 1.0
        while True:
            print('Trying to initialize views...')
            best_view_result = max(self.initial_views_generator(leniency), key=score, default=None)
            if best_view_result is None:
                print('Initialization failed, relaxing constraints...')
                leniency *= 1.05
                continue
            frame_1, frame_2, view, _, _ = best_view_result
            print('Found initial poses for frames {} and {}'.format(frame_1, frame_2))
            # The first frame anchors the world coordinate system.
            self.update_view(frame_1, eye3x4())
            self.update_view(frame_2, view)
            return

    def initial_views_generator(self, leniency: np.float64):
        """Yield (frame_1, frame_2, view, inliers, median_cos) candidates."""
        for frame_1 in range(self.frame_count):
            print('Trying to match frame {} with a subsequent frame...'.format(frame_1))
            for frame_2 in range(frame_1 + INIT_MIN_FRAME_DISTANCE,
                                 min(frame_1 + 1 + INIT_MAX_FRAME_DISTANCE, self.frame_count)):
                correspondences = build_correspondences(self.corner_storage[frame_1], self.corner_storage[frame_2])
                result = self.try_restore_relative_pose(
                    correspondences,
                    INIT_MAX_LINE_DISTANCE * leniency,
                    INIT_HOMOGRAPHY_MAX_REPROJECTION_ERROR / leniency,
                    INIT_MAX_HOMOGRAPHY_INLIER_RATIO
                )
                if result is not None:
                    yield frame_1, frame_2, *result

    # Return annotation fixed: np.float (a deprecated alias removed in
    # NumPy 1.24) replaced with np.float64, matching the other annotations.
    def try_restore_relative_pose(self, correspondences: Correspondences,
                                  distance_inlier_threshold: np.float64,  # 1.0
                                  reprojection_threshold: np.float64,  # 3.0
                                  homography_inlier_ration_threshold: np.float64) \
            -> Optional[Tuple[np.ndarray, int, np.float64]]:
        """Recover the relative pose of the second view from an essential matrix.

        Returns (3x4 view, triangulated point count, median cosine), or None
        when there are too few correspondences or the pair looks degenerate.
        """
        if len(correspondences.ids) < 6:
            return None
        essential_mat, essential_inliers_mask = cv2.findEssentialMat(
            correspondences.points_1,
            correspondences.points_2,
            self.intrinsic_mat,
            method=cv2.RANSAC,
            prob=INIT_RANSAC_CONFIDENCE,
            threshold=distance_inlier_threshold
        )

        _, homography_inliers_mask = cv2.findHomography(
            correspondences.points_1,
            correspondences.points_2,
            method=cv2.RANSAC,
            ransacReprojThreshold=reprojection_threshold
        )

        # A mostly-homography-consistent pair (planar scene or pure rotation)
        # gives an ill-conditioned essential matrix — reject it.
        homography_inlier_ratio = homography_inliers_mask.sum(axis=None) / essential_inliers_mask.sum(axis=None)
        if homography_inlier_ratio > homography_inlier_ration_threshold:
            return None

        recover_inliers_count, r_mat, t_vec, recover_inliers_mask = cv2.recoverPose(
            essential_mat,
            correspondences.points_1,
            correspondences.points_2,
            self.intrinsic_mat,
            mask=essential_inliers_mask
        )
        recover_inliers_mask = recover_inliers_mask.flatten()
        assert (recover_inliers_mask.sum() == recover_inliers_count)

        # [R | t] is the 3x4 view matrix of the second camera.
        view = np.hstack([r_mat, t_vec])

        correspondences = Correspondences(
            correspondences.ids[recover_inliers_mask],
            correspondences.points_1[recover_inliers_mask],
            correspondences.points_2[recover_inliers_mask]
        )
        _, ids, median_cos = triangulate_correspondences(correspondences, eye3x4(), view, self.intrinsic_mat,
                                                         TRIANGULATION_PARAMS)
        return view, len(ids), median_cos
Example #13
0
class CameraTracker:
    """Incremental camera tracker: solves PnP per frame and grows a 3D cloud."""

    def __init__(self, corner_storage: CornerStorage,
                 intrinsic_mat: np.ndarray,
                 parameters: TriangulationParameters,
                 known_view_1: Tuple[int, Pose], known_view_2: Tuple[int,
                                                                     Pose]):
        """Store the tracking inputs and seed the cloud from the two known views."""
        self._corner_storage = corner_storage
        self._intrinsic_mat = intrinsic_mat
        self._triangulation_parameters = parameters
        self._length = len(corner_storage)
        self._builder = PointCloudBuilder()
        self._track = [None] * self._length

        self._track[known_view_1[0]] = pose_to_view_mat3x4(known_view_1[1])
        self._track[known_view_2[0]] = pose_to_view_mat3x4(known_view_2[1])
        self._update_cloud(known_view_1[0], known_view_2[0])

    def _update_cloud(self, ind1, ind2):
        """Triangulate correspondences between two solved frames into the cloud.

        :return: the number of points added (0 when there are no
                 correspondences).
        """
        frame1, frame2 = self._corner_storage[ind1], self._corner_storage[ind2]
        mat1, mat2 = self._track[ind1], self._track[ind2]
        correspondences = build_correspondences(frame1, frame2)
        if len(correspondences.ids) == 0:
            return 0
        points, ids, _ = triangulate_correspondences(
            correspondences, mat1, mat2, self._intrinsic_mat,
            self._triangulation_parameters)
        self._builder.add_points(ids, points)
        return len(ids)

    def track(self):
        """Solve every frame's pose via PnP against the growing cloud.

        :return: (numpy array of 3x4 view matrices, PointCloudBuilder).
        """
        for cur_index in range(self._length):
            print(f'Processing frame {cur_index}/{self._length}')
            corners = self._corner_storage[cur_index]
            ids = []
            object_points = []
            image_points = []
            # 'corner_id' instead of 'id': never shadow the builtin.
            for corner_id, point in zip(corners.ids, corners.points):
                indices_x, _ = np.where(self._builder.ids == corner_id)
                if len(indices_x) == 0:
                    continue
                ids.append(corner_id)
                object_points.append(self._builder.points[indices_x[0]])
                image_points.append(point)
            # PnP needs enough 2D-3D correspondences to be stable.
            if len(object_points) < 5:
                continue
            retval, rvec, tvec, inliers = cv2.solvePnPRansac(
                np.array(object_points),
                np.array(image_points),
                self._intrinsic_mat,
                None,
                flags=cv2.SOLVEPNP_EPNP)
            if not retval:
                continue
            view_mat = rodrigues_and_translation_to_view_mat3x4(rvec, tvec)
            print(f'Position based on {len(inliers)} inliers')
            self._track[cur_index] = view_mat
            # Triangulate the new frame against all previously solved frames.
            updates_count = sum(
                self._update_cloud(cur_index, prev_index)
                for prev_index in range(cur_index)
                if self._track[prev_index] is not None)
            print(
                f'Points updated:{updates_count} Current cloud size:{len(self._builder.ids)}'
            )
        # Fill frames that were never solved.  Bug fix: the original copied
        # _track[i - 1], which for i == 0 wrapped around to _track[-1] and
        # could propagate None.  Fill leading gaps from the first solved
        # frame, then forward-fill the rest.
        first_known = next(
            (i for i, mat in enumerate(self._track) if mat is not None), None)
        if first_known is not None:
            for i in range(first_known):
                self._track[i] = self._track[first_known]
            for i in range(first_known + 1, self._length):
                if self._track[i] is None:
                    self._track[i] = self._track[i - 1]
        return np.array(self._track), self._builder
Example #14
0
def track_and_calc_colors(
    camera_parameters: CameraParameters,
    corner_storage: CornerStorage,
    frame_sequence_path: str,
    known_view_1: Optional[Tuple[int, Pose]] = None,
    known_view_2: Optional[Tuple[int, Pose]] = None
) -> Tuple[List[Pose], PointCloud]:
    """Track the camera over the whole sequence and build a colored cloud.

    Seeds the track from two known views (computed automatically when not
    given), then repeatedly solves PnP-RANSAC for the remaining frames in
    random order, refines each pose with a robust (Huber) least-squares fit,
    and triangulates new points against every already-solved frame.  Bundle
    adjustment is run at the end for short sequences.

    Args:
        camera_parameters: intrinsic camera parameters.
        corner_storage: per-frame 2D corner tracks.
        frame_sequence_path: path to the RGB frame sequence.
        known_view_1: optional (frame index, pose) seed view.
        known_view_2: optional second (frame index, pose) seed view.

    Returns:
        Tuple of (per-frame camera poses, colored point cloud).
    """
    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    # Pick the two seed frames automatically when the caller gave none.
    if known_view_1 is None or known_view_2 is None:
        known_view_1, known_view_2 = calculate_known_views(
            intrinsic_mat, corner_storage)
        print('Calculated known views: {} and {}'.format(
            known_view_1[0], known_view_2[0]))

    random.seed(239)  # fixed seed keeps the frame-processing order reproducible

    # view_mats[i] is the 3x4 view matrix of frame i, or None while unsolved.
    view_mats = np.full((len(rgb_sequence), ), None)
    view_mats[known_view_1[0]] = pose_to_view_mat3x4(known_view_1[1])
    view_mats[known_view_2[0]] = pose_to_view_mat3x4(known_view_2[1])

    point_cloud_builder = PointCloudBuilder()

    def triangulate_and_add_points(corners1, corners2, view_mat1, view_mat2):
        # Triangulate common corners of two solved frames into the cloud.
        points3d, ids, median_cos = triangulate_correspondences(
            build_correspondences(corners1, corners2), view_mat1, view_mat2,
            intrinsic_mat, triang_params)
        point_cloud_builder.add_points(ids, points3d)

    triangulate_and_add_points(corner_storage[known_view_1[0]],
                               corner_storage[known_view_2[0]],
                               view_mats[known_view_1[0]],
                               view_mats[known_view_2[0]])

    # Closure state for find_errs; set right before least_squares is called.
    inliers_points3d, inliers_points2d = None, None

    def find_errs(rtvecs):
        # Reprojection residuals for a packed [rvec, tvec] parameter vector.
        rvecs, tvecs = rtvecs[:3], rtvecs[3:]
        rmat, _ = cv2.Rodrigues(np.expand_dims(rvecs, axis=1))
        rmat = np.concatenate((rmat, np.expand_dims(tvecs, axis=1)), axis=1)
        return (project_points(inliers_points3d, intrinsic_mat @ rmat) -
                inliers_points2d).flatten()

    # Sweep the frames in random order until a full pass solves nothing new.
    while True:
        random_ids = list(range(len(view_mats)))
        random.shuffle(random_ids)
        found_new_view_mat = False
        for i in random_ids:
            if view_mats[i] is not None:
                continue
            # Corners of frame i that already have 3D points in the cloud.
            common, (ids1,
                     ids2) = snp.intersect(point_cloud_builder.ids.flatten(),
                                           corner_storage[i].ids.flatten(),
                                           indices=True)
            if len(common) <= 10:
                continue

            points3d = point_cloud_builder.points[ids1]
            points2d = corner_storage[i].points[ids2]
            retval, rvecs, tvecs, inliers = cv2.solvePnPRansac(
                points3d,
                points2d,
                intrinsic_mat,
                iterationsCount=108,
                reprojectionError=triang_params.max_reprojection_error,
                distCoeffs=None,
                confidence=0.999,
                flags=cv2.SOLVEPNP_EPNP)
            if retval:
                # M-estimates: refine the RANSAC pose on the inliers with a
                # Huber loss to damp the influence of remaining outliers.
                inliers_points3d = points3d[inliers.flatten()]
                inliers_points2d = points2d[inliers.flatten()]
                rtvecs = least_squares(find_errs,
                                       x0=np.concatenate(
                                           (rvecs, tvecs)).flatten(),
                                       loss='huber',
                                       method='trf').x
                rvecs = rtvecs[:3].reshape((-1, 1))
                tvecs = rtvecs[3:].reshape((-1, 1))
            if not retval:
                continue

            print(
                'Iteration {}/{}, processing {}th frame: {} inliers, {} points in point cloud'
                .format(
                    len([v for v in view_mats if v is not None]) - 1,
                    len(rgb_sequence) - 2, i, len(inliers),
                    len(point_cloud_builder.points)))

            view_mats[i] = rodrigues_and_translation_to_view_mat3x4(
                rvecs, tvecs)
            found_new_view_mat = True

            # Add new points: triangulate the inlier corners of frame i
            # against every frame that already has a pose.
            inliers = np.array(inliers).astype(int).flatten()
            inlier_corners = FrameCorners(
                *[c[inliers] for c in corner_storage[i]])
            for j in range(len(view_mats)):
                if view_mats[j] is not None:
                    triangulate_and_add_points(corner_storage[j],
                                               inlier_corners, view_mats[j],
                                               view_mats[i])

            sys.stdout.flush()

        if not found_new_view_mat:
            break

    # Every frame must have been solved; otherwise tracking failed.
    for i in range(0, len(view_mats)):
        if view_mats[i] is None:
            print('Я сдох: не все кадры обработаны :(')
            exit(1)

    if len(view_mats) < 100:  # otherwise it takes too long
        view_mats = bundle_adjustment(view_mats, point_cloud_builder,
                                      intrinsic_mat, corner_storage)

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
Example #15
0
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    """Track the camera between and beyond two known views.

    Triangulates an initial cloud from the two known frames, then processes
    the frames between them and finally the frames outside the known range,
    retuning the module-level tracking thresholds for each phase.  Frames
    that could not be solved inherit a neighbouring frame's pose.

    Raises:
        NotImplementedError: if either known view is missing.
    """
    if known_view_1 is None or known_view_2 is None:
        raise NotImplementedError()
    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])
    i_1 = known_view_1[0]
    i_2 = known_view_2[0]
    print('Known frames are', i_1, 'and', i_2)
    # Tracking thresholds are module-level globals mutated per phase below.
    global INLIERS_MIN_SIZE, DELTA, MIN_SIZE, TRIANG_PARAMS
    view_mat3x4_1 = pose_to_view_mat3x4(known_view_1[1])
    view_mat3x4_2 = pose_to_view_mat3x4(known_view_2[1])
    correspondences = build_correspondences(corner_storage[i_1],
                                            corner_storage[i_2])
    print(len(correspondences.ids))
    # Seed the point cloud from the two known views.
    points3d, corr_ids, median_cos = triangulate_correspondences(
        correspondences, view_mat3x4_1, view_mat3x4_2, intrinsic_mat,
        TRIANG_PARAMS)
    view_mats, point_cloud_builder = [view_mat3x4_1 for _ in corner_storage
                                      ], PointCloudBuilder(
                                          corr_ids.astype(np.int64), points3d)
    # err_indexes holds the frames that still need a pose estimate.
    err_indexes = set(range(len(corner_storage)))
    err_indexes.remove(i_1)
    err_indexes.remove(i_2)
    # Re-solve the two known frames against the cloud to get inlier counts.
    res_code, rodrig, inliers_1, cloud_points = get_ransac(
        point_cloud_builder, corner_storage[i_1], intrinsic_mat)
    view_mats[i_1] = rodrig
    res_code, rodrig, inliers_2, cloud_points = get_ransac(
        point_cloud_builder, corner_storage[i_2], intrinsic_mat)
    view_mats[i_2] = rodrig
    INLIERS_MIN_SIZE = min(len(inliers_1), len(inliers_2)) - 20 * max(
        len(corner_storage) // 100, 1)
    descr_template = 'Point cloud calc - {} inliers, {} points found, cloud size is {}'

    tqdm_iterator = tqdm(range(len(corner_storage) - 2),
                         total=len(corner_storage),
                         desc=descr_template.format('?', '?', '?'))

    params = [
        corner_storage, view_mats, point_cloud_builder, intrinsic_mat,
        err_indexes, descr_template, tqdm_iterator
    ]
    # Phase 1: frames strictly between the known views (tighter thresholds).
    DELTA = 5 * max(len(corner_storage) // 100, 1)
    MIN_SIZE = 20 * max(len(corner_storage) // 100, 1)
    TRIANG_PARAMS = TriangulationParameters(2, 1e-2, 1e-2)
    for i in range(i_1 + 1, i_2):
        update(i, *params)
    print('Points between handled')
    # Phase 2: frames outside the known range (looser thresholds).
    DELTA = 20 * max(len(corner_storage) // 100, 1)
    MIN_SIZE = 20 * max(len(corner_storage) // 100, 1)
    TRIANG_PARAMS = TriangulationParameters(4, 1e-2, 1e-2)
    for i in range(i_1, -1, -1):
        if i in err_indexes:
            update(i, *params)
    for i in range(i_2, len(corner_storage)):
        if i in err_indexes:
            update(i, *params)

    # Frames still unsolved inherit the pose of the nearest solved neighbour.
    for i in range(i_1, -1, -1):
        if i in err_indexes:
            view_mats[i] = view_mats[i + 1]

    for i in range(i_2, len(corner_storage)):
        if i in err_indexes:
            view_mats[i] = view_mats[i - 1]
    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
Example #16
0
def run_bundle_adjustment(intrinsic_mat: np.ndarray,
                          list_of_corners: List[FrameCorners],
                          max_inlier_reprojection_error: float,
                          view_mats: List[np.ndarray],
                          pc_builder: PointCloudBuilder) -> List[np.ndarray]:
    """Jointly refine camera poses and 3D points (bundle adjustment).

    Processes frames in windows of FRAMES_STEP.  For each window a damped
    Gauss-Newton step is taken ITERATIONS times: the sparse Jacobian is
    assembled, the normal equations J^T J are damped by `alpha`, and the
    camera update is obtained by eliminating the point block (Schur
    complement) before back-substituting the point update.

    Args:
        intrinsic_mat: 3x3 camera intrinsics.
        list_of_corners: per-frame 2D corners.
        max_inlier_reprojection_error: reprojection threshold for inliers.
        view_mats: per-frame 3x4 view matrices, updated in place.
        pc_builder: point cloud whose points are updated in place.

    Returns:
        The refined list of view matrices.
    """
    n_frames = len(view_mats)
    inliers = get_inliers(intrinsic_mat, list_of_corners,
                          max_inlier_reprojection_error, view_mats, pc_builder)
    # Per-frame Rodrigues rotation vectors and translations being optimized.
    rs = [cv2.Rodrigues(i[:, :3])[0].flatten() for i in view_mats]
    ts = [i[:, 3] for i in view_mats]
    # Maps a corner id to its row in the point cloud (-1 when absent).
    point_id_to_cloud = -np.ones(pc_builder.ids.max() + 1, dtype=np.int32)
    point_id_to_cloud[pc_builder.ids.flatten()] = np.arange(len(
        pc_builder.ids))

    re = calc_reprojection_error(inliers, pc_builder, point_id_to_cloud,
                                 list_of_corners, intrinsic_mat, view_mats)
    print('Reprojection error before bundle adjustment:', re)
    for start in range(0, n_frames, FRAMES_STEP):
        end = min(start + FRAMES_STEP, n_frames)
        cameras_params_dim = (end - start) * 6  # 6 DoF per camera in window

        matches = get_matches(inliers[start:end], len(list_of_corners), start)
        relevant_point_ids = np.array(
            list(
                sorted(
                    {point_id
                     for point_id, frame_id, point_index in matches})))
        point_id_to_cur = {p: i for i, p in enumerate(relevant_point_ids)}
        _, (_, cloud_indices) = snp.intersect(relevant_point_ids,
                                              pc_builder.ids.flatten(),
                                              indices=True)
        relevant_points = pc_builder.points[cloud_indices]

        alpha = ALPHA_INITIAL  # damping factor of the normal equations
        us_mean_prev = -float('inf')

        for i in range(ITERATIONS):
            us = get_residuals(pc_builder, point_id_to_cloud, list_of_corners,
                               intrinsic_mat, matches, view_mats)
            jacobian = calc_jacobian(matches, cameras_params_dim, rs, ts,
                                     pc_builder, list_of_corners,
                                     point_id_to_cur, point_id_to_cloud,
                                     intrinsic_mat, start)
            jtj = jacobian.T @ jacobian

            # Adapt damping: relax when the mean residual improved,
            # tighten otherwise.
            us_mean = us.mean()
            alpha = alpha / ALPHA_DEC_STEP if us_mean < us_mean_prev else alpha * ALPHA_INC_STEP
            us_mean_prev = us_mean
            alpha_multiplier = 1 + alpha
            jtj[np.arange(jtj.shape[0]),
                np.arange(jtj.shape[0])] *= alpha_multiplier

            # Split J^T J into camera (u), point (v) and cross (w) blocks;
            # a = u - w v^{-1} w^T is the reduced camera system.
            u = jtj[:cameras_params_dim, :cameras_params_dim]
            v = jtj[cameras_params_dim:, cameras_params_dim:]
            w = jtj[:cameras_params_dim, cameras_params_dim:]
            wt = jtj[cameras_params_dim:, :cameras_params_dim]
            v_inv = block_inv(v)
            g = jacobian.T @ us
            b = w @ v_inv @ g[cameras_params_dim:] - g[:cameras_params_dim]
            a = (u - w @ v_inv @ wt).toarray()
            delta_c = np.linalg.solve(a, b)
            delta_x = v_inv @ (-g[cameras_params_dim:] - wt @ delta_c)
            # Apply the per-camera updates ...
            for k, j in enumerate(range(start, end)):
                rs[j] += delta_c[k * 6:k * 6 + 3]
                ts[j] += delta_c[k * 6 + 3:k * 6 + 6]
                view_mats[j] = rodrigues_and_translation_to_view_mat3x4(
                    rs[j], ts[j].reshape(3, 1))
            # ... and the point updates.
            relevant_points += delta_x.reshape((-1, 3))
            pc_builder.update_points(relevant_point_ids, relevant_points)
    re = calc_reprojection_error(inliers, pc_builder, point_id_to_cloud,
                                 list_of_corners, intrinsic_mat, view_mats)
    print('Reprojection error after bundle adjustment:', re)
    return view_mats
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    """Track the camera through the sequence and build a colored point cloud.

    Seeds 3D points from the two known views, then repeatedly solves
    PnP-RANSAC for the remaining frames (triangulating new points against
    every processed frame after each success) until no further frame can be
    solved.  Unsolved frames inherit the pose of the nearest solved
    neighbour.

    Raises:
        NotImplementedError: if either known view is missing.
    """
    if known_view_1 is None or known_view_2 is None:
        raise NotImplementedError()

    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    view_mats = np.zeros((len(corner_storage), 3, 4), dtype=np.float64)
    # `bool`, not `np.bool`: the `np.bool` alias was deprecated in NumPy 1.20
    # and removed in 1.24, so the original `dtype=np.bool` raises
    # AttributeError on current NumPy.
    processed_frames = np.zeros(len(corner_storage), dtype=bool)
    points3d = np.zeros((corner_storage.max_corner_id() + 1, 3),
                        dtype=np.float64)
    added_points = np.zeros(corner_storage.max_corner_id() + 1, dtype=bool)

    print('Trying to extract 3d points from known frames...')

    view_mats[known_view_1[0]] = pose_to_view_mat3x4(known_view_1[1])
    view_mats[known_view_2[0]] = pose_to_view_mat3x4(known_view_2[1])
    processed_frames[known_view_1[0]] = processed_frames[
        known_view_2[0]] = True
    extracted_points = extract_points(corner_storage[known_view_1[0]],
                                      corner_storage[known_view_2[0]],
                                      view_mats[known_view_1[0]],
                                      view_mats[known_view_2[0]],
                                      intrinsic_mat)
    if not extracted_points:
        print(
            'Extracting 3d points from known frames failed: '
            'either there are no common points, or triangulation angle between frames is too small.\n'
            'Try to choose another initial frames.',
            file=sys.stderr)
        # NOTE(review): exiting with status 0 on failure looks unintended,
        # but callers/scripts may rely on it -- confirm before changing.
        exit(0)
    print('Succeeded! Trying to build point cloud...')

    added_points[extracted_points[0]] = True
    points3d[extracted_points[0]] = extracted_points[1]

    # Sweep the unprocessed frames until a full pass solves nothing new.
    was_updated = True
    while was_updated:
        was_updated = False
        unprocessed_frames = np.arange(len(corner_storage),
                                       dtype=np.int32)[~processed_frames]
        for frame in unprocessed_frames:
            # Ids of corners that already have triangulated 3D points.
            points3d_ids = np.arange(corner_storage.max_corner_id() + 1,
                                     dtype=np.int32)[added_points]
            common, (indices1, indices2) = snp.intersect(
                points3d_ids,
                corner_storage[frame].ids.flatten(),
                indices=True)
            if len(common) <= 5:
                continue

            try:
                print(f'Processing frame {frame}: ', end='')
                retval, rvec, tvec, inliers = cv2.solvePnPRansac(
                    objectPoints=points3d[common],
                    imagePoints=corner_storage[frame].points[indices2],
                    cameraMatrix=intrinsic_mat,
                    distCoeffs=None)
            except Exception:
                # Narrowed from a bare `except:`, which also swallowed
                # SystemExit and KeyboardInterrupt.
                retval = False

            if not retval:
                print(f'failed to solve PnP RANSAC, trying another frame')
                continue

            print(f'succeeded, found {len(inliers)} inliers')
            was_updated = True
            view_mats[frame] = rodrigues_and_translation_to_view_mat3x4(
                rvec, tvec)
            # Triangulate new points against every already-processed frame.
            for processed_frame in np.arange(len(corner_storage),
                                             dtype=np.int32)[processed_frames]:
                extracted_points = extract_points(
                    corner_storage[frame], corner_storage[processed_frame],
                    view_mats[frame], view_mats[processed_frame],
                    intrinsic_mat, points3d_ids)

                if extracted_points:
                    added_points[extracted_points[0]] = True
                    points3d[extracted_points[0]] = extracted_points[1]
            processed_frames[frame] = True
            print(f'Current point cloud contains {sum(added_points)} points')

    # Two passes (forward, then over the reversed arrays) propagate the
    # nearest solved pose into every still-unsolved frame.
    for _ in range(2):
        for i in range(1, len(corner_storage)):
            if not processed_frames[i]:
                processed_frames[i] = True
                view_mats[i] = view_mats[i - 1]
        processed_frames = processed_frames[::-1]
        view_mats = view_mats[::-1]

    print(
        f'Finished building point cloud, now it contains {sum(added_points)} points'
    )

    point_cloud_builder = PointCloudBuilder(ids=np.arange(
        corner_storage.max_corner_id() + 1, dtype=np.int32)[added_points],
                                            points=points3d[added_points])

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
Example #18
0
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    """Track the camera through the sequence and build a colored point cloud.

    Triangulates an initial cloud from the two known views, then repeatedly
    sweeps the frames, solving PnP-RANSAC for unsolved ones and triangulating
    new points against every solved frame, until a sweep adds nothing.
    Unsolved frames inherit the previous frame's pose at the end.

    Raises:
        NotImplementedError: if either known view is missing.
    """
    if known_view_1 is None or known_view_2 is None:
        raise NotImplementedError()

    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    triang_params = TriangulationParameters(
        max_reprojection_error=MAX_PROJ_ERROR,
        min_triangulation_angle_deg=MIN_TRIANG_ANG,
        min_depth=MIN_DEPTH)

    # Seed the point cloud from the two known views.
    correspondences = build_correspondences(corner_storage[known_view_1[0]],
                                            corner_storage[known_view_2[0]])
    view_mat_1 = pose_to_view_mat3x4(known_view_1[1])
    view_mat_2 = pose_to_view_mat3x4(known_view_2[1])
    points, ids, median_cos = triangulate_correspondences(
        correspondences, view_mat_1, view_mat_2, intrinsic_mat, triang_params)
    if len(points) < 10:
        print("\rPlease, try other frames")
        exit(0)

    # view_mats[i] is the 3x4 view matrix of frame i, or None while unsolved.
    view_mats = [None] * len(rgb_sequence)
    view_mats[known_view_1[0]] = view_mat_1
    view_mats[known_view_2[0]] = view_mat_2

    point_cloud_builder = PointCloudBuilder(ids, points)
    was_update = True
    while was_update:
        was_update = False
        for i, (frame, corners) in enumerate(zip(rgb_sequence,
                                                 corner_storage)):
            if view_mats[i] is not None:
                continue
            # Corners of frame i that already have 3D points in the cloud.
            _, (idx_1,
                idx_2) = snp.intersect(point_cloud_builder.ids.flatten(),
                                       corners.ids.flatten(),
                                       indices=True)
            try:
                retval, rvec, tvec, inliers = cv2.solvePnPRansac(
                    point_cloud_builder.points[idx_1],
                    corners.points[idx_2],
                    intrinsic_mat,
                    distCoeffs=None)
                inliers = np.array(inliers, dtype=int)
                if len(inliers) > 0:
                    view_mats[i] = rodrigues_and_translation_to_view_mat3x4(
                        rvec, tvec)
                    was_update = True
                print(
                    f"\rFrame {i} out of {len(rgb_sequence)}, inliners: {len(inliers)}",
                    end="")
            except Exception:
                # PnP can fail (too few points, degenerate geometry);
                # skip this frame and retry on a later sweep.
                print(f"\rFrame {i} out of {len(rgb_sequence)}, inliners: {0}",
                      end="")
            if view_mats[i] is None:
                continue
            # Keep only the inlier corners for the triangulation below.
            cur_corner = filter_frame_corners(corner_storage[i],
                                              inliers.flatten())

            # Triangulate new points against every solved frame.
            for j in range(len(rgb_sequence)):
                if view_mats[j] is None:
                    continue
                correspondences = build_correspondences(
                    corner_storage[j], cur_corner)
                if len(correspondences.ids) == 0:
                    continue
                points, ids, median_cos = triangulate_correspondences(
                    correspondences, view_mats[j], view_mats[i], intrinsic_mat,
                    triang_params)
                point_cloud_builder.add_points(ids, points)

        print()

    # Propagate poses into frames that were never solved.
    first_mat = next((mat for mat in view_mats if mat is not None), None)
    if first_mat is None:
        print("\rFail")
        exit(0)

    view_mats[0] = first_mat
    for i in range(1, len(view_mats)):
        if view_mats[i] is None:
            view_mats[i] = view_mats[i - 1]
    view_mats = np.array(view_mats)

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
Example #19
0
class CameraTracker:

    def __init__(self, corner_storage, intrinsic_mat, view_1, view_2):
        """Set up tracking state and seed the cloud from two initial views.

        Args:
            corner_storage: per-frame 2D corner tracks.
            intrinsic_mat: 3x3 camera intrinsics.
            view_1: optional (frame index, Pose) of the first known view;
                when either view is None both views are searched for
                automatically.
            view_2: optional (frame index, Pose) of the second known view.
        """
        self.corner_storage = corner_storage
        self.frame_count = len(corner_storage)
        self.intrinsic_mat = intrinsic_mat
        self.triangulation_parameters = TriangulationParameters(1, 3, .1)
        # frame index -> 3x4 view matrix for every solved frame
        self.view_mats = {}

        if view_1 is None or view_2 is None:
            print('Finding initial poses')
            pose1_idx, pose1, pose2_idx, pose2 = self.find_best_start_poses()
            print('Initial poses found in frames {0} and {1}'.format(pose1_idx, pose2_idx))
        else:
            pose1 = pose_to_view_mat3x4(view_1[1])
            pose2 = pose_to_view_mat3x4(view_2[1])
            pose1_idx = view_1[0]
            pose2_idx = view_2[0]

        corners_1 = self.corner_storage[pose1_idx]
        corners_2 = self.corner_storage[pose2_idx]

        corrs = build_correspondences(corners_1, corners_2)

        # Seed the point cloud by triangulating the two initial views.
        p, ids, med = triangulate_correspondences(corrs, pose1, pose2,
                                                  self.intrinsic_mat, self.triangulation_parameters)

        self.ids = ids
        self.point_cloud_builder = PointCloudBuilder(ids, p)
        # point id -> frame indices in which the point has been observed
        self.point_frames = {}
        # point id -> observation count at the time of its last retriangulation
        self.last_retriangulated = {}
        for i in ids:
            self.point_frames[i] = [pose1_idx, pose2_idx]
            self.last_retriangulated[i] = 2

        # frame index -> inlier count recorded when its pose was accepted
        self.used_inliers = {}
        # indices of frames that still lack a pose
        self.view_nones = {i for i in range(self.frame_count)}
        self.view_mats[pose1_idx] = pose1
        self.used_inliers[pose1_idx] = 0
        self.view_nones.remove(pose1_idx)
        self.view_mats[pose2_idx] = pose2
        self.used_inliers[pose2_idx] = 0
        self.view_nones.remove(pose2_idx)
        self.last_added_idx = pose2_idx
        self.last_inliers = []

        self.retriangulate_frames = 3  # max observations used per retriangulation
        self.max_repr_error = 1.5  # reprojection-error inlier threshold
        self.used_inliers_point = {}  # point id -> best inlier count so far
        self.step = 0  # number of frames solved by track() so far

    def track(self):
        """Solve camera poses for all frames, growing the cloud as we go.

        Each step: triangulate new points between every solved frame and the
        most recently added one, then solve PnP-RANSAC for the unsolved frame
        with the most inliers.  Periodically retriangulates stale points and
        re-solves already-tracked frames.
        """
        # Periods are clamped to >= 1: with frame_count < 20 (or < 10) the
        # original `self.step % (self.frame_count // 20)` raised
        # ZeroDivisionError.
        retriangulation_period = max(1, self.frame_count // 20)
        refine_period = max(1, self.frame_count // 10)
        while len(self.view_mats) < self.frame_count:
            print('Processing {0}/{1} frame'.format(len(self.view_mats) + 1, self.frame_count))
            for i, v_mat in self.view_mats.items():
                corners_1 = self.corner_storage[i]
                corners_2 = self.corner_storage[self.last_added_idx]

                corrs = build_correspondences(corners_1, corners_2)
                try:
                    p, ids, _ = triangulate_correspondences(corrs, v_mat, self.view_mats[self.last_added_idx],
                                                            self.intrinsic_mat, self.triangulation_parameters)
                    new_corners = []
                    new_ids = []
                    # Accept points that are fresh inliers or brand new.
                    for j, pt in zip(ids, p):
                        if j in self.last_inliers or j not in self.point_frames:
                            new_ids += [j]
                            new_corners += [pt]
                    # Record in which frames each point has been observed.
                    for i_ in ids:
                        if i_ not in self.point_frames:
                            self.point_frames[i_] = [i]
                        if self.last_added_idx not in self.point_frames[i_]:
                            self.point_frames[i_] += [self.last_added_idx]
                    if len(new_ids) > 0:
                        self.point_cloud_builder.add_points(np.array(new_ids), np.array(new_corners))
                except Exception:
                    # Triangulation may fail for degenerate pairs; skip them.
                    # (Was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit.)
                    continue

            print('Points cloud size: {0}'.format(len(self.point_cloud_builder.points)))
            view_mat, best_idx, inliers = self.solve_pnp_ransac()
            self.view_mats[best_idx] = view_mat[:3]
            self.used_inliers[best_idx] = inliers
            self.view_nones.remove(best_idx)
            self.last_added_idx = best_idx
            if self.step % retriangulation_period == 0 and self.step > 0:
                self.retriangulate_points()
            if self.step % refine_period == 0 and self.step > 0:
                self.update_tracks()
            self.step += 1

    def update_tracks(self):
        """Re-solve every tracked frame, keeping the pose if it is at least
        as well supported (by inlier count) as the stored one."""
        for frame_idx in list(self.view_mats):
            refined_mat, refined_inliers = self.solve_pnp_ransac_one(frame_idx)
            if len(refined_inliers) >= self.used_inliers[frame_idx]:
                self.view_mats[frame_idx] = refined_mat[:3]
                self.used_inliers[frame_idx] = len(refined_inliers)

    def solve_pnp_ransac(self):
        """Solve PnP for every frame without a pose and return the best one.

        Returns:
            Tuple (view matrix, frame index, inlier count) for the unsolved
            frame with the most RANSAC inliers.

        Raises:
            RuntimeError: when no remaining frame could be solved (the
                original code crashed with an opaque TypeError on `best[0]`).
        """
        best = None
        best_ans = []
        nones = list(self.view_nones)
        for i in nones:
            try:
                v_mat, inliers = self.solve_pnp_ransac_one(i)
            except Exception:
                # PnP may raise for degenerate frames; skip them.
                # (Was a bare `except:`, which also caught KeyboardInterrupt.)
                continue
            if len(inliers) > len(best_ans):
                best = v_mat, i
                best_ans = inliers
        print('Used {} inliers'.format(len(best_ans)))

        self.last_inliers = best_ans

        if best is None:
            raise RuntimeError('PnP RANSAC failed for all remaining frames')
        return best[0], best[1], len(best_ans)

    def solve_pnp_ransac_one(self, frame_id):
        """Estimate one frame's pose from 2D-3D corner correspondences.

        Runs PnP-RANSAC (EPnP) to select inliers, then refines the pose on
        the inliers with iterative PnP.

        Args:
            frame_id: index of the frame to solve.

        Returns:
            Tuple (3x4 view matrix, inlier index array), or (None, []) when
            there are too few correspondences or RANSAC fails.
        """
        ids_3d = self.point_cloud_builder.ids
        ids_2d = self.corner_storage[frame_id].ids
        # Use the intersection indices that snp already computes instead of
        # the original O(n^2) membership scans over the id arrays.
        _, (idx3d, idx2d) = snp.intersect(ids_3d.flatten(), ids_2d.flatten(),
                                          indices=True)
        points3d = self.point_cloud_builder.points[idx3d]
        points2d = self.corner_storage[frame_id].points[idx2d]

        if len(points3d) < 6:
            return None, []

        _, _, _, inliers = cv2.solvePnPRansac(
            objectPoints=points3d,
            imagePoints=points2d,
            cameraMatrix=self.intrinsic_mat,
            distCoeffs=np.array([]),
            iterationsCount=250,
            flags=cv2.SOLVEPNP_EPNP
        )

        # solvePnPRansac returns inliers=None on failure; the original code
        # crashed here with a TypeError when indexing by None.
        if inliers is None or len(points3d[inliers]) < 6:
            return None, []

        _, r_vec, t_vec = cv2.solvePnP(
            objectPoints=points3d[inliers],
            imagePoints=points2d[inliers],
            cameraMatrix=self.intrinsic_mat,
            distCoeffs=np.array([]),
            flags=cv2.SOLVEPNP_ITERATIVE
        )
        view_mat = rodrigues_and_translation_to_view_mat3x4(r_vec, t_vec)
        return view_mat, inliers

    def retriangulate_points(self):
        """Select up to 300 stale cloud points and re-triangulate them.

        A point is stale when it has never been retriangulated or has gained
        more than five new observations since its last retriangulation.
        """
        def is_stale(pid):
            if pid not in self.last_retriangulated:
                return True
            return len(self.point_frames[pid]) - self.last_retriangulated[pid] > 5

        candidates = [point for point in self.point_cloud_builder.ids
                      if is_stale(point[0]) and point[0] in self.point_frames]
        np.random.shuffle(candidates)
        self.retriangulate_points_arr(candidates[:300])

    def retriangulate_points_arr(self, to_retriangulate):
        """Re-triangulate the given cloud points, keeping only improvements.

        A point is updated when its new triangulation is supported by more
        inliers than any previous one.
        """
        accepted_ids = []
        accepted_coords = []
        for point in to_retriangulate:
            pid = point[0]
            new_coords, inlier_cnt = self.retriangulate_point(point)
            if new_coords is None:
                continue
            prev_best = self.used_inliers_point.get(pid)
            if prev_best is not None and prev_best >= inlier_cnt:
                continue
            self.used_inliers_point[pid] = inlier_cnt
            accepted_ids.append(pid)
            accepted_coords.append(new_coords[0])
            self.last_retriangulated[pid] = len(self.point_frames[pid])
        if not accepted_ids:
            return
        ids_arr = np.array(accepted_ids)
        coords_arr = np.array(accepted_coords)
        print('Retriangulated {} points'.format(len(ids_arr)))
        self.point_cloud_builder.update_points(ids_arr, coords_arr)

    def retriangulate_point(self, point_id):
        """Re-triangulate one cloud point from several observing frames.

        Collects the 2D observations of `point_id` in all frames with known
        poses, samples a few frame pairs, triangulates each pair, and keeps
        the candidate consistent with the most observations.

        Args:
            point_id: corner id (array-like of length 1) of the point.

        Returns:
            Tuple ((1, 3) array of 3D coordinates, inlier count), or
            (None, None) when there are too few observations or no candidate
            had any inliers.
        """
        coords = []  # 2D observations of the point
        frames = []  # frame indices of those observations
        v_mats = []  # corresponding view matrices
        for i, frame in enumerate(self.corner_storage):
            if point_id in frame.ids and i in self.view_mats.keys():
                idx = np.where(frame.ids == point_id)[0][0]
                coords += [frame.points[idx]]
                frames += [i]
                v_mats += [self.view_mats[i]]
        if len(coords) < 3:
            return None, None
        # Cap the number of observations to keep retriangulation cheap.
        if len(coords) > self.retriangulate_frames:
            idxs = np.random.choice(len(coords), size=self.retriangulate_frames, replace=False)
            coords = np.array(coords)[idxs]
            frames = np.array(frames)[idxs]
            v_mats = np.array(v_mats)[idxs]
        best_coords = None
        best_cnt = 0
        # Try a few random frame pairs and keep the best-supported result.
        # NOTE(review): replace=True can draw the same frame twice, yielding
        # a degenerate pair -- confirm whether replace=False was intended.
        for _ in range(4):
            i, j = np.random.choice(len(coords), 2, replace=True)
            corrs = Correspondences(np.array([point_id]), np.array([coords[i]]), np.array([coords[j]]))
            point3d, _, _ = triangulate_correspondences(corrs, v_mats[i], v_mats[j], self.intrinsic_mat,
                                                        self.triangulation_parameters)
            if len(point3d) == 0:
                continue
            # Count observations whose reprojection error is acceptable.
            errors = np.array([compute_reprojection_errors(point3d, np.array([p]), self.intrinsic_mat @ m)
                               for m, p in zip(v_mats, coords)])
            cnt = np.sum(errors < self.max_repr_error)
            if best_coords is None or best_cnt < cnt:
                best_cnt = cnt
                best_coords = point3d
        if best_cnt == 0:
            return None, None
        return best_coords, best_cnt

    def find_best_start_poses(self):
        """Search frame pairs for the best pair of initialization views.

        Tries pairs (i, j) with j >= i + 5, striding over j, and scores each
        pair by the number of points its relative pose triangulates.

        Returns:
            Tuple (first frame index, identity 3x4 view matrix, second frame
            index, 3x4 view matrix of the second frame).

        Raises:
            RuntimeError: if no pair yields a usable relative pose (the
                original code fell through to pose_to_view_mat3x4(None)).
        """
        best_i = 0
        best_j = 0
        best_j_pose = None
        best_pose_points = 0
        # max(1, ...) guards short sequences: with frame_count < 20 the
        # original stride was 0 and range() raises ValueError on step 0.
        step = max(1, self.frame_count // 20)
        for i in range(self.frame_count):
            for j in range(i + 5, self.frame_count, step):
                pose, pose_points = self.get_pose(i, j)
                if pose_points > best_pose_points:
                    best_i = i
                    best_j = j
                    best_j_pose = pose
                    best_pose_points = pose_points
        if best_j_pose is None:
            raise RuntimeError('Could not find initial views for tracking')
        return best_i, eye3x4(), best_j, pose_to_view_mat3x4(best_j_pose)

    def get_pose(self, frame_1, frame_2):
        """Estimate the relative pose of two frames via the essential matrix.

        Args:
            frame_1: index of the reference frame.
            frame_2: index of the second frame.

        Returns:
            Tuple (Pose of frame_2 relative to frame_1, number of points the
            pose triangulates), or (None, 0) when the pair is unusable (too
            few correspondences, RANSAC failure, or a homography fits nearly
            as well, indicating degenerate geometry).
        """
        p1 = self.corner_storage[frame_1]
        p2 = self.corner_storage[frame_2]

        corresp = build_correspondences(p1, p2)

        if len(corresp.ids) < 6:
            return None, 0

        E, mask_essential = cv2.findEssentialMat(corresp[1], corresp[2], self.intrinsic_mat, method=cv2.RANSAC,
                                                 threshold=1.0)

        _, mask_homography = cv2.findHomography(corresp[1], corresp[2], method=cv2.RANSAC)

        if mask_essential is None or mask_homography is None:
            return None, 0

        essential_inliers = mask_essential.flatten().sum()
        homography_inliers = mask_homography.flatten().sum()

        # A homography explaining many inliers suggests a planar scene or a
        # pure rotation, where the essential matrix is unreliable.
        if homography_inliers / essential_inliers > 0.5:
            return None, 0

        # NOTE(review): argwhere yields outlier POSITIONS in the mask, while
        # _remove_correspondences_with_ids appears to expect corner IDS --
        # confirm that positions and ids coincide here.
        corresp = _remove_correspondences_with_ids(corresp, np.argwhere(mask_essential == 0))

        R1, R2, t = cv2.decomposeEssentialMat(E)

        # The four pose hypotheses of the essential-matrix decomposition.
        possible_poses = [Pose(R1.T, R1.T @ t), Pose(R1.T, R1.T @ (-t)), Pose(R2.T, R2.T @ t), Pose(R2.T, R2.T @ (-t))]

        best_pose_points = 0
        best_pose = None

        # Keep the hypothesis that triangulates the most points.
        for pose in possible_poses:
            p, ids, med = triangulate_correspondences(corresp, eye3x4(), pose_to_view_mat3x4(pose), self.intrinsic_mat, self.triangulation_parameters)
            if len(p) > best_pose_points:
                best_pose_points = len(p)
                best_pose = pose

        return best_pose, best_pose_points

    def bundle_adjustment(self, frames):
        """Jointly refine camera poses for `frames` and shared 3D points.

        Selects up to 500 cloud points observed in every frame of the window,
        then minimizes reprojection residuals over 6-DoF camera parameters
        (Rodrigues rotation + translation) and point coordinates with a
        sparse `least_squares` solve.  Optimized poses are always written
        back to self.view_mats; optimized points are accepted only when they
        improve reprojection (see check_point).
        """
        def view_mat3x4_to_rodrigues_and_translation(vmat):
            # Split a 3x4 view matrix into a Rodrigues vector + translation.
            r_mat = vmat[:, :3]
            _t_vec = vmat[:, 3:]
            _r_vec, _ = cv2.Rodrigues(r_mat)
            return _r_vec, _t_vec

        def bundle_adjustment_sparsity(n_cameras, n_points):
            # Jacobian sparsity pattern: each 2D residual depends only on its
            # camera's 6 parameters and its point's 3 coordinates.
            m = camera_indices.size * 2
            n = n_cameras * 6 + n_points * 3
            A_ = lil_matrix((m, n), dtype=int)
            i = np.arange(len(camera_indices))
            for s in range(6):
                A_[2 * i, camera_indices * 6 + s] = 1
                A_[2 * i + 1, camera_indices * 6 + s] = 1

            for s in range(3):
                A_[2 * i, n_cameras * 6 + points_indices * 3 + s] = 1
                A_[2 * i + 1, n_cameras * 6 + points_indices * 3 + s] = 1
            return A_

        def rotate(points_, rot_vecs):
            # Rodrigues rotation formula applied row-wise.
            theta = np.linalg.norm(rot_vecs, axis=1)[:, np.newaxis]
            with np.errstate(invalid='ignore'):
                v = rot_vecs / theta
                v = np.nan_to_num(v)
            dot = np.sum(points_ * v, axis=1)[:, np.newaxis]
            cos_theta = np.cos(theta)
            sin_theta = np.sin(theta)
            return cos_theta * points_ + sin_theta * np.cross(v, points_) + dot * (1 - cos_theta) * v

        def project(points_, camera_params):
            # World point -> pixel via the per-observation camera parameters.
            points_proj = rotate(points_, camera_params[:, :3])
            points_proj += camera_params[:, 3:6]
            points_proj = np.dot(self.intrinsic_mat, points_proj.T)
            points_proj /= points_proj[[2]]
            return points_proj[:2].T

        def fun(params_, n_cameras, n_points):
            # Residuals: projected observations minus measured corners.
            camera_params = params_[:n_cameras * 6].reshape((n_cameras, 6))
            points_3d = params_[n_cameras * 6:].reshape((n_points, 3))
            points_proj = project(points_3d[points_indices], camera_params[camera_indices])
            return (points_proj - corners_points).ravel()

        def residuals(params_):
            return fun(params_, len(frames), len(intersected))

        def apply_res(res_):
            # Points: accepted selectively via check_point.
            for i in range(len(intersected)):
                p_ = res_[6 * len(frames) + i * 3:6 * len(frames) + i * 3 + 3]
                _idx = np.argwhere(self.point_cloud_builder.ids == intersected[i])[0][0]
                _p = self.point_cloud_builder.points[_idx]
                check_point(_p, p_, intersected[i])
            # Poses: always accepted.
            for i in range(len(frames)):
                r_vec_ = np.array([res_[i * 6], res_[i * 6 + 1], res_[i * 6 + 2]])
                t_vec_ = np.array([[res_[i * 6 + 3]], [res_[i * 6 + 4]], [res_[i * 6 + 5]]])
                v_mat = rodrigues_and_translation_to_view_mat3x4(r_vec_, t_vec_)
                self.view_mats[frames[i]] = v_mat

        def check_point(p_before, p_after, idx):
            # Keep the optimized point only when it is a <1px inlier in more
            # frames than the original point was.
            projected_before = []
            projected_after = []
            point2d = []
            for frame in frames:
                # project_points returns an (N, 2) array; take row 0 so the
                # stacks below are (len(frames), 2).  Previously the (1, 2)
                # rows were kept, broadcasting the subtraction to (F, F, 2).
                projected_before += [project_points(np.array([p_before]), self.intrinsic_mat @ self.view_mats[frame])[0]]
                projected_after += [project_points(np.array([p_after]), self.intrinsic_mat @ self.view_mats[frame])[0]]
                idx_ = np.argwhere(self.corner_storage[frame].ids == idx)[0][0]
                p = self.corner_storage[frame].points[idx_]
                point2d += [p]
            projected_before = np.array(projected_before)
            projected_after = np.array(projected_after)
            point2d = np.array(point2d)
            # Per-frame reprojection errors.  The previous code collapsed the
            # whole stack into one scalar norm, so the "counts" were always
            # 0 or 1 and the comparison was meaningless.
            err_before = np.linalg.norm(point2d - projected_before, axis=1)
            err_before_amount = np.sum(err_before < 1.0)
            err_after = np.linalg.norm(point2d - projected_after, axis=1)
            err_after_amount = np.sum(err_after < 1.0)
            if err_before_amount < err_after_amount:
                # `idx` is a corner id, not a row index: look up its position
                # in the cloud first.  Indexing points[] directly by id
                # overwrote an unrelated point.
                row = np.argwhere(self.point_cloud_builder.ids == idx)[0][0]
                self.point_cloud_builder.points[row] = p_after


        # Ids present both in the cloud and in every frame of the window.
        intersected = self.point_cloud_builder.ids.flatten()
        corners_idxs = {}
        for frame in frames:
            crnrs = self.corner_storage[frame].ids
            crnrs = crnrs.flatten()
            intersected, _ = snp.intersect(
                intersected, crnrs,
                indices=True)

        # Cap the optimized subset at 500 points to keep the solve tractable.
        intersected = np.array(list(sorted(np.random.choice(intersected, min(500, len(intersected)), replace=False))))
        for frame in frames:
            corner_idx, _ = snp.intersect(
                intersected,
                self.corner_storage[frame].ids.flatten(),
                indices=True)
            corners_idxs[frame] = corner_idx

        # Flattened initial parameter vector: [cams(6 each) | points(3 each)].
        points = []
        for idx in intersected:
            idx_ = np.argwhere(self.point_cloud_builder.ids == idx)[0][0]
            p = self.point_cloud_builder.points[idx_]
            points += [p[0], p[1], p[2]]
        corners_points = []
        for frame in frames:
            for idx in corners_idxs[frame]:
                idx_ = np.argwhere(self.corner_storage[frame].ids == idx)[0][0]
                p = self.corner_storage[frame].points[idx_]
                corners_points += [[p[0], p[1]]]
        camera_parameters = []
        for frame in frames:
            r_vec, t_vec = view_mat3x4_to_rodrigues_and_translation(self.view_mats[frame])
            camera_parameters += [r_vec[0], r_vec[1], r_vec[2], t_vec[0], t_vec[1], t_vec[2]]
        params = camera_parameters + points

        camera_indices = []
        for i in range(len(frames)):
            camera_indices += [i] * len(intersected)
        camera_indices = np.array(camera_indices)
        points_indices = np.tile(np.arange(len(intersected)), len(frames))

        self.retriangulate_points_arr([np.array([item]) for item in intersected])

        A = bundle_adjustment_sparsity(len(frames), len(intersected))
        res = least_squares(residuals, params, jac_sparsity=A, verbose=2, x_scale='jac', ftol=1e-4, xtol=1e-4, method='trf')
        apply_res(res.x)
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    """Track camera poses over the sequence and build a colored point cloud.

    Starts from two known (or automatically selected) views, triangulates an
    initial cloud, then repeatedly solves PnP for the remaining frames,
    growing the cloud after each newly localized frame.  Frames that never
    localize inherit the nearest earlier pose.
    """
    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    frames_num = len(rgb_sequence)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    if known_view_1 is None or known_view_2 is None:
        known_view_1, known_view_2 = select_frames(rgb_sequence,
                                                   corner_storage,
                                                   intrinsic_mat)
        if known_view_2[0] == -1:
            print("Failed to find good starting frames")
            exit(0)

    correspondences = build_correspondences(corner_storage[known_view_1[0]],
                                            corner_storage[known_view_2[0]])
    view_mat1 = pose_to_view_mat3x4(known_view_1[1])
    view_mat2 = pose_to_view_mat3x4(known_view_2[1])

    points, ids, _ = triangulate_correspondences(correspondences, view_mat1,
                                                 view_mat2, intrinsic_mat,
                                                 TRIANG_PARAMS)

    if len(ids) < 8:
        print("Bad frames: too few correspondences!")
        exit(0)

    view_mats, point_cloud_builder = [None] * frames_num, PointCloudBuilder(
        ids, points)
    view_mats[known_view_1[0]] = view_mat1
    view_mats[known_view_2[0]] = view_mat2

    # Sweep the sequence until a full pass localizes no new frame.
    updated = True
    pass_num = 0
    while updated:
        updated = False
        pass_num += 1

        for i in range(frames_num):
            if view_mats[i] is not None:
                continue

            corners = corner_storage[i]

            _, ind1, ind2 = np.intersect1d(point_cloud_builder.ids,
                                           corners.ids.flatten(),
                                           return_indices=True)
            # solvePnPRansac needs at least 4 correspondences; skip early
            # instead of relying on the exception handler below.
            if len(ind1) < 4:
                continue
            try:
                _, rvec, tvec, inliers = cv2.solvePnPRansac(
                    point_cloud_builder.points[ind1],
                    corners.points[ind2],
                    intrinsic_mat,
                    distCoeffs=None)

                if inliers is None:
                    print(f"Pass {pass_num}, frame {i}. No inliers!")
                    continue

                print(
                    f"Pass {pass_num}, frame {i}. Number of inliers == {len(inliers)}"
                )

                view_mats[i] = rodrigues_and_translation_to_view_mat3x4(
                    rvec, tvec)
            except cv2.error:
                # Narrowed from a blanket `except Exception`, which silently
                # hid genuine bugs; only OpenCV solver failures are expected.
                continue

            updated = True

            cloud_changed = add_points(point_cloud_builder, corner_storage, i,
                                       view_mats, intrinsic_mat)
            if cloud_changed:
                print(f"Size of point cloud == {len(point_cloud_builder.ids)}")

    # Backfill frames that never localized with the nearest earlier pose.
    first_not_none = next(item for item in view_mats if item is not None)
    if view_mats[0] is None:
        view_mats[0] = first_not_none

    for i in range(frames_num):
        if view_mats[i] is None:
            view_mats[i] = view_mats[i - 1]

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)

    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
Example #21
0
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    """Track camera poses and triangulate a point cloud from two known views.

    Requires both known views; triangulates an initial cloud from them, then
    localizes remaining frames with RANSAC PnP, re-triangulating new points
    against every already-known frame after each success.
    """
    if known_view_1 is None or known_view_2 is None:
        raise NotImplementedError()

    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(
        camera_parameters,
        rgb_sequence[0].shape[0]
    )

    frame_count = len(corner_storage)
    view_mats = [None] * frame_count
    view_1_id, view_1 = known_view_1[0], pose_to_view_mat3x4(known_view_1[1])
    view_2_id, view_2 = known_view_2[0], pose_to_view_mat3x4(known_view_2[1])

    view_mats[view_1_id] = view_1
    view_mats[view_2_id] = view_2

    correspondences = build_correspondences(corner_storage[view_1_id], corner_storage[view_2_id], None)
    triangulation_parameters = TriangulationParameters(1, 5, 0)
    # Looser thresholds deliberately used for the very first triangulation.
    points, ids, _ = triangulate_correspondences(correspondences, view_1, view_2,
                                                 intrinsic_mat, TriangulationParameters(0.1, 0.1, 0))

    point_cloud_builder = PointCloudBuilder(ids, points)

    unknown_frames = frame_count - 2
    known_frames = {view_1_id, view_2_id}

    while unknown_frames > 0:
        resolved_any = False
        for i in range(frame_count):
            if view_mats[i] is None:
                corners = corner_storage[i]
                interesting_ids, in_corners, in_cloud = np.intersect1d(corners.ids.flatten(), point_cloud_builder.ids.flatten(), return_indices=True)
                points_2d = corners.points[in_corners]
                points_3d = point_cloud_builder.points[in_cloud]

                # Fixed: the old code tested len(ids) -- the ids of the
                # *initial* triangulation, never updated -- instead of the
                # current frame's correspondences.
                if len(interesting_ids) < 3:
                    continue

                method = cv2.SOLVEPNP_EPNP
                if len(interesting_ids) == 3:
                    method = cv2.SOLVEPNP_P3P
                retval, rvec, tvec, inliers = cv2.solvePnPRansac(points_3d, points_2d, intrinsic_mat, None,
                                                                 flags=method)
                # inliers can be None even when retval is truthy; it is
                # dereferenced for indexing right below.
                if not retval or inliers is None:
                    continue
                retval, rvec, tvec = cv2.solvePnP(points_3d[inliers], points_2d[inliers], intrinsic_mat, None,
                                                  rvec=rvec, tvec=tvec, useExtrinsicGuess=True)
                if not retval:
                    continue

                view_mats[i] = rodrigues_and_translation_to_view_mat3x4(rvec, tvec)
                unknown_frames -= 1
                known_frames.add(i)
                resolved_any = True

                # Drop RANSAC outliers, then re-triangulate against every
                # already-known frame.
                outliers = np.delete(interesting_ids, inliers)
                for frame in known_frames:
                    correspondences = build_correspondences(corners, corner_storage[frame], outliers)
                    points_3d, corner_ids, _ = triangulate_correspondences(
                        correspondences, view_mats[i], view_mats[frame], intrinsic_mat, triangulation_parameters)
                    point_cloud_builder.add_points(corner_ids, points_3d)

        if not resolved_any:
            # No frame could be localized in a full pass: previously this
            # loop spun forever; give up on the remaining frames instead.
            break

    # Fill any unresolved frames with the nearest earlier known pose so the
    # coloring / pose export below cannot hit a None (no-op when tracking
    # completed for every frame).
    last_known = view_1
    for i in range(frame_count):
        if view_mats[i] is None:
            view_mats[i] = last_known
        else:
            last_known = view_mats[i]

    calc_point_cloud_colors(
        point_cloud_builder,
        rgb_sequence,
        view_mats,
        intrinsic_mat,
        corner_storage,
        5.0
    )
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
Example #22
0
class CameraTracker:
    """Incremental camera tracker: PnP localization plus pairwise triangulation.

    Starts from two known views, then walks outward from the initialization
    pair, localizing each frame against the current cloud and triangulating
    new points from nearby already-tracked frames.
    """

    def __init__(self, intrinsic_mat, corner_storage, known_view_1, known_view_2):
        self.intrinsic_mat = intrinsic_mat
        self.corner_storage = corner_storage
        frame_num_1 = known_view_1[0]
        frame_num_2 = known_view_2[0]
        self.pc_builder = PointCloudBuilder()
        view_matr_1 = pose_to_view_mat3x4(known_view_1[1])
        view_matr_2 = pose_to_view_mat3x4(known_view_2[1])

        self.tracked_views = [eye3x4()] * len(self.corner_storage)
        self.tracked_views[frame_num_1] = view_matr_1
        self.tracked_views[frame_num_2] = view_matr_2
        self.initial_baseline = get_baseline(view_matr_1, view_matr_2)
        print("initial baseline =", self.initial_baseline)

        self._add_points_from_frame(frame_num_1, frame_num_2, initial_triangulation=True)
        self.min_init_frame = min(frame_num_1, frame_num_2)
        self.max_init_frame = max(frame_num_1, frame_num_2)
        # Corner ids flagged as outliers by PnP; reset on each backward sweep.
        self.outliers = set()

    def _get_pos(self, frame_number) -> np.ndarray:
        """Solve PnP for one frame against the cloud; return a 3x4 view matrix.

        Raises:
            TrackingError: when too few correspondences survive or RANSAC fails.
        """
        corners = self.corner_storage[frame_number]
        _, pts_ids, corners_ids = np.intersect1d(self.pc_builder.ids, corners.ids, assume_unique=True,
                                                 return_indices=True)

        common_pts_3d = self.pc_builder.points[pts_ids]
        common_ids = self.pc_builder.ids[pts_ids]
        common_corners = corners.points[corners_ids]
        # `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin bool is the documented replacement.
        inlier_mask = np.zeros_like(common_pts_3d, dtype=bool)
        inlier_counter = 0
        for common_id, mask_elem in zip(common_ids[:, 0], inlier_mask):
            if common_id not in self.outliers:
                mask_elem[:] = True
                inlier_counter += 1
        # Drop known outliers only when enough points remain afterwards.
        if inlier_counter > 7:
            common_pts_3d = common_pts_3d[inlier_mask].reshape(-1, 3)
            common_ids = common_ids[inlier_mask[:, :1]].reshape(-1, 1)
            common_corners = common_corners[inlier_mask[:, :2]].reshape(-1, 2)
        max_reproj_error = 3.0
        if len(common_pts_3d) < 4:
            raise TrackingError('Not enough points to solve RANSAC on frame ' + str(frame_number))
        _, rot_vec, tr_vec, inliers = cv2.solvePnPRansac(common_pts_3d,
                                                         common_corners,
                                                         self.intrinsic_mat,
                                                         None,
                                                         reprojectionError=max_reproj_error,
                                                         flags=cv2.SOLVEPNP_EPNP)
        extrinsic_mat = rodrigues_and_translation_to_view_mat3x4(rot_vec, tr_vec)
        proj_matr = self.intrinsic_mat @ extrinsic_mat
        if inliers is None:
            raise TrackingError('Failed to solve PnP on frame' + str(frame_number))

        # Relax the reprojection threshold until enough inliers are found.
        while len(inliers) < 8 and max_reproj_error < 50:
            max_reproj_error *= 1.2
            inliers = calc_inlier_indices(common_pts_3d, common_corners, proj_matr, max_reproj_error)
        inlier_pts = common_pts_3d[inliers]
        inlier_corners = common_corners[inliers]
        outlier_ids = np.setdiff1d(common_ids, common_ids[inliers], assume_unique=True)
        self.outliers.update(outlier_ids)
        if len(inliers) < 4:
            inlier_pts = common_pts_3d
            inlier_corners = common_corners
        print('Found position on', len(inlier_corners), 'inliers')
        # Refine on inliers only, seeded with the EPnP estimate.
        _, rot_vec, tr_vec, inliers = cv2.solvePnPRansac(inlier_pts, inlier_corners, self.intrinsic_mat, None,
                                                         rot_vec, tr_vec, useExtrinsicGuess=True)
        return rodrigues_and_translation_to_view_mat3x4(rot_vec, tr_vec)

    def _add_points_from_frame(self,
                               frame_num_1: int,
                               frame_num_2: int,
                               initial_triangulation: bool = False) -> int:
        """Triangulate new cloud points from a frame pair; return how many.

        With initial_triangulation=True, progressively relaxes thresholds
        until enough points are produced, and raises TrackingError on failure.
        """
        corners_1 = self.corner_storage[frame_num_1]
        corners_2 = self.corner_storage[frame_num_2]
        correspondences = build_correspondences(corners_1, corners_2, ids_to_remove=self.pc_builder.ids)
        if len(correspondences.ids) > 0:
            max_reproj_error = 1.0
            min_angle = 1.0
            view_1 = self.tracked_views[frame_num_1]
            view_2 = self.tracked_views[frame_num_2]
            triangulation_params = TriangulationParameters(max_reproj_error, min_angle, 0)
            pts_3d, triangulated_ids, med_cos = triangulate_correspondences(correspondences, view_1,
                                                                            view_2, self.intrinsic_mat,
                                                                            triangulation_params)
            if initial_triangulation:
                num_iter = 0
                while len(pts_3d) < 8 and len(correspondences.ids) > 7 and num_iter < 100:
                    max_reproj_error *= 1.2
                    min_angle *= 0.8
                    triangulation_params = TriangulationParameters(max_reproj_error, min_angle, 0)
                    pts_3d, triangulated_ids, med_cos = triangulate_correspondences(correspondences, view_1,
                                                                                    view_2, self.intrinsic_mat,
                                                                                    triangulation_params)
                    num_iter += 1

                if num_iter >= 100 and len(pts_3d) < 4:
                    raise TrackingError('Failed to triangulate enough points')
            self.pc_builder.add_points(triangulated_ids, pts_3d)

            return len(pts_3d)
        elif initial_triangulation:
            raise TrackingError('Not found correspondences on image pair')
        else:
            return 0

    def track(self):
        """Track the whole sequence; return (view matrices, point cloud builder).

        Walks forward from the initialization pair to the end, then backward
        from the pair to frame 0, localizing each frame and triangulating
        against up to five earlier frames with a sufficient baseline.
        """
        curr_frame = self.min_init_frame + 1
        seq_size = len(self.corner_storage)
        for _ in range(2, seq_size):
            if curr_frame == self.max_init_frame:
                curr_frame += 1
            if curr_frame >= seq_size:
                # Forward sweep done; restart backward from the init pair.
                curr_frame = self.min_init_frame - 1
                self.outliers = set()
            print('Frame', curr_frame)
            try:
                self.tracked_views[curr_frame] = self._get_pos(curr_frame)
            except TrackingError as error:
                print(error)
                print('Stopping tracking')
                break
            prev_curr_diff = 5
            num_pairs = 0
            num_added = 0
            while num_pairs < 5:
                prev_frame = \
                    curr_frame - prev_curr_diff if curr_frame > self.min_init_frame else curr_frame + prev_curr_diff
                prev_curr_diff += 1
                if prev_frame < seq_size and (self.min_init_frame <= prev_frame or curr_frame < self.min_init_frame):
                    if check_baseline(self.tracked_views[prev_frame], self.tracked_views[curr_frame],
                                      self.initial_baseline * 0.15):
                        num_added += self._add_points_from_frame(prev_frame, curr_frame)
                        num_pairs += 1
                else:
                    break
            print('Added', num_added, 'points')
            print('Point cloud size = ', len(self.pc_builder.ids))
            if curr_frame > self.min_init_frame:
                curr_frame += 1
            else:
                curr_frame -= 1

        return self.tracked_views, self.pc_builder
Example #23
0
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    """Track camera poses via per-frame trackers and build a colored cloud.

    Auto-selects the initial pair when not given, searches for a triangulation
    angle that yields enough starting points, then iteratively refines
    trackers and the cloud.  Frames without a pose inherit a neighbor's.
    """
    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    if known_view_1 is None or known_view_2 is None:
        known_view_1, known_view_2 = get_matrix_poses(corner_storage,
                                                      intrinsic_mat)

    video_size = len(rgb_sequence)
    frame_trackers = [
        FrameTrack(i, corner_storage[i]) for i in range(video_size)
    ]
    known_tracker_creator = lambda x: FrameTrack(x[0], corner_storage[x[0]],
                                                 pose_to_view_mat3x4(x[1]))

    frame_trackers[known_view_1[0]] = known_tracker_creator(known_view_1)
    frame_trackers[known_view_2[0]] = known_tracker_creator(known_view_2)

    # Relax the triangulation angle from 90 degrees down until the initial
    # pair yields a reasonably sized cloud.
    init_params = triang_params

    for angle in range(90, 0, -2):
        params = TriangulationParameters(
            max_reprojection_error=MAX_REPROJECTION_ERROR,
            min_triangulation_angle_deg=angle,
            min_depth=0.001)
        _, points = triangulate_trackers(frame_trackers[known_view_1[0]],
                                         frame_trackers[known_view_2[0]],
                                         intrinsic_mat, params)
        if len(points) > 100:
            print(f"Chosen init angle: {angle}")
            init_params = params
            break

    ids, points = triangulate_trackers(frame_trackers[known_view_1[0]],
                                       frame_trackers[known_view_2[0]],
                                       intrinsic_mat, init_params)

    point_cloud_builder = PointCloudBuilder(ids, points)
    if len(points) < MIN_STARTING_POINTS:
        print(
            f"Not enough starting points ({len(points)}), please choose another initial frames pair"
            f"\n0, 20 is a good pair for short fox, ")
        # The signature promises a PointCloud; returning the raw builder (as
        # before) broke callers expecting a cloud on the failure path.
        return [], PointCloudBuilder().build_point_cloud()

    frame_trackers[known_view_1[0]].update_reproj_error(
        point_cloud_builder, intrinsic_mat)
    frame_trackers[known_view_2[0]].update_reproj_error(
        point_cloud_builder, intrinsic_mat)

    point_cloud_builder = track(ITERATIONS, frame_trackers,
                                point_cloud_builder, intrinsic_mat)

    # Fill untracked frames: forward from the previous pose, then backward.
    view_mats = [x.mtx for x in frame_trackers]
    for i in range(1, len(view_mats)):
        if view_mats[i] is None:
            view_mats[i] = view_mats[i - 1]
    for i in range(len(view_mats) - 2, -1, -1):
        if view_mats[i] is None:
            view_mats[i] = view_mats[i + 1]

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
Example #24
0
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    """Incrementally localize frames with RANSAC PnP and grow a 3D point map.

    Requires both known views.  Localizes one new frame per outer iteration,
    invalidating PnP outlier points and triangulating new ones against every
    already-localized frame; stops when a pass adds no frame.
    """
    if known_view_1 is None or known_view_2 is None:
        raise NotImplementedError()

    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    view_mats_tmp = [None for _ in range(len(corner_storage))]
    # corner id -> 3D point; entries become None once flagged as outliers.
    all_points = {}
    not_none_mats = [known_view_1[0], known_view_2[0]]
    none_mats = set(
        [ind for ind in range(len(view_mats_tmp)) if ind not in not_none_mats])

    view_mats_tmp[known_view_1[0]] = pose_to_view_mat3x4(known_view_1[1])
    view_mats_tmp[known_view_2[0]] = pose_to_view_mat3x4(known_view_2[1])
    add_new_points(corner_storage, view_mats_tmp, known_view_1[0],
                   known_view_2[0], all_points, intrinsic_mat)

    old_len = 2
    for _ in range(len(view_mats_tmp)):
        for ind in none_mats:
            corner_ids = list(corner_storage[ind].ids.reshape(-1))
            intersection_inds = [
                point_ind for point_ind in corner_ids if
                point_ind in all_points and all_points[point_ind] is not None
            ]
            # solvePnPRansac asserts on fewer than 5 points here.
            if len(intersection_inds) < 5:
                continue
            corner_points = {
                i: p
                for i, p in zip(corner_ids, corner_storage[ind].points)
            }
            intersection_points_c = np.array(
                [corner_points[i] for i in intersection_inds])
            intersection_points_f = np.array(
                [all_points[i] for i in intersection_inds])
            retval, rvec, tvec, inliers = cv2.solvePnPRansac(
                intersection_points_f, intersection_points_c, intrinsic_mat,
                None)

            # inliers can be None even with a truthy retval, and it is
            # dereferenced (inliers.shape) below.
            if not retval or inliers is None:
                continue

            print(f'Processing frame: {ind}')

            # `inliers` holds row positions into the intersection arrays,
            # not corner ids; the previous id-vs-index membership test
            # invalidated the wrong points.
            inlier_positions = set(np.asarray(inliers).flatten().tolist())
            newly_none_number = 0
            for pos, i in enumerate(intersection_inds):
                if pos not in inlier_positions:
                    newly_none_number += 1
                    all_points[i] = None

            print(
                f'{newly_none_number} points filled as None, len of inliers: {inliers.shape[0]}'
            )
            print(
                f'Number of not points: {len([p for p in all_points.keys() if all_points[p] is not None])}'
            )

            view_mats_tmp[ind] = rodrigues_and_translation_to_view_mat3x4(
                rvec, tvec)

            for not_none_ind in not_none_mats:
                add_new_points(corner_storage, view_mats_tmp, ind,
                               not_none_ind, all_points, intrinsic_mat)

            not_none_mats.append(ind)
            none_mats.remove(ind)
            break

        # Stop when a full pass failed to localize any new frame.
        if len(not_none_mats) == old_len:
            break
        old_len = len(not_none_mats)

    # Fill unresolved frames with the nearest earlier localized pose.
    view_mats = [None for _ in range(len(corner_storage))]
    for view_mat_ind in not_none_mats:
        view_mats[view_mat_ind] = view_mats_tmp[view_mat_ind]
    last_ind = 0
    for i in range(len(view_mats)):
        if view_mats[i] is None:
            view_mats[i] = view_mats[last_ind]
        else:
            last_ind = i
    all_points = {k: v for k, v in all_points.items() if v is not None}
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int is the documented equivalent dtype spec.
    point_cloud_builder = PointCloudBuilder(
        np.array(list(all_points.keys()), dtype=int),
        np.array(list(all_points.values())))

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
Example #25
0
def track_and_calc_colors(
    camera_parameters: CameraParameters,
    corner_storage: CornerStorage,
    frame_sequence_path: str,
    known_view_1: Optional[Tuple[int, Pose]] = None,
    known_view_2: Optional[Tuple[int, Pose]] = None
) -> Tuple[List[Pose], PointCloud]:
    """Track camera motion across the sequence and build a colored 3D cloud.

    Pipeline: seed 3D points from two known (or auto-selected) views, then
    sweep the frames forward and backward solving each frame's pose from
    already-triangulated points and triangulating new ones, and finally run
    extra refinement sweeps before assembling the point cloud.

    :param camera_parameters: intrinsic camera parameters.
    :param corner_storage: per-frame detected 2D corners.
    :param frame_sequence_path: path to the RGB frame sequence.
    :param known_view_1: optional (frame index, pose) seed view.
    :param known_view_2: optional second (frame index, pose) seed view.
    :return: tuple of (camera pose per frame, colored point cloud).
    """
    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])
    # If seed poses were not supplied, pick an initial frame pair
    # automatically from the corner tracks.
    if known_view_1 is None or known_view_2 is None:
        known_view_1, known_view_2 = initial_camera_views(
            corner_storage, intrinsic_mat)
    print(known_view_1[1])
    print(known_view_2[1])

    print(known_view_1[0], known_view_2[0])

    print(intrinsic_mat)

    # Triangulate the initial 3D points from the two seed views.
    points_3d, points_ids = calc_starting_points(intrinsic_mat, corner_storage,
                                                 known_view_1, known_view_2)

    #initial_points_3d, initial_points_ids = points_3d.copy(), points_ids.copy()

    # Accumulator for final 3D positions, indexed by corner id; rows stay
    # None (object dtype) until a point is accepted in the last refinement
    # sweep below.
    n_points = corner_storage.max_corner_id() + 1
    res_points_3d = np.full((n_points, 3), None)
    #res_points_3d[points_ids] = points_3d

    # One 3x4 view matrix per frame; only the two seed frames are known.
    view_mats = [np.full((3, 4), None) for _ in range(len(corner_storage))]
    view_mats[known_view_1[0]], view_mats[
        known_view_2[0]] = pose_to_view_mat3x4(
            known_view_1[1]), pose_to_view_mat3x4(known_view_2[1])

    print(f'len(corner_storage):{len(corner_storage)}')

    # Forward sweep: solve poses from the first seed frame onward,
    # triangulating new points as frames are registered.
    id1 = known_view_1[0]
    for n_frame, corners in enumerate(corner_storage[id1 + 1:], id1 + 1):
        # Corners of this frame that already have 3D positions.
        common_obj, common_img, common_ids = get_common_points(
            corners, points_ids, points_3d)

        # PnP pose for this frame; also filters the tracked points.
        mat, points_3d, points_ids, reprojection_error = build_view_mat(
            common_obj, common_img, common_ids, intrinsic_mat)
        points_ids = np.array(points_ids)
        view_mats[n_frame] = mat

        is_null = np.array([(x[0] is None) for x in res_points_3d[points_ids]])
        #res_points_3d[points_ids[is_null]] = points_3d[is_null]

        n_inliers = len(points_3d)

        # Triangulate corners that became visible in enough registered frames.
        points_3d, points_ids = calc_new_points(n_frame, intrinsic_mat,
                                                corner_storage, view_mats,
                                                points_3d, points_ids)

        print(
            f"Processing frame #{n_frame}. Number of inliers: {n_inliers}. "
            f"Reprojection error: {reprojection_error}. Tracking points: {len(common_img)}"
        )

    # Backward sweep over all frames to cover frames before the seed and to
    # refine poses with the now-larger cloud.
    for n_frame, corners in list(enumerate(corner_storage))[::-1]:
        common_obj, common_img, common_ids = get_common_points(
            corners, points_ids, points_3d)

        mat, points_3d, points_ids, reprojection_error = build_view_mat(
            common_obj, common_img, common_ids, intrinsic_mat)
        view_mats[n_frame] = mat

        is_null = np.array([(x[0] is None) for x in res_points_3d[points_ids]])
        #res_points_3d[points_ids[is_null]] = points_3d[is_null]

        n_inliers = len(points_3d)

        points_3d, points_ids = calc_new_points(n_frame, intrinsic_mat,
                                                corner_storage, view_mats,
                                                points_3d, points_ids)

        print(
            f"Processing frame #{n_frame}. Number of inliers: {n_inliers}. "
            f"Reprojection error: {reprojection_error}. Tracking points: {len(common_img)}"
        )

    # Approximating: extra forward+backward refinement sweeps; only the last
    # iteration commits points into res_points_3d.
    # NOTE(review): `iter` shadows the builtin of the same name.

    n_iter = 1
    for iter in range(n_iter):
        for n_frame, corners in enumerate(corner_storage):
            common_obj, common_img, common_ids = get_common_points(
                corners, points_ids, points_3d)

            mat, points_3d, points_ids, reprojection_error = build_view_mat(
                common_obj, common_img, common_ids, intrinsic_mat)

            view_mats[n_frame] = mat
            if iter == n_iter - 1:
                # Final pass: record the refined 3D positions.
                res_points_3d[points_ids] = points_3d

            n_inliers = len(points_3d)

            points_3d, points_ids = calc_new_points(n_frame, intrinsic_mat,
                                                    corner_storage, view_mats,
                                                    points_3d, points_ids)

            print(
                f"Processing frame #{n_frame}. Number of inliers: {n_inliers}. "
                f"Reprojection error: {reprojection_error}. Tracking points: {len(common_img)}"
            )

        for n_frame, corners in list(enumerate(corner_storage))[::-1]:
            common_obj, common_img, common_ids = get_common_points(
                corners, points_ids, points_3d)

            mat, points_3d, points_ids, reprojection_error = build_view_mat(
                common_obj, common_img, common_ids, intrinsic_mat)

            view_mats[n_frame] = mat
            if iter == n_iter - 1:
                res_points_3d[points_ids] = points_3d

            n_inliers = len(points_3d)

            points_3d, points_ids = calc_new_points(n_frame, intrinsic_mat,
                                                    corner_storage, view_mats,
                                                    points_3d, points_ids)

            print(
                f"Processing frame #{n_frame}. Number of inliers: {n_inliers}. "
                f"Reprojection error: {reprojection_error}. Tracking points: {len(common_img)}"
            )

    # Keep only corners that received a 3D position during the final sweep.
    res_points_ids = np.array(
        [i for i, x in enumerate(res_points_3d) if x[0] is not None])
    res_points_3d = np.array(res_points_3d[res_points_ids], dtype=float)

    point_cloud_builder = PointCloudBuilder(ids=res_points_ids,
                                            points=res_points_3d)

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)

    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
Example #26
0
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    """Track camera poses through the sequence and build a colored 3D cloud.

    Seeds the reconstruction from two known (or auto-estimated) views and
    triangulates an initial cloud. Then it repeatedly picks the untracked
    frame with the most PnP-RANSAC inliers, registers it, and triangulates
    corners that have been observed in at least two registered frames.
    Frames that never get registered inherit the last known pose.

    :param camera_parameters: intrinsic camera parameters.
    :param corner_storage: per-frame detected 2D corners.
    :param frame_sequence_path: path to the RGB frame sequence.
    :param known_view_1: optional (frame index, pose) seed view.
    :param known_view_2: optional second (frame index, pose) seed view.
    :return: tuple of (camera pose per frame, colored point cloud).
    """
    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    # Auto-select the two seed views when the caller does not supply them.
    if known_view_1 is None or known_view_2 is None:
        known_view_1, known_view_2 = calc_known_views(corner_storage,
                                                      intrinsic_mat)

    triang_params = TriangulationParameters(8.0, 1.0, .1)

    num_frames = len(corner_storage)

    frame_1 = known_view_1[0]
    frame_2 = known_view_2[0]

    print(f'{frame_1}, {frame_2}')

    # Per-frame translation vectors; None until the frame is registered.
    tvecs = [None] * num_frames
    tvecs[frame_1] = known_view_1[1].t_vec
    tvecs[frame_2] = known_view_2[1].t_vec

    view_mat_1 = pose_to_view_mat3x4(known_view_1[1])
    view_mat_2 = pose_to_view_mat3x4(known_view_2[1])

    # Per-frame 3x4 view matrices; None marks a still-untracked frame.
    view_mats = [None] * num_frames
    view_mats[frame_1] = view_mat_1
    view_mats[frame_2] = view_mat_2

    # corner id -> triangulated 3D point.
    cloud = {}

    # Seed the cloud from correspondences between the two known views.
    corrs = build_correspondences(corner_storage[frame_1],
                                  corner_storage[frame_2])
    points3d, ids, _ = triangulate_correspondences(corrs, view_mat_1,
                                                   view_mat_2, intrinsic_mat,
                                                   triang_params)
    for point3d, corner_id in zip(points3d, ids):
        cloud[corner_id] = point3d

    # For every corner: number of registered frames that saw it so far, and
    # the list of (frame index, position inside that frame's corner list).
    current_corners_occurences = {}
    corner_to_frames = {}
    for frame_idx, corners in enumerate(corner_storage):
        for pos, corner_id in enumerate(corners.ids.flatten()):
            current_corners_occurences[corner_id] = 0
            corner_to_frames.setdefault(corner_id, []).append([frame_idx, pos])

    while len(untracked_frames(view_mats)) > 0:
        untr_frames = untracked_frames(view_mats)

        # Among all untracked frames, register the one whose PnP-RANSAC
        # solution has the most inliers.
        max_num_inl = -1
        best_frame = -1
        best_rvec = None
        best_tvec = None
        for frame in untr_frames:
            rvec, tvec, num_inl = calc_camera_pose(frame, corner_storage,
                                                   cloud, intrinsic_mat)
            if rvec is not None and num_inl > max_num_inl:
                best_frame = frame
                max_num_inl = num_inl
                best_rvec = rvec
                best_tvec = tvec

        if max_num_inl == -1:
            # No remaining frame can be solved; fall through to pose fill-in.
            break

        # Corners of the chosen frame now seen in >= 2 registered frames
        # become triangulation candidates.
        corners_to_add = []
        for corner_id in corner_storage[best_frame].ids.flatten():
            if corner_id in current_corners_occurences:
                current_corners_occurences[corner_id] += 1
                if current_corners_occurences[corner_id] >= 2:
                    corners_to_add.append(corner_id)

        # NOTE(review): points are added before view_mats[best_frame] is
        # assigned, so the newly registered frame itself does not take part
        # in this round of triangulation — confirm this ordering is intended.
        for corner in corners_to_add:
            add_new_point(corner, corner_to_frames, view_mats, tvecs,
                          corner_storage, cloud, current_corners_occurences,
                          intrinsic_mat)

        print(f"Frame: {best_frame}, Inliers: {max_num_inl}")
        print(f"Cloud size: {len(cloud)}")
        view_mats[best_frame] = rodrigues_and_translation_to_view_mat3x4(
            best_rvec, best_tvec)
        tvecs[best_frame] = best_tvec

    # Frames that could not be registered inherit the last known pose.
    last_mat = None
    for frame in range(num_frames):
        if view_mats[frame] is None:
            view_mats[frame] = last_mat
        else:
            last_mat = view_mats[frame]

    ids = []
    points = []
    for corner_id in cloud.keys():
        ids.append(corner_id)
        points.append(cloud[corner_id])

    # np.int was removed in NumPy 1.24; use the explicit fixed-width dtype.
    point_cloud_builder = PointCloudBuilder(np.array(ids, dtype=np.int64),
                                            np.array(points))

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    """Track the camera through the sequence and build a colored 3D cloud.

    Initialization is fully automatic: frame pairs are scanned, the relative
    motion is recovered from the essential matrix, and the best pair seeds
    the cloud. Afterwards the frame with the most PnP inliers is registered
    repeatedly, triangulating newly observed corners along the way.

    :param camera_parameters: intrinsic camera parameters.
    :param corner_storage: per-frame detected 2D corners.
    :param frame_sequence_path: path to the RGB frame sequence.
    :param known_view_1: unused here — initialization is always estimated.
    :param known_view_2: unused here — initialization is always estimated.
    :return: tuple of (camera pose per frame, colored point cloud).
    :raises NotImplementedError: if no frame pair allows initialization or a
        recovered matrix is not a proper rotation.
    """

    # Check that the rotation part of a 3x4 view matrix is a real rotation.
    def check_if_mat_is_rot_mat(mat):
        mat = mat[:, :3]
        det = np.linalg.det(mat)
        # the determinant must equal 1
        diff1 = abs(1 - det)
        x = mat @ mat.T
        # the inverse must equal the transpose, i.e. R @ R.T == I
        # NOTE(review): signed differences are summed, so element errors can
        # cancel; np.sum(np.abs(...)) would be stricter — confirm before changing.
        diff2 = abs(np.sum(np.eye(3) - x))
        eps = 0.001
        return diff1 < eps and diff2 < eps

    MAX_REPROJECTION_ERROR = 1.6
    MIN_DIST_TRIANGULATE = 0.01

    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    frame_count = len(corner_storage)
    t_vecs = [None] * frame_count
    view_mats = [None] * frame_count  # camera pose for every frame

    # For every corner: the frames it appears in, with its index inside
    # each frame's corner list.
    cur_corners_occurencies = {}
    frames_of_corner = {}
    for i, corners in enumerate(corner_storage):
        for id_in_list, j in enumerate(corners.ids.flatten()):
            cur_corners_occurencies[j] = 0
            if j not in frames_of_corner.keys():
                frames_of_corner[j] = [[i, id_in_list]]
            else:
                frames_of_corner[j].append([i, id_in_list])

    # Triangulate the corners shared by two frames into 3D points, using the
    # frames' already-known view matrices.
    def triangulate(frame_0, frame_1, params=TriangulationParameters(2, 1e-3, 1e-4), ids_to_remove=None) \
            -> Tuple[np.ndarray, np.ndarray, float]:
        corrs = build_correspondences(corner_storage[frame_0],
                                      corner_storage[frame_1])
        return triangulate_correspondences(corrs, view_mats[frame_0],
                                           view_mats[frame_1], intrinsic_mat,
                                           params)

    # Initialization constants.
    BASELINE_THRESHOLD = 0.9
    REPROJECTION_ERROR_THRESHOLD = 1.4
    MIN_3D_POINTS = 10
    frame_steps = [9, 30, 40]

    # Recover the relative motion between two given frames from the
    # essential matrix and rate the pair's quality.
    def get_first_cam_pose(frame_0, frame_1):
        corrs = build_correspondences(corner_storage[frame_0],
                                      corner_storage[frame_1])
        e_mat, _ = cv2.findEssentialMat(corrs.points_1, corrs.points_2,
                                        intrinsic_mat, cv2.RANSAC, 0.999, 2.0)

        # Pick, among the four (R, t) decompositions of E, the one that puts
        # the most triangulated points in front of both cameras (cheirality).
        def essential_mat_to_camera_view_matrix(e_mat):
            U, S, VH = np.linalg.svd(e_mat, full_matrices=True)
            # np.float was removed in NumPy 1.24; use the explicit dtype.
            W = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]], dtype=np.float64)
            u3 = U[:, 2]
            possible_R = [U @ W @ VH, U @ W.T @ VH]
            possible_t = [u3, -u3]
            best_R = None
            best_t = None
            max_positive_z = 0
            for R in possible_R:
                for t in possible_t:
                    view_mat0 = np.hstack((np.eye(3), np.zeros((3, 1))))
                    view_mat1 = np.hstack((R, t.reshape((3, 1))))
                    view_mats[frame_0] = view_mat0
                    view_mats[frame_1] = view_mat1
                    points3d, ids, median_cos = triangulate(frame_0, frame_1)
                    cur_positive_z = 0
                    for pt in points3d:
                        # Homogeneous coordinate must be 1 (was 0, which
                        # silently dropped the translation from the depth test).
                        pt = np.append(pt, np.ones(1)).reshape((4, 1))
                        pt_transformed0 = (view_mat0 @ pt).flatten()
                        z0 = pt_transformed0[2]
                        pt_transformed1 = (view_mat1 @ pt).flatten()
                        z1 = pt_transformed1[2]
                        cur_positive_z += (z0 > 0.1) + (z1 > 0.1)
                    if cur_positive_z > max_positive_z:
                        max_positive_z = cur_positive_z
                        best_R = R
                        best_t = t
            return best_R, best_t

        R, t = essential_mat_to_camera_view_matrix(e_mat)
        baseline = np.linalg.norm(t)

        # Temporarily install the candidate poses to measure reprojection error.
        view_mat0 = np.hstack((np.eye(3), np.zeros((3, 1))))
        view_mat1 = np.hstack((R, t.reshape((3, 1))))
        view_mats[frame_0] = view_mat0
        view_mats[frame_1] = view_mat1
        points3d, ids, median_cos = triangulate(frame_0, frame_1)
        points0 = []
        points1 = []
        for i in ids:
            for j in frames_of_corner[i]:
                if j[0] == frame_0:
                    points0.append(j[1])
                if j[0] == frame_1:
                    points1.append(j[1])
        points0 = np.array(points0)
        points1 = np.array(points1)
        sum_error = compute_reprojection_errors(
            points3d, corner_storage[frame_0].points[points0],
            intrinsic_mat @ view_mat0) + compute_reprojection_errors(
                points3d, corner_storage[frame_1].points[points1],
                intrinsic_mat @ view_mat1)
        reprojection_error = np.mean(sum_error)
        points3d_count = len(points3d)

        view_mats[frame_0] = None
        view_mats[frame_1] = None

        # Rate the pair, and also verify that R really is a rotation matrix.
        if baseline < BASELINE_THRESHOLD or reprojection_error > REPROJECTION_ERROR_THRESHOLD or points3d_count < MIN_3D_POINTS or not check_if_mat_is_rot_mat(
                np.hstack((R, t.reshape((3, 1))))):
            return False, baseline, reprojection_error, points3d_count, None, None
        else:
            return True, baseline, reprojection_error, points3d_count, R, t

    # Scan frame pairs for initialization.
    # NOTE(review): `best` is overwritten by every acceptable pair, so the
    # last acceptable pair wins (first one on long sequences via the break) —
    # confirm this selection policy is intended.
    INF = 1e9
    best_frame_step = 5
    best = [-INF, INF, -INF]
    ind = None
    for frame_step in frame_steps:
        for i in range(frame_count // 2):
            if i + frame_step < frame_count * 0.85:
                ok, baseline, reproection_error, points3d_count, Rx, tx = get_first_cam_pose(
                    i, i + frame_step)
                if ok:
                    best = [baseline, reproection_error, points3d_count]
                    ind = i
                    best_frame_step = frame_step
                    if frame_count > 100:
                        break

    # Fail hard if no frame pair allowed initialization.
    if ind is None:
        raise NotImplementedError()

    # Initialize from the best pair found.
    frame_0 = ind
    frame_1 = ind + best_frame_step
    ok, baseline, reproection_error, points3d_count, R, t = get_first_cam_pose(
        frame_0, frame_1)
    print('INITIALIZATION RESULTS:')
    print('FRAMES: frame_0={}, frame_1={}, total_frames={}'.format(
        frame_0, frame_1, frame_count))
    print(baseline, reproection_error, points3d_count, R, t)
    view_mat0 = np.hstack((np.eye(3), np.zeros((3, 1))))
    view_mat1 = np.hstack((R, t.reshape((3, 1))))
    view_mats[frame_0] = view_mat0
    view_mats[frame_1] = view_mat1
    t_vecs[frame_0] = np.zeros((3), dtype=np.float64)
    t_vecs[frame_1] = t

    # corner id -> 3D point.
    points_cloud = {}

    # Seed the cloud from the two initial poses.
    points3d, ids, median_cos = triangulate(frame_0, frame_1)
    for id, point3d in zip(ids, points3d):
        points_cloud[id] = point3d

    # Solve a frame's pose: RANSAC to find inliers, then iterative PnP
    # (least squares) refined on the inliers only.
    def find_camera_pose(frame_id):
        corners = corner_storage[frame_id]
        points3d = []
        points2d = []
        for id, point2d in zip(corners.ids.flatten(), corners.points):
            if id in points_cloud.keys():
                points3d.append(points_cloud[id])
                points2d.append(point2d)
        # float64 arrays so EPnP accepts them.
        points3d = np.array(points3d, dtype=np.float64)
        points2d = np.array(points2d, dtype=np.float64)
        if len(points3d) < 4:
            return None

        success, R, t, inliers = cv2.solvePnPRansac(
            objectPoints=points3d,
            imagePoints=points2d,
            cameraMatrix=intrinsic_mat,
            distCoeffs=None,
            flags=cv2.SOLVEPNP_EPNP,
            confidence=0.9995,
            reprojectionError=MAX_REPROJECTION_ERROR)

        # OpenCV may report success with no inlier set; treat both as failure.
        if not success or inliers is None:
            return None

        inliers = np.asarray(inliers, dtype=np.int32).flatten()
        points3d = np.array(points3d)
        points2d = np.array(points2d)
        points3d = points3d[inliers]
        points2d = points2d[inliers]
        retval, R, t = cv2.solvePnP(objectPoints=points3d,
                                    imagePoints=points2d,
                                    cameraMatrix=intrinsic_mat,
                                    distCoeffs=None,
                                    flags=cv2.SOLVEPNP_ITERATIVE,
                                    useExtrinsicGuess=True,
                                    rvec=R,
                                    tvec=t)
        return R, t, len(inliers)

    # Frames whose pose is still unknown.
    def frames_without_pose():
        frames = []
        for ind, mat in enumerate(view_mats):
            if mat is None:
                frames.append(ind)
        return frames

    # Try to triangulate one corner from the two registered frames that
    # observe it with the widest baseline.
    def try_add_new_point_to_cloud(corner_id):
        # All frames that see this corner and already have a view matrix.
        frames = []
        for frame in frames_of_corner[corner_id]:
            if view_mats[frame[0]] is not None:
                frames.append(frame)
        if len(frames) < 2:
            return

        max_dist = 0
        best_frames = [None, None]
        best_ids = [None, None]

        for frame_1 in frames:
            for frame_2 in frames:
                if frame_1 == frame_2:
                    continue
                # Defensive: a registered frame should always have a t_vec.
                if t_vecs[frame_1[0]] is None or t_vecs[frame_2[0]] is None:
                    continue
                t_vec_1 = t_vecs[frame_1[0]]
                t_vec_2 = t_vecs[frame_2[0]]
                dist = np.linalg.norm(t_vec_1 - t_vec_2)
                if max_dist < dist:
                    max_dist = dist
                    best_frames = [frame_1[0], frame_2[0]]
                    best_ids = [frame_1[1], frame_2[1]]

        if max_dist > MIN_DIST_TRIANGULATE:
            ids = np.array([corner_id])
            points1 = corner_storage[best_frames[0]].points[np.array(
                [best_ids[0]])]
            points2 = corner_storage[best_frames[1]].points[np.array(
                [best_ids[1]])]
            corrs = Correspondences(ids, points1, points2)
            points3d, ids, med_cos = triangulate_correspondences(
                corrs,
                view_mats[best_frames[0]],
                view_mats[best_frames[1]],
                intrinsic_mat,
                parameters=TriangulationParameters(2, 1e-3, 1e-4))
            # Triangulation may reject the point; add it only on success and
            # stop counting occurrences for it afterwards.
            if len(points3d) > 0:
                points_cloud[ids[0]] = points3d[0]
                cur_corners_occurencies.pop(ids[0], None)

    # Bump occurrence counters for the frame's corners and return those seen
    # at least twice (candidates for triangulation).
    def add_corners_occurencies(frame_id):
        two_and_greater = []
        for id in corner_storage[frame_id].ids.flatten():
            if id in cur_corners_occurencies.keys():
                cur_corners_occurencies[id] += 1
                if cur_corners_occurencies[id] >= 2:
                    two_and_greater.append(id)
        return two_and_greater

    # While there are frames without a pose, register the one with the most
    # PnP-RANSAC inliers.
    while len(frames_without_pose()) > 0:
        wanted_frames = frames_without_pose()
        max_col = -1
        best_frame_id = -1
        best_R = None
        best_t = None
        for frame_id in wanted_frames:
            result = find_camera_pose(frame_id)
            if result is not None:
                R, t, col = result
                if col > max_col:
                    max_col = col
                    best_frame_id = frame_id
                    best_R = R
                    best_t = t
        if max_col == -1:
            # No more camera poses can be recovered.
            break

        view_mats[best_frame_id] = rodrigues_and_translation_to_view_mat3x4(
            best_R, best_t)
        t_vecs[best_frame_id] = best_t

        try_add = add_corners_occurencies(best_frame_id)
        for corner in try_add:
            try_add_new_point_to_cloud(corner)
        print(
            'Now we add camera pose on {}-th frame, camera pose was calculated by {} inliers'
            .format(best_frame_id, max_col))
        ttl_poses_calculated = np.sum([mat is not None for mat in view_mats])
        percent = "{:.0f}".format(ttl_poses_calculated / frame_count * 100.0)
        print('{}% poses calculated'.format(percent))

        print('{} points in 3D points cloud'.format(len(points_cloud)))

    # Frames that were never registered inherit the last known pose.
    last_view_mat = None
    for i in range(frame_count):
        if view_mats[i] is None:
            view_mats[i] = last_view_mat
        else:
            last_view_mat = view_mats[i]

    # Check that all matrices are rotation matrices.
    for ind, i in enumerate(view_mats):
        if not check_if_mat_is_rot_mat(i):
            print(
                'In frame {}/{} we have matrix wich is not rotation matrix:('.
                format(ind + 1, frame_count))
            raise NotImplementedError()

    ids = []
    points = []
    for id in points_cloud.keys():
        ids.append(id)
        points.append(points_cloud[id])

    point_cloud_builder = PointCloudBuilder(np.array(ids, dtype=np.int32),
                                            np.array(points))

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
Example #28
0
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    """Track camera motion across the sequence and build a colored 3D cloud.

    Seeds 3D points from two known (or auto-estimated) views, then repeatedly
    sweeps over untracked frames: each frame's pose is solved with
    PnP-RANSAC plus iterative refinement, and new points are triangulated
    against every previously tracked frame, until a full sweep adds nothing.

    :param camera_parameters: intrinsic camera parameters.
    :param corner_storage: per-frame detected 2D corners.
    :param frame_sequence_path: path to the RGB frame sequence.
    :param known_view_1: optional (frame index, pose) seed view.
    :param known_view_2: optional second (frame index, pose) seed view.
    :return: tuple of (camera pose per frame, colored point cloud).
    """
    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])
    frame_count = len(corner_storage)
    # One 3x4 view matrix per frame; None marks a still-untracked frame.
    view_mats = [None] * frame_count

    if known_view_1 is None or known_view_2 is None:
        # Auto-initialization returns view matrices directly.
        known_view_1, known_view_2 = init_first_camera_positions(
            intrinsic_mat, corner_storage)
        id_1, view_mat_1 = known_view_1
        id_2, view_mat_2 = known_view_2
        view_mats[id_1], view_mats[id_2] = view_mat_1, view_mat_2
    else:
        # Caller-supplied seeds are Pose objects; convert them.
        id_1, pose_1 = known_view_1
        id_2, pose_2 = known_view_2
        view_mats[id_1] = pose_to_view_mat3x4(pose_1)
        view_mats[id_2] = pose_to_view_mat3x4(pose_2)
    params = TriangulationParameters(max_reprojection_error=2,
                                     min_triangulation_angle_deg=1,
                                     min_depth=0.5)
    # Seed the point cloud from the two known views.
    correspondences = build_correspondences(corner_storage[id_1],
                                            corner_storage[id_2])
    points, ids, _ = triangulate_correspondences(correspondences,
                                                 view_mats[id_1],
                                                 view_mats[id_2],
                                                 intrinsic_mat, params)

    point_cloud_builder = PointCloudBuilder(ids.reshape(-1), points)

    # Repeat full sweeps until a sweep registers no new frame.
    while True:
        update = False
        for i in range(frame_count):
            if view_mats[i] is not None:
                continue
            corners = corner_storage[i]
            # Corners of this frame that already have 3D positions.
            ids, ids1, ids2 = np.intersect1d(corners.ids,
                                             point_cloud_builder.ids,
                                             return_indices=True)
            points_2d, points_3d = corners.points[
                ids1], point_cloud_builder.points[ids2]
            # Too few correspondences for a reliable PnP solution.
            if len(ids) < 6:
                continue
            retval, rvec, tvec, inliers = solvePnPRansac(points_3d,
                                                         points_2d,
                                                         intrinsic_mat,
                                                         distCoeffs=None)
            if not retval:
                continue
            # Refine the RANSAC pose on the inliers only, starting from the
            # RANSAC estimate.
            retval, rvec, tvec = solvePnP(points_3d[inliers],
                                          points_2d[inliers],
                                          intrinsic_mat,
                                          distCoeffs=None,
                                          rvec=rvec,
                                          tvec=tvec,
                                          useExtrinsicGuess=True)
            if not retval:
                continue
            view_mats[i] = rodrigues_and_translation_to_view_mat3x4(rvec, tvec)
            update = True
            # Corner ids rejected by RANSAC; excluded from triangulation.
            outliers = np.delete(ids, inliers)
            # Triangulate new points against every earlier tracked frame.
            for j in range(i):
                if view_mats[j] is None:
                    continue
                correspondences = build_correspondences(corners,
                                                        corner_storage[j],
                                                        ids_to_remove=outliers)
                points, ids, _ = triangulate_correspondences(
                    correspondences, view_mats[i], view_mats[j], intrinsic_mat,
                    params)
                point_cloud_builder.add_points(ids.reshape(-1), points)
            click.echo("Process frame %d/%d. %d 3D points found. inliners=%d" %
                       (i + 1, frame_count, len(
                           point_cloud_builder.points), len(inliers)))
        if not update:
            break

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud
Example #29
0
def track_and_calc_colors(camera_parameters: CameraParameters,
                          corner_storage: CornerStorage,
                          frame_sequence_path: str,
                          known_view_1: Optional[Tuple[int, Pose]] = None,
                          known_view_2: Optional[Tuple[int, Pose]] = None) \
        -> Tuple[List[Pose], PointCloud]:
    """Track camera motion through the frame sequence and build a point cloud.

    Starting from two frames with known poses, the remaining frames are
    localized incrementally: each pass tries to recover a pose for every
    still-unlocalized frame (``camera_pose``), triangulates new 3D points
    against every already-localized frame, and then refines all poses
    (``recalculate_poses``). Passes repeat until every frame is localized
    or a pass makes no progress.

    :param camera_parameters: intrinsic camera parameters.
    :param corner_storage: per-frame tracked 2D corners.
    :param frame_sequence_path: path to the RGB frame sequence.
    :param known_view_1: (frame index, pose) of the first known view.
    :param known_view_2: (frame index, pose) of the second known view.
    :raises NotImplementedError: if either known view is not provided.
    :returns: per-frame camera poses and the reconstructed point cloud.
    """
    # Fail fast with the same error the sibling implementations raise,
    # instead of crashing later with a TypeError on known_view_1[0].
    if known_view_1 is None or known_view_2 is None:
        raise NotImplementedError()

    rgb_sequence = frameseq.read_rgb_f32(frame_sequence_path)
    intrinsic_mat = to_opencv_camera_mat3x3(camera_parameters,
                                            rgb_sequence[0].shape[0])

    frames_cnt = len(rgb_sequence)
    known_frames = {known_view_1[0], known_view_2[0]}
    unused = np.array([i for i in range(frames_cnt) if i not in known_frames])
    used = np.array([known_view_1[0], known_view_2[0]])
    used_pose = [known_view_1[1], known_view_2[1]]

    # Seed the cloud by triangulating between the two known views.
    points, ids, _ = triangulate(known_view_1[0], known_view_2[0],
                                 known_view_1[1], known_view_2[1],
                                 corner_storage, intrinsic_mat, None)
    point_cloud_builder = PointCloudBuilder(ids, points)

    while unused.shape[0] > 0:
        added = []
        for frame in unused:
            pose, ids_to_remove = camera_pose(frame, corner_storage,
                                              point_cloud_builder,
                                              intrinsic_mat)
            if pose is None:
                # Not enough 2D-3D correspondences yet; retry next pass.
                continue

            # Grow the cloud: triangulate against every localized frame,
            # excluding the points flagged by camera_pose.
            for other_frame, other_pose in zip(used, used_pose):
                points, ids, _ = triangulate(frame, other_frame, pose,
                                             other_pose, corner_storage,
                                             intrinsic_mat, ids_to_remove)
                point_cloud_builder.add_points(ids, points)

            used = np.append(used, [frame])
            used_pose.append(pose)
            added.append(frame)

        if not added:
            # No frame could be localized this pass; avoid an infinite loop.
            break
        unused = np.setdiff1d(unused, added)

        # Refine all poses against the enlarged cloud.
        used_pose = recalculate_poses(used, point_cloud_builder,
                                      corner_storage, intrinsic_mat)

    # NOTE(review): if the loop above broke early, some view_mats entries
    # stay None and view_mat3x4_to_pose below will likely fail on them —
    # confirm that callers guarantee all frames are localizable.
    view_mats = [None] * frames_cnt
    for frame, pose in zip(used, used_pose):
        view_mats[frame] = pose_to_view_mat3x4(pose)

    calc_point_cloud_colors(point_cloud_builder, rgb_sequence, view_mats,
                            intrinsic_mat, corner_storage, 5.0)
    point_cloud = point_cloud_builder.build_point_cloud()
    poses = list(map(view_mat3x4_to_pose, view_mats))
    return poses, point_cloud