Example #1
0
def find_largest_size(folder_list, downsampling, network_downsampling,
                      queue_size):
    """Measure the cropped mask size of every sequence and report it.

    For each folder, loads the grayscale undistorted mask, lets the
    cropping utility compute the downsampled crop bounds, and pushes the
    resulting ``[height, width]`` of the crop region onto ``queue_size``
    (presumably a multiprocessing queue — TODO confirm against caller).
    """
    for sequence_folder in folder_list:
        # Load the undistorted mask image for this sequence as grayscale
        mask_image = cv2.imread(
            str(sequence_folder / "undistorted_mask.bmp"),
            cv2.IMREAD_GRAYSCALE)
        # Only the crop bounds are needed here; the cropped mask itself
        # is discarded
        _, top, bottom, left, right = utils.downsample_and_crop_mask(
            mask_image,
            downsampling_factor=downsampling,
            divide=network_downsampling)
        # Report the crop region's [height, width]
        queue_size.put([bottom - top, right - left])
Example #2
0
def pre_processing_data(
        process_id, folder_list, downsampling, network_downsampling, is_hsv,
        inlier_percentage, visible_interval, suggested_h, suggested_w,
        queue_clean_point_list, queue_intrinsic_matrix, queue_point_cloud,
        queue_mask_boundary, queue_view_indexes_per_point,
        queue_selected_indexes, queue_visible_view_indexes, queue_extrinsics,
        queue_projection, queue_crop_positions, queue_estimated_scale):
    """Worker routine: pre-process each sequence folder and publish results.

    For every folder in ``folder_list`` this reads the SfM artifacts
    (undistorted mask, selected/visible view indexes, camera intrinsics,
    sparse point cloud, per-point visibility, camera poses, color images),
    derives the cropped/downsampled variants, and puts each result — keyed
    by the folder path string — onto the corresponding ``queue_*`` argument.
    NOTE(review): the queues look like ``multiprocessing.Queue`` instances
    shared with a parent process — confirm against the caller.

    Args:
        process_id: Identifier of this worker, used only in the final
            log message.
        folder_list: Iterable of sequence folders (pathlib-like; joined
            with ``/``) assigned to this worker.
        downsampling: Spatial downsampling factor applied to the mask,
            intrinsics, and color images.
        network_downsampling: Divisor passed as ``divide`` to the mask
            cropping utility (presumably the network stride the crop must
            be a multiple of — TODO confirm).
        is_hsv: Flag forwarded to image loading and clean-point
            classification (presumably selects HSV color space).
        inlier_percentage: Threshold forwarded to
            ``utils.get_clean_point_list``.
        visible_interval: Neighborhood size forwarded to
            ``utils.overlapping_visible_view_indexes_per_point``.
        suggested_h: Suggested crop height forwarded to the mask cropper.
        suggested_w: Suggested crop width forwarded to the mask cropper.
        queue_*: Output queues; each receives ``[folder_str, payload]``
            pairs, one per folder.
    """
    for folder in folder_list:
        # We use folder path as the key for dictionaries
        folder_str = str(folder)
        # Read undistorted mask image (grayscale)
        undistorted_mask_boundary = cv2.imread(
            str(folder / "undistorted_mask.bmp"), cv2.IMREAD_GRAYSCALE)
        # Downsample and crop the undistorted mask image; also obtain the
        # crop bounds, which are reused below for intrinsics and images
        cropped_downsampled_undistorted_mask_boundary, start_h, end_h, start_w, end_w = \
            utils.downsample_and_crop_mask(undistorted_mask_boundary, downsampling_factor=downsampling,
                                           divide=network_downsampling, suggested_h=suggested_h,
                                           suggested_w=suggested_w)
        queue_mask_boundary.put(
            [folder_str, cropped_downsampled_undistorted_mask_boundary])
        queue_crop_positions.put(
            [folder_str, [start_h, end_h, start_w, end_w]])
        # Read selected image indexes and stride (stride itself is unused here)
        stride, selected_indexes = utils.read_selected_indexes(folder)
        queue_selected_indexes.put([folder_str, selected_indexes])
        # Read visible view indexes
        visible_view_indexes = utils.read_visible_view_indexes(folder)
        queue_visible_view_indexes.put([folder_str, visible_view_indexes])
        # Read undistorted camera intrinsics
        undistorted_camera_intrinsic_per_view = utils.read_camera_intrinsic_per_view(
            folder)
        # Downsample and crop the undistorted camera intrinsics.
        # Assuming for now that camera intrinsics within each clip remain
        # the same, so only the first view's matrix ([0]) is adjusted.
        cropped_downsampled_undistorted_intrinsic_matrix = utils.modify_camera_intrinsic_matrix(
            undistorted_camera_intrinsic_per_view[0],
            start_h=start_h,
            start_w=start_w,
            downsampling_factor=downsampling)
        queue_intrinsic_matrix.put(
            [folder_str, cropped_downsampled_undistorted_intrinsic_matrix])
        # Read sparse point cloud from SfM
        point_cloud = utils.read_point_cloud(str(folder / "structure.ply"))
        queue_point_cloud.put([folder_str, point_cloud])
        # Read visible view indexes per point
        view_indexes_per_point = utils.read_view_indexes_per_point(
            folder,
            visible_view_indexes=visible_view_indexes,
            point_cloud_count=len(point_cloud))
        # Extend per-point visibility with neighborhood frames to increase
        # stability and avoid as much occlusion problem as possible
        view_indexes_per_point = utils.overlapping_visible_view_indexes_per_point(
            view_indexes_per_point, visible_interval)
        queue_view_indexes_per_point.put([folder_str, view_indexes_per_point])
        # Read pose data for all visible views
        poses = utils.read_pose_data(folder)
        # Calculate extrinsic and projection matrices from the poses and
        # the adjusted intrinsic matrix
        visible_extrinsic_matrices, visible_cropped_downsampled_undistorted_projection_matrices = \
            utils.get_extrinsic_matrix_and_projection_matrix(poses,
                                                             intrinsic_matrix=
                                                             cropped_downsampled_undistorted_intrinsic_matrix,
                                                             visible_view_count=len(visible_view_indexes))
        queue_extrinsics.put([folder_str, visible_extrinsic_matrices])
        queue_projection.put([
            folder_str,
            visible_cropped_downsampled_undistorted_projection_matrices
        ])
        # Get approximate data global scale to reduce training data imbalance
        global_scale = utils.global_scale_estimation(
            visible_extrinsic_matrices, point_cloud)
        queue_estimated_scale.put([folder_str, global_scale])
        # Load the cropped/downsampled color images for all visible views
        visible_cropped_downsampled_imgs = utils.get_color_imgs(
            folder,
            visible_view_indexes=visible_view_indexes,
            start_h=start_h,
            start_w=start_w,
            end_h=end_h,
            end_w=end_w,
            downsampling_factor=downsampling,
            is_hsv=is_hsv)
        # Classify sparse points as clean vs contaminated using photometric
        # consistency across the visible views
        clean_point_indicator_array = utils.get_clean_point_list(
            imgs=visible_cropped_downsampled_imgs,
            point_cloud=point_cloud,
            mask_boundary=cropped_downsampled_undistorted_mask_boundary,
            inlier_percentage=inlier_percentage,
            projection_matrices=
            visible_cropped_downsampled_undistorted_projection_matrices,
            extrinsic_matrices=visible_extrinsic_matrices,
            is_hsv=is_hsv,
            view_indexes_per_point=view_indexes_per_point)
        queue_clean_point_list.put([folder_str, clean_point_indicator_array])
        print("sequence {} finished".format(folder_str))

    print("{}th process finished".format(process_id))