Example #1
def create_subvolumes(data, sample, args, method='calculate', show=False):
    """Either saves subvolumes or calculates mean + std from them. Takes edge cropped sample as input.
    Not necessary, since subimages can be calculated from mean+std images."""

    dims = [448, data.shape[2] // 2]  # subvolume width and index of the central slice (used for plotting)
    print_orthogonal(data)

    # Loop for 9 subvolumes
    for n in range(3):
        for nn in range(3):
            # Selection
            x1 = n * 200
            y1 = nn * 200

            # Plot selection
            if show:
                fig, ax = plt.subplots(1)
                ax.imshow(data[:, :, dims[1]])
                rect = patches.Rectangle((x1, y1),
                                         dims[0],
                                         dims[0],
                                         linewidth=3,
                                         edgecolor='r',
                                         facecolor='none')
                ax.add_patch(rect)
                plt.show()

            # Crop subvolume
            subdata = data[x1:x1 + dims[0], y1:y1 + dims[0], :]

            # Save data
            subsample = sample + "_sub" + str(n) + str(nn) + '_'
            if method == 'save':
                subpath = str(args.save_image_path /
                              (sample + "_sub" + str(n) + str(nn)))
                save(subpath, subsample, subdata)
            else:
                pipeline_mean_std(subdata, subsample, args)
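
For reference, a minimal standalone sketch of the 3 x 3 subvolume grid used above: 448-pixel windows at 200-pixel offsets, which assumes the edge-cropped sample is at least 848 pixels wide in each in-plane direction (the exact size comes from the calling pipeline).

# Illustrative only: prints the index ranges each of the 9 subvolumes covers.
for n in range(3):
    for nn in range(3):
        x1, y1 = n * 200, nn * 200
        print('sub{}{}: rows {}-{}, cols {}-{}'.format(n, nn, x1, x1 + 447, y1, y1 + 447))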
Example #2
def pipeline_subvolume(args,
                       sample,
                       individual=False,
                       save_data=True,
                       render=False,
                       use_wide=False):
    """Pipeline for saving subvolumes. Used in run_subvolume script."""
    # 1. Load sample
    # Unpack paths
    save_path = args.save_image_path
    print('Sample name: ' + sample)
    data, bounds = load_bbox(str(args.data_path / sample), args.n_jobs)
    print_orthogonal(data,
                     savepath=str(save_path / "Images" /
                                  (sample + "_input.png")))
    if render:
        render_volume(
            data, str(save_path / "Images" / (sample + "_input_render.png")))

    # 2. Orient array
    data, angles = orient(data, bounds, args.rotation)
    print_orthogonal(data,
                     savepath=str(save_path / "Images" /
                                  (sample + "_orient.png")))

    # 3. Crop and flip volume
    if use_wide:
        wide = args.size_wide
    else:
        wide = args.size['width']
    data, crop = crop_center(data,
                             args.size['width'],
                             wide,
                             method=args.crop_method)  # crop data
    print_orthogonal(data,
                     savepath=str(save_path / "Images" /
                                  (sample + "_orient_cropped.png")))
    if render:
        render_volume(
            data,
            str(save_path / "Images" /
                (sample + "_orient_cropped_render.png")))

    # Different pipeline for large dataset
    if data.shape[0] > 799 and data.shape[1] > 799 and save_data:
        create_subvolumes(data, sample, args)
        return

    # Save crop data
    if data.shape[1] > args.size['width'] and save_data:
        # save_path is a Path, so build the output locations with the / operator
        save(str(save_path / (sample + '_sub1')), sample + '_sub1_',
             data[:, :args.size['width'], :])
        save(str(save_path / (sample + '_sub2')), sample + '_sub2_',
             data[:, -args.size['width']:, :])
    elif save_data:
        save(str(save_path / sample), sample, data)
    else:
        return data
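
A hypothetical way to call this pipeline from a run script; the attribute names follow the ones read above, but the concrete values and paths are placeholders, not from the project.

from argparse import Namespace
from pathlib import Path

args = Namespace(
    data_path=Path('/data/samples'),          # placeholder directory of µCT stacks
    save_image_path=Path('/data/processed'),  # placeholder output directory
    rotation=1,                                # bounding-box based orientation
    size={'width': 448},
    size_wide=640,
    crop_method='center',
    n_jobs=8,
)
# pipeline_subvolume(args, 'sample_001', render=False)  # uncomment once the module is importable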
Example #3
                     method='loop')
plt.imshow(mask)
plt.show()

# Spectral clustering
# spectral_clusters_scikit(data[data.shape[0] // 2, :, :].T, 6)

# Scikit image segmentation
segment_clusters(data[data.shape[0] // 2, :, :].T, 6)

# 3D clustering in parallel
data_downscaled = zoom(data, 0.25, order=3)
mask_x = Parallel(n_jobs=8)(
    delayed(kmeans_scikit)(
        data_downscaled[i, :, :].T, n_clusters, scale=True, method='loop')
    for i in tqdm(range(data_downscaled.shape[0]), 'Calculating mask'))
mask_y = Parallel(n_jobs=8)(
    delayed(kmeans_scikit)(
        data_downscaled[:, i, :].T, n_clusters, scale=True, method='loop')
    for i in tqdm(range(data_downscaled.shape[1]), 'Calculating mask'))
mask = (np.array(mask_x) + np.array(mask_y).T) / 2  # average the X- and Y-direction masks
mask = zoom(mask, 4.0, order=3)  # upscale back to the original resolution
mask = np.transpose(np.array(mask > 0.5), (0, 2, 1))  # threshold and restore the axis order
mask_array = np.zeros(data.shape)
try:
    mask_array[:, :, :mask.shape[2]] = mask  # pad the mask to the original depth
except ValueError:
    mask_array = mask[:, :, :data.shape[2]]  # or trim it if it overshoots
print_orthogonal(mask_array, True)
render_volume(mask_array * data, None, False)
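
A side note on the averaging above: before upscaling, the averaged mask is 1.0 only where the X- and Y-direction clusterings agree and 0.5 where only one of them fires, so the strict > 0.5 threshold behaves like an intersection (the cubic upscaling blurs this somewhat). A tiny illustration with toy arrays (names are illustrative):

import numpy as np

mask_x = np.array([[1., 0.], [1., 1.]])
mask_y_t = np.array([[1., 1.], [0., 1.]])  # already transposed, as in the line above
combined = (mask_x + mask_y_t) / 2         # 1.0 where both agree, 0.5 where only one fires
print(combined > 0.5)                      # keeps only the agreement voxels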
Example #4
def orient(data, bounds, choice=1):
    """Detects sample orientation and rotates it along the z-axis.

    Parameters
    ----------
    data : ndarray
        Input data.
    bounds : list
        List of bounding box coordinates for the sample. Obtained during sample loading.
    choice : int
        Method to detect orientation:
        0 = No rotation.
        1 = Bounding box angles.
        2 = PCA angles.
        3 = Circle fitting (gradient descent optimization).
        4 = Average of 1, 2 and 3.

    Returns
    -------
    Rotated data, rotation angles
    """
    # Sample dimensions
    dims = np.array(np.shape(data))

    # Skip large sample
    if dims[0] * dims[1] * dims[2] > 3e9:  # samples with more than 3e9 voxels
        print('Skipping orientation for large sample')
        return data, (0, 0)

    # Ignore edges of sample
    cut1 = int((1 / 4) * len(bounds[0]))
    cut2 = int((1 / 2) * len(bounds[0]))

    # Get bounding box angles
    theta_x1, line_x1 = get_angle(bounds[0][cut1:cut2], False)
    theta_x2, line_x2 = get_angle(bounds[1][cut1:cut2], False)
    theta_y1, line_y1 = get_angle(bounds[2][cut1:cut2], False)
    theta_y2, line_y2 = get_angle(bounds[3][cut1:cut2], False)
    angle1 = 0.5 * (theta_x1 + theta_x2)
    angle2 = 0.5 * (theta_y1 + theta_y2)

    # Plot bbox fits
    xpoints = np.linspace(-len(bounds[0]) / 2,
                          len(bounds[0]) / 2, len(bounds[0]))
    plt.subplot(141)
    plt.plot(xpoints, bounds[0])
    plt.plot(xpoints,
             (xpoints - line_x1[2]) * (line_x1[1] / line_x1[0]) + line_x1[3],
             'r--')
    plt.subplot(142)
    plt.plot(xpoints, bounds[1])
    plt.plot(xpoints,
             (xpoints - line_x2[2]) * (line_x2[1] / line_x2[0]) + line_x2[3],
             'r--')
    plt.subplot(143)
    plt.plot(xpoints, bounds[2])
    plt.plot(xpoints,
             (xpoints - line_y1[2]) * (line_y1[1] / line_y1[0]) + line_y1[3],
             'r--')
    plt.subplot(144)
    plt.plot(xpoints, bounds[3])
    plt.plot(xpoints,
             (xpoints - line_y2[2]) * (line_y2[1] / line_y2[0]) + line_y2[3],
             'r--')
    plt.suptitle('BBox angles: {0}, {1}'.format(angle1, angle2))
    plt.show()

    # PCA angles
    xangle = pca_angle(data[dims[0] // 2, :, :], 1, 80)
    yangle = pca_angle(data[:, dims[1] // 2, :], 1, 80)

    # Select rotation
    if choice == 1:
        pass
    elif choice == 2:
        print('PCA angles: {0}, {1}'.format(xangle, yangle))
        angle1 = xangle
        angle2 = yangle
    elif choice == 3 or choice == 4:
        origrad = FindOriGrad(alpha=0.5, h=5, n_iter=60)
        mask = data > 70
        binned = zoom(mask, (0.125, 0.125, 0.125))
        print_orthogonal(binned)
        ori = origrad(binned)
        if choice == 3:
            print('Gradient descent selected.')
            angle1 = ori[0]
            angle2 = ori[1]
        else:
            print('Average selected.')
            angle1 = (ori[0] + xangle + angle1) / 3
            angle2 = (ori[1] + yangle + angle2) / 3
    else:
        print('No rotation performed.')
        return data, (0, 0)

    # 1st rotation
    if 4 <= abs(angle1) <= 20:  # apply only angles between 4 and 20 degrees
        data = opencv_rotate(data, 0, angle1)
    print_orthogonal(data)

    # 2nd rotation
    if 4 <= abs(angle2) <= 20:
        data = opencv_rotate(data, 1, angle2)
    print_orthogonal(data)

    return data, (angle1, angle2)
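
For readers without the project's opencv_rotate helper, the same kind of in-plane rotation can be sketched with scipy.ndimage.rotate; this is only an illustration of the idea, not the implementation used above.

import numpy as np
from scipy.ndimage import rotate

def rotate_volume(volume, axis, angle_deg):
    """Rotate a 3D volume around axis 0 or 1 by angle_deg degrees (illustrative stand-in)."""
    planes = (1, 2) if axis == 0 else (0, 2)  # rotate within the plane orthogonal to the chosen axis
    return rotate(volume, angle_deg, axes=planes, reshape=False, order=1)

volume = np.random.rand(32, 32, 32)
rotated = rotate_volume(volume, axis=0, angle_deg=7.5)
print(rotated.shape)  # (32, 32, 32), since reshape=False keeps the original shape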
Example #5
def pipeline_mean_std(image_path, args, sample='', mask_path=None, data=None):
    """Runs full processing pipeline on single function. No possibility for subvolumes. Used in run_mean_std."""

    # 1. Load sample
    save_path = args.save_image_path
    save_path.mkdir(exist_ok=True)
    if data is None:
        print('1. Load sample')
        data, bounds = load_bbox(image_path, n_jobs=args.n_jobs)
        print_orthogonal(data,
                         savepath=str(save_path / 'Images' /
                                      (sample + '_input.png')))
        render_volume(data,
                      savepath=str(save_path / 'Images' /
                                   (sample + '_render_input.png')))
    if mask_path is not None:
        mask, _ = load_bbox(mask_path)
        print_orthogonal(mask)

    # 2. Segment BCI mask
    if args.segmentation == 'torch' or args.segmentation == 'kmeans':
        # Bottom offset
        if data.shape[2] < 1000:
            offset = 0
        elif 1000 <= data.shape[2] < 1600:
            offset = 20
        else:
            offset = 50
        # Pytorch segmentation
        if args.segmentation == 'torch':
            cropsize = 512
            mask = segmentation_pytorch(data, args.model_path, args.snapshots,
                                        cropsize, offset)
        # K-means segmentation
        elif args.segmentation == 'kmeans':
            mask = segmentation_kmeans(data,
                                       n_clusters=3,
                                       offset=offset,
                                       n_jobs=args.n_jobs)
        else:
            raise Exception('Invalid segmentation selection!')
    elif args.segmentation == 'unet':
        #args.mask_path.mkdir(exist_ok=True)
        mask = segmentation_unet(data, args, sample)
    # CNTK segmentation
    else:
        mask = segmentation_cntk(data, args.model_path)
    print_orthogonal(mask * data,
                     savepath=str(save_path / 'Images' /
                                  (sample + '_mask.png')))
    render_volume(
        (mask > args.threshold) * data,
        savepath=str(save_path / 'Images' / (sample + '_render_mask.png')))
    save(str(save_path / 'Masks' / sample), sample, mask)

    # 3. Crop edges
    crop = args.size['crop']
    data = data[crop:-crop, crop:-crop, :]
    mask = mask[crop:-crop, crop:-crop, :]
    size_temp = args.size.copy()
    size_temp['width'] = args.size['width'] - 2 * crop

    # Calculate cartilage depth
    data = np.flip(data, 2)
    mask = np.flip(mask, 2)  # flip to begin indexing from surface
    dist = deep_depth(data, mask)
    size_temp['deep'] = (0.6 * dist).astype('int')
    print('Automatically setting deep voi depth to {0}'.format(
        (0.6 * dist).astype('int')))
    # 4. Get VOIs
    print('4. Get interface coordinates:')
    surf_voi, deep_voi, calc_voi, otsu_thresh = get_interface(
        data, size_temp, (mask > args.threshold), n_jobs=args.n_jobs)
    # Show and save results
    print_orthogonal(surf_voi,
                     savepath=str(save_path / "Images" /
                                  (sample + "_surface.png")))
    print_orthogonal(deep_voi,
                     savepath=str(save_path / "Images" /
                                  (sample + "_deep.png")))
    print_orthogonal(calc_voi,
                     savepath=str(save_path / "Images" / (sample + "_cc.png")))
    render_volume(np.flip(surf_voi, 2),
                  str(save_path / "Images" / (sample + "_render_surface.png")))
    render_volume(np.flip(deep_voi, 2),
                  str(save_path / "Images" / (sample + "_render_deep.png")))
    render_volume(calc_voi,
                  str(save_path / "Images" / (sample + "_render_cc.png")))

    # 5. Calculate mean and std
    print('5. Save mean and std images')
    mean_std(surf_voi, str(save_path), sample, deep_voi, calc_voi, otsu_thresh)
    if size_temp['surface'] > 25:
        mean_std(surf_voi[:, :, :surf_voi.shape[2] // 2], str(save_path),
                 sample + '_25', deep_voi,
                 calc_voi[:, :, :calc_voi.shape[2] // 2], otsu_thresh)
        mean_std(surf_voi[:, :, surf_voi.shape[2] // 2:], str(save_path),
                 sample + '_25_backup', deep_voi,
                 calc_voi[:, :, calc_voi.shape[2] // 2:], otsu_thresh)
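
The bottom-offset heuristic near the top of this function can be restated as a simple step function of stack depth; a minimal standalone sketch (illustrative only):

def bottom_offset(n_slices):
    """Offset in slices used to exclude part of the bone plate, per the heuristic above."""
    if n_slices < 1000:
        return 0
    if n_slices < 1600:
        return 20
    return 50

assert bottom_offset(800) == 0 and bottom_offset(1200) == 20 and bottom_offset(2000) == 50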
Example #6
def pipeline_subvolume_mean_std(args, sample):
    """Calculates volume and calls subvolume or mean + std pipeline

    1. Loads µCT stack

    2. Orients sample

    3. Crops sample edges

    4. Calls mean+std pipeline (or processes subvolumes individually).

    Parameters
    ----------
    sample : str
        Sample name.
    args : Namespace
        Namespace containing processing arguments:
        data_path = Directory containing µCT datasets.
        save_image_path = Path for saving processed datasets.
        rotation = Selected rotation method. See rotations/orient.
        size = Dictionary including saved VOI dimensions. See extract_volume.
        size_wide = Different width for edge crop. Used for Test set 1.
        crop_method = Method for finding sample center.
        n_jobs = Number of parallel workers.
        render = Whether to save rendered images of the processed sample.
        n_subvolumes = Number of subvolumes; values above 1 process each subvolume individually.
    """

    # 1. Load sample
    # Unpack paths
    save_path = args.save_image_path
    render = args.render
    print('Sample name: ' + sample)
    data, bounds = load_bbox(str(args.data_path), args.n_jobs)
    print_orthogonal(data,
                     savepath=str(save_path / "Images" /
                                  (sample + "_large.png")))
    if render:
        render_volume(
            data, str(save_path / "Images" / (sample + "_render_large.png")))

    # 2. Orient array
    print('2. Orient sample')
    if data.shape[0] * data.shape[1] * data.shape[2] < 3e9:  # orient only samples below 3e9 voxels
        data, angles = orient(data, bounds, args.rotation)
        print_orthogonal(data,
                         savepath=str(save_path / "Images" /
                                      (sample + "_orient.png")))

    # 3. Crop and flip volume
    print('3. Crop and flip center volume:')
    data, _ = crop_center(data,
                          args.size['width'],
                          args.size['width'],
                          method=args.crop_method)  # crop data
    print_orthogonal(data,
                     savepath=str(save_path / "Images" /
                                  (sample + "_input.png")))
    if render:
        render_volume(
            data, str(save_path / "Images" / (sample + "_render_input.png")))

    # Save crop data
    (save_path / 'Cropped').mkdir(exist_ok=True)
    save(str(save_path / 'Cropped' / sample), sample, data)

    # Different pipeline for large dataset
    if args.n_subvolumes > 1:  # Segment and calculate each subvolume individually
        create_subvolumes(data, sample, args)
    else:  # Calculate
        pipeline_mean_std(str(save_path / 'Cropped' / sample),
                          args,
                          sample,
                          data=data)
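
A hypothetical driver loop over a directory of samples; the directory layout and the guard are placeholders, not from the project.

from pathlib import Path

data_root = Path('/data/uct')  # placeholder location of the per-sample µCT stacks
samples = sorted(p.name for p in data_root.iterdir() if p.is_dir()) if data_root.exists() else []
for sample in samples:
    print('Processing ' + sample)
    # pipeline_subvolume_mean_std(args, sample)  # uncomment once args and the module are available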
Example #7
ax2.set_title('4mm image')
ax3 = fig.add_subplot(133)
ax3.imshow(data_cor, cmap='gray')
ax3.set_title('4mm image')
plt.show()
render_volume(data, None, False)

# Downscale images

# K-means clustering

# 3D clustering in parallel
mask = Parallel(n_jobs=12)(delayed(kmeans_opencv)(data[i, :, :].T, n_clusters, scale=True, method='loop')
                           for i in tqdm(range(data.shape[0]), 'Calculating mask'))
mask = np.transpose(np.array(mask), (0, 2, 1))
print_orthogonal(mask, True)

# 2D clustering
mask_2mm = kmeans_opencv(cor_2mm, n_clusters, True, limit=2, method='loop')
mask_4mm = kmeans_opencv(cor_4mm, n_clusters, True, limit=2, method='loop')
mask_4mm_2 = kmeans_opencv(data_cor, n_clusters, True, limit=2, method='loop')

# Show cluster images
fig = plt.figure(dpi=300)
ax1 = fig.add_subplot(131)
ax1.imshow(mask_2mm)
ax1.set_title('2mm mask')
ax2 = fig.add_subplot(132)
ax2.imshow(mask_4mm)
ax2.set_title('4mm mask')
ax3 = fig.add_subplot(133)
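
Since this snippet is cut off, here is a self-contained sketch of the three-panel comparison figure pattern it uses (random data and illustrative titles, not the project's images):

import numpy as np
import matplotlib.pyplot as plt

images = [np.random.rand(64, 64) for _ in range(3)]
titles = ['2mm mask', '4mm mask', '4mm mask (coronal)']  # illustrative panel titles
fig = plt.figure(dpi=150)
for i, (img, title) in enumerate(zip(images, titles), start=1):
    ax = fig.add_subplot(1, 3, i)
    ax.imshow(img)
    ax.set_title(title)
plt.show()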
Example #8
def segmentation_kmeans(array,
                        n_clusters=3,
                        offset=0,
                        method='scikit',
                        zoom_factor=4.0,
                        n_jobs=12):
    """Pipeline for segmentation using kmeans clustering.

    Parameters
    ----------
    array : ndarray (3-dimensional)
        Input data.
    n_clusters : int
        Number of kmeans clusters.
    offset : int
        Bottom offset for segmentation. Used to exclude part of large bone plate.
    method : str
        Algorithm for kmeans segmentation. Choices = "scikit", "opencv". Defaults to scikit-learn.
    zoom_factor : float
        Factor for downscaling input data for segmentation.
    n_jobs : int
        Number of parallel workers.

    Returns
    -------
    Segmented calcified tissue mask.
    """

    # Segmentation
    dims = array.shape
    array = zoom(array[:, :, offset:], 1 / zoom_factor,
                 order=3)  # Downscale images
    if method == 'scikit':
        mask_x = Parallel(n_jobs=n_jobs)(
            delayed(kmeans_scikit)(
                array[i, :, :].T, n_clusters, scale=True, method='loop')
            for i in tqdm(range(array.shape[0]), 'Calculating mask (X)'))
        mask_y = Parallel(n_jobs=n_jobs)(
            delayed(kmeans_scikit)(
                array[:, i, :].T, n_clusters, scale=True, method='loop')
            for i in tqdm(range(array.shape[1]), 'Calculating mask (Y)'))
        print_orthogonal(np.array(mask_x))
        print_orthogonal(np.array(mask_y).T)

        mask = (np.array(mask_x) + np.array(mask_y).T) / 2  # Average mask
        mask = zoom(mask, zoom_factor, order=3)  # Upscale mask
    else:  # OpenCV
        mask_x = Parallel(n_jobs=n_jobs)(
            delayed(kmeans_opencv)(
                array[i, :, :].T, n_clusters, scale=True, method='loop')
            for i in tqdm(range(array.shape[0]), 'Calculating mask (X)'))
        mask_y = Parallel(n_jobs=n_jobs)(
            delayed(kmeans_opencv)(
                array[:, i, :].T, n_clusters, scale=True, method='loop')
            for i in tqdm(range(array.shape[1]), 'Calculating mask (Y)'))
        mask = (np.array(mask_x) + np.array(mask_y)) / 2  # Average mask
        mask = zoom(mask, zoom_factor, order=3)  # Upscale mask
    # Reshape
    mask = np.transpose(mask, (0, 2, 1))

    # Take offset and zoom into account
    mask_array = np.zeros(dims)
    try:
        # Squeeze mask array to fit the calculated mask
        mask_array[:, :, offset:mask.shape[2] + offset] = mask
    except ValueError:
        # Squeeze the calculated mask to fit the array
        mask_array[:, :, offset:] = mask[:, :, :mask_array.shape[2] - offset]

    return mask_array >= 0.5
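
A self-contained sketch of the per-slice idea behind kmeans_scikit, using scikit-learn's KMeans on intensity values; this is an illustrative stand-in, not the project's helper.

import numpy as np
from sklearn.cluster import KMeans

def kmeans_slice(image, n_clusters=3):
    """Cluster a 2D slice by intensity and return a mask of the brightest cluster (illustrative)."""
    flat = image.reshape(-1, 1).astype(float)
    labels = KMeans(n_clusters=n_clusters, n_init=10, random_state=0).fit_predict(flat)
    means = [flat[labels == k].mean() for k in range(n_clusters)]
    return (labels == int(np.argmax(means))).reshape(image.shape)  # brightest cluster ~ calcified tissue

slice_2d = np.random.rand(64, 80)
print(kmeans_slice(slice_2d).mean())  # fraction of pixels assigned to the brightest cluster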