Example #1
def old_preprocessing_main(args):
    if args.t is None and args.s is None and args.k is None:
        raise ValueError('No preprocessing tasks were specified')

    verbose_print(args, f"Preprocessing {args.input}")

    if os.path.isdir(args.input):
        # Load series of 2D TIFFs and process in parallel
        paths, filenames = tifs_in_dir(args.input)

        img = io.imread(paths[0])
        shape = (len(paths), *img.shape)
        if args.float:
            dtype = 'float32'
        else:
            dtype = img.dtype

        arr = io.new_zarr(args.zarr, shape=shape, dtype=dtype, chunks=tuple(args.c))

        args_list = []
        for i, (path, _) in enumerate(zip(paths, filenames)):
            args_list.append((args, path, arr, i))

        with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
            list(tqdm.tqdm(pool.imap_unordered(_preprocess_image2d, args_list), total=len(args_list)))

        if args.p is not None:
            before = io.imread(paths[args.p])
            after = arr[args.p]

    elif os.path.isfile(args.input):
        # Load 3D TIFF and process in memory
        img = io.imread(args.input)
        # Keep reference to before image if plotting
        if args.p is not None:
            before = np.copy(img[args.p])
        verbose_print(args, f"Loaded image: {img.shape} {img.dtype}")
        img = preprocess_image3d(args, img)
        if args.p is not None:
            after = np.copy(img[args.p])

    else:
        raise ValueError('Input is not a valid directory or file')

    # Show A/B plot
    if args.p is not None:
        plt.subplot(121)
        plt.imshow(before)
        plt.title('Before')
        plt.subplot(122)
        plt.imshow(after)
        plt.title('After')
        plt.show()

    verbose_print(args, f"Preprocessing done!")
Example #2
def stack_main(args):
    verbose_print(args, f'Stacking images in {args.input}')

    paths, filenames = utils.tifs_in_dir(args.input)
    verbose_print(args, f'Found {len(paths)} images')

    img0 = io.imread(paths[0])
    shape2d, dtype = img0.shape, img0.dtype
    img = np.empty((len(paths), *shape2d), dtype)
    for z, path in tqdm(enumerate(paths), total=len(paths)):
        img[z] = io.imread(path)

    io.imsave(args.output, img, compress=1)

    verbose_print(args, f'Stacking done!')
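A hypothetical invocation of stack_main, with attribute names inferred from the function body (the verbose flag consumed by verbose_print is an assumption, not shown in the source):

from argparse import Namespace

# Hypothetical call: 'input' is a folder of 2D TIFF slices and 'output'
# is the destination 3D TIFF; 'verbose' is assumed to be the flag that
# verbose_print checks
stack_main(Namespace(input='slices/', output='stack.tif', verbose=True))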
Example #3
def foreground_main(args):
    verbose_print(args, f'Segmenting foreground from {args.input}')

    # Load the input image
    data = io.imread(args.input)

    # Smoothing
    if args.g is not None:
        data = gaussian_blur(data, args.g).astype(data.dtype)

    # Threshold image
    foreground = (data > args.t)  # .astype(np.uint8)

    # Fill holes
    # This is done slice-by-slice for now since there could be imaging problems where
    # a part of a ventricle is actually in the image at z = 0 or z = -1
    output = np.empty(foreground.shape, dtype=np.uint8)
    for i, img in enumerate(foreground):
        output[i] = binary_fill_holes(img)
    output *= 255

    # Save the result to TIFF
    io.imsave(args.output, output, compress=3)
    verbose_print(args, f'Segmentation written to {args.output}')

    verbose_print(args, f'Foreground segmentation done!')
Example #4
def preprocess_image2d(args, path, arr, i):
    img = io.imread(path)

    # Background removal
    if args.t is not None:
        # verbose_print(args, f"Performing background removal with threshold {args.t}")
        img = remove_background(img, args.t)

    # Histogram equalization
    if args.k is not None:
        if args.k == 0:
            # verbose_print(args, f"Performing histogram equalization with default kernel size")
            kernel_size = None
        else:
            # verbose_print(args, f"Performing histogram equalization with kernel size {args.k}")
            kernel_size = args.k
        img = equalize_adapthist(img, kernel_size=kernel_size)

    # Convert to float (can't normalize based on single slice)
    if args.float:
        img = img_as_float32(img)
        # verbose_print(args, f"Converted to normalized float32: min {img.min():.3f}, max {img.max():.3f}")

    # Denoising
    if args.s is not None:
        # verbose_print(args, f"Performing noise removal with sigma {args.s} and wavelet {args.w}")
        img = denoise2d(img, args.s, args.w)

    arr[i] = img
Example #5
def read_process_write(path, f, output, compress=3):
    # Get output path from input name and output folder
    filename = os.path.basename(path)
    output_path = os.path.join(output, filename)
    # Read image
    img = io.imread(path)
    # Process and write
    process_write(img, f, output_path, compress)
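process_write is not defined in these examples; a plausible sketch, assuming it simply applies the processing function and saves the result with the same io.imsave/compress convention used throughout:

def process_write(img, f, output_path, compress=3):
    # Assumed helper: apply the processing function to the image and write
    # the result. Sketch only -- the real implementation may differ
    result = f(img)
    io.imsave(output_path, result, compress=compress)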
Example #6
def read_downsample_write(path, factor, output_dir, filename, compress=1):
    arr = io.imread(path)
    if isinstance(factor, int):
        factors = tuple(factor for _ in range(arr.ndim))
    else:
        factors = tuple(factor)
    data = downsample(arr, factors)
    output = os.path.join(output_dir, filename)
    io.imsave(output, data, compress=compress)
Example #7
def rescale_image(path, threshold, max_val, output, filename, compress):
    img = io.imread(path).astype(np.float32)  # load image as float

    # Subtract threshold and remove background by clipping negative values
    img -= threshold
    img = np.clip(img, 0, None)

    # Divide by max_val (accounting for threshold) to scale to [0, 1]
    img = img / (max_val - threshold)

    # Save result
    output_path = os.path.join(output, filename)
    io.imsave(output_path, img, compress=compress)
Example #8
File: cyto.py  Project: chunglabmit/scout
def mesh_main(args):
    if args.g is not None:
        if len(args.g) == 1:
            sigma = args.g[0]
        else:
            sigma = tuple(args.g)

    if args.d is None:
        downsample_factor = 1
    else:
        downsample_factor = np.asarray(args.d)

    verbose_print(args, f'Meshing segmentation at {args.input}')

    # Calculate the downsampled voxel size
    voxel_orig = read_voxel_size(args.voxel_size)
    voxel_down = tuple(voxel_orig * downsample_factor)
    verbose_print(args, f'Original voxel size (um): {voxel_orig}')
    verbose_print(args, f'Downsampled voxel size (um): {voxel_down}')

    # Load segmentation
    seg = io.imread(args.input)

    # Smooth segmentation
    if args.g is not None:
        seg = smooth_segmentation(seg, sigma)
        verbose_print(args, f'Smoothed segmentation with sigma {sigma}')

    # Calculate mesh surface
    verts, faces, normals, values = marching_cubes(seg, args.l, voxel_down,
                                                   args.s)
    mesh = {
        'verts': verts,
        'faces': faces,
        'normals': normals,
        'values': values
    }
    verbose_print(args, f'Computed mesh with {len(normals)} normals')

    # Plot mesh
    if args.plot:
        plot_mesh(mesh['verts'], mesh['faces'])
        mlab.show()

    # Save mesh
    save_mesh(args.output, mesh)
    verbose_print(args, f'Mesh saved to {args.output}')

    verbose_print(args, 'Meshing done!')
Example #9
def ventricle_main(args):
    verbose_print(args, f'Segmenting ventricles in {args.input}')

    # Load the input image
    data = io.imread(args.input)

    # Load the model
    if args.model.endswith('.pt'):
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # device = torch.device("cpu")
        model = load_model(args.model, device)
        model = model.eval()
        verbose_print(
            args,
            f'Pytorch model successfully loaded from {args.model} to {device} device'
        )
        # Segment the input image
        verbose_print(args, f'Segmentation progress:')
        output = segment_ventricles(model, data, args.t, device)
    elif args.model.endswith('.h5'):
        model = load_keras_model(args.model)
        verbose_print(args,
                      f'Keras model successfully loaded from {args.model}')
        # Segment the input image
        verbose_print(args, f'Segmentation progress:')
        output = segment_ventricles_keras(model, data, args.t)
    else:
        # Guard against unknown model formats, which would otherwise leave
        # `output` undefined below
        raise ValueError(f'Unrecognized model format: {args.model}')

    # Remove border regions
    if args.exclude_border:
        verbose_print(args, f'Removing regions connected to image border')
        # This could also be done in 3D instead of slice-by-slice
        # I'm not sure if images will start in ventricle, so doing slice-by-slice to be safe
        img = np.zeros_like(output)
        for i, data in tqdm(enumerate(output), total=len(output)):
            img[i] = clear_border(data)
        output = img

    # Save the result to TIFF
    io.imsave(args.output, output, compress=3)
    verbose_print(args, f'Segmentation written to {args.output}')

    verbose_print(args, f'Ventricle segmentation done!')
Example #10
def convert_main(args):
    nb_workers = _check_workers(args)

    verbose_print(args, f"Converting {args.input} to Zarr")

    # Find all TIFFs
    paths, filenames = tifs_in_dir(args.input)
    verbose_print(args, f"Found {len(paths)} TIFFs")
    paths_chunked = [paths[pos:pos + args.c[0]] for pos in range(0, len(paths), args.c[0])]

    img = io.imread(paths[0])
    shape = (len(paths), *img.shape)
    dtype = img.dtype
    chunks = tuple(args.c)
    arr = io.new_zarr(args.output, shape=shape, dtype=dtype, chunks=chunks)

    verbose_print(args, f"Writiing to {args.output}")
    args_list = []
    for i, paths_batch in enumerate(paths_chunked):
        args_list.append((paths_batch, i, chunks[0], arr))
    with multiprocessing.Pool(nb_workers) as pool:
        list(tqdm.tqdm(pool.imap(_convert_batch, args_list), total=len(args_list)))

    verbose_print(args, f"Conversion done!")
Example #11
def contrast_main(args):
    # Initial setup
    nb_workers = _check_workers(args)

    if args.k is None:
        verbose_print(args, f"Performing histogram equalization with default kernel size")
        kernel_size = None
    else:
        verbose_print(args, f"Performing histogram equalization with kernel size {args.k}")
        kernel_size = args.k

    # Find all TIFFs
    paths, filenames = tifs_in_dir(args.input)
    verbose_print(args, f"Found {len(paths)} TIFFs")

    # Make output folder
    os.makedirs(args.output, exist_ok=True)

    for path, filename in tqdm.tqdm(zip(paths, filenames), total=len(paths)):
        img = io.imread(path)
        adjusted = equalize_adapthist(img, kernel_size=kernel_size).astype(np.float32)
        io.imsave(os.path.join(args.output, filename), adjusted, compress=args.c)

    verbose_print(args, f"Contrast done!")
Example #12
def test_imread():
    img = io.imread('data/syto.tif')
    assert img.shape == (64, 1024, 1024)
    assert img.dtype == 'uint16'
    assert img.max() == 4095
    assert img.min() == 0
Example #13
def image(image_path):
    return io.imread(image_path)
Example #14
def convert_batch(paths_batch, i, size, arr):
    start = i * size
    stop = start + len(paths_batch)
    img = np.asarray([io.imread(path) for path in paths_batch])
    arr[start:stop] = img
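As in Example #1, convert_main (Example #10) submits each batch to the pool as one packed tuple, so _convert_batch is presumably a wrapper that unpacks it. A minimal sketch under that assumption:

def _convert_batch(packed_args):
    # Pool.imap passes a single argument per task, so this assumed wrapper
    # unpacks the (paths_batch, i, size, arr) tuple built in convert_main
    paths_batch, i, size, arr = packed_args
    convert_batch(paths_batch, i, size, arr)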
Example #15
def estimate_histogram(paths, nbins=256):
    image = np.asarray([io.imread(path) for path in tqdm.tqdm(paths)])  # Load images into 3D array
    counts, bin_centers = histogram(image, nbins=nbins)
    return counts, bin_centers
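A hypothetical use of estimate_histogram: pool the intensity histogram over a folder of slices and derive a rough background threshold from it. The mode-based heuristic here is illustrative, not from the source:

paths, filenames = tifs_in_dir('slices/')  # assumed input folder
counts, bin_centers = estimate_histogram(paths, nbins=512)
# Illustrative heuristic: take the histogram mode as the background peak
# and set the threshold a few bins above it
background_peak = bin_centers[np.argmax(counts)]
threshold = background_peak + 3 * (bin_centers[1] - bin_centers[0])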
Example #16
def wholeorg_features(args, features, gate_labels, niche_labels):
    celltypes = read_csv(os.path.join(args.input, 'dataset/celltype_names.csv'))
    niches = read_csv(os.path.join(args.input, 'dataset/niche_names.csv'))

    if args.g is not None:
        if len(args.g) == 1:
            sigma = args.g[0]
        else:
            sigma = tuple(args.g)

    if args.d is None:
        downsample_factor = 1
    else:
        downsample_factor = np.asarray(args.d)

    voxel_orig = read_voxel_size(os.path.join(args.input, 'dataset/voxel_size.csv'))
    verbose_print(args, f'Original voxel size: {voxel_orig}')

    voxel_down = tuple(voxel_orig * downsample_factor)
    verbose_print(args, f'Downsampled voxel size: {voxel_down}')

    # Overall organoid
    foreground = io.imread(os.path.join(args.input, 'dataset/segment_foreground.tif'))
    verbose_print(args, f'Loaded foreground segmentation: {foreground.shape}')

    if not np.allclose(voxel_down, max(voxel_down)):
        voxel_isotropic = tuple(max(voxel_down) * np.ones(len(voxel_down)))
        verbose_print(args, f'Resampling foreground to isotropic: {voxel_isotropic}')
        factors = np.asarray(voxel_isotropic) / np.asarray(voxel_down)
        shape_isotropic = tuple([int(s / f) for s, f in zip(foreground.shape, factors)])
        foreground = resize(foreground, output_shape=shape_isotropic, order=0).astype(foreground.dtype)
        verbose_print(args, f'Resampled foreground segmentation: {foreground.shape}')
    else:
        voxel_isotropic = voxel_down

    regions = regionprops(foreground)

    # Find largest region
    vol = 0
    idx = None
    for i, region in enumerate(regions):
        if region.area > vol:
            vol = region.area  # track the running maximum volume
            idx = i
    largest = regions[idx]

    volume_pixels = largest.area
    eq_diam_pixels = largest.equivalent_diameter
    major_axis_pixels = largest.major_axis_length
    minor_axis_pixels = largest.minor_axis_length

    volume_mm3 = volume_pixels * (np.asarray(voxel_isotropic) / 1000).prod()
    eq_diam_mm = eq_diam_pixels * voxel_isotropic[0] / 1000
    major_axis_mm = major_axis_pixels * voxel_isotropic[0] / 1000
    minor_axis_mm = minor_axis_pixels * voxel_isotropic[0] / 1000
    axis_ratio = major_axis_pixels / minor_axis_pixels

    print(f'Organoid volume (mm3): {volume_mm3:.3f}')
    print(f'Organoid equivalent diameter (mm): {eq_diam_mm:.3f}')
    print(f'Organoid major axis length (mm): {major_axis_mm:.3f}')
    print(f'Organoid minor axis length (mm): {minor_axis_mm:.3f}')
    print(f'Organoid axis ratio: {axis_ratio:.3f}')

    features[f'organoid volume (mm3)'] = volume_mm3
    features[f'organoid equivalent diameter (mm)'] = eq_diam_mm
    features[f'organoid major axis (mm)'] = major_axis_mm
    features[f'organoid minor axis (mm)'] = minor_axis_mm
    features[f'organoid axis ratio'] = axis_ratio

    # Ventricles
    ventricles = io.imread(os.path.join(args.input, 'dataset/segment_ventricles.tif'))
    verbose_print(args, f'Loaded ventricle segmentation: {ventricles.shape}')

    # Smooth segmentation
    if args.g is not None:
        ventricles = smooth_segmentation(ventricles, sigma) > 0.5
        verbose_print(args, f'Smoothed segmentation with sigma {sigma}')

    if not np.allclose(voxel_down, max(voxel_down)):
        voxel_isotropic = tuple(max(voxel_down) * np.ones(len(voxel_down)))
        verbose_print(args, f'Resampling ventricles to isotropic: {voxel_isotropic}')
        factors = np.asarray(voxel_isotropic) / np.asarray(voxel_down)
        shape_isotropic = tuple([int(s / f) for s, f in zip(ventricles.shape, factors)])
        ventricles = resize(ventricles, output_shape=shape_isotropic, order=0, preserve_range=True).astype(
            ventricles.dtype)
        verbose_print(args, f'Resampled ventricle segmentation: {ventricles.shape}')
    else:
        voxel_isotropic = voxel_down

    labels, nb_ventricles = label(ventricles)
    verbose_print(args, f'Found {nb_ventricles} connected components in ventricle segmentation')

    regions = regionprops(labels)

    volumes_pixels = np.asarray([region.area for region in regions])
    eq_diams_pixels = np.asarray([region.equivalent_diameter for region in regions])
    major_axes_pixels = np.asarray([region.major_axis_length for region in regions])
    minor_axes_pixels = np.asarray([region.minor_axis_length for region in regions])

    volumes_um3 = volumes_pixels * np.asarray(voxel_isotropic).prod()
    eq_diams_um = eq_diams_pixels * voxel_isotropic[0]
    major_axes_um = major_axes_pixels * voxel_isotropic[0]
    minor_axes_um = minor_axes_pixels * voxel_isotropic[0]
    axis_ratios = major_axes_pixels / np.clip(minor_axes_pixels, 1, None)

    ave_volume_um3 = volumes_um3.mean()
    stdev_volume_um3 = volumes_um3.std()
    ave_eq_diam_um = eq_diams_um.mean()
    stdev_eq_diam_um = eq_diams_um.std()
    ave_major_axis_um = major_axes_um.mean()
    stdev_major_axis_um = major_axes_um.std()
    ave_minor_axis_um = minor_axes_um.mean()
    stdev_minor_axis_um = minor_axes_um.std()
    ave_axis_ratio = axis_ratios.mean()
    stdev_axis_ratio = axis_ratios.std()

    print(f'ave. ventricle volume (um3): {ave_volume_um3:.3f} ({stdev_volume_um3:.3f})')
    print(f'ave. ventricle equivalent diameter (um): {ave_eq_diam_um:.3f} ({stdev_eq_diam_um:.3f})')
    print(f'ave. ventricle major axis length (um): {ave_major_axis_um:.3f} ({stdev_major_axis_um:.3f})')
    print(f'ave. ventricle minor axis length (um): {ave_minor_axis_um:.3f} ({stdev_minor_axis_um:.3f})')
    print(f'ave. ventricle axis ratio: {ave_axis_ratio:.3f} ({stdev_axis_ratio:.3f})')

    features[f'ventricle count'] = nb_ventricles
    features[f'ventricle volume mean (um3)'] = ave_volume_um3
    features[f'ventricle volume stdev (um3)'] = stdev_volume_um3
    features[f'ventricle equivalent diameter mean (um)'] = ave_eq_diam_um
    features[f'ventricle equivalent diameter stdev (um)'] = stdev_eq_diam_um
    features[f'ventricle major axis mean (um)'] = ave_major_axis_um
    features[f'ventricle major axis stdev (um)'] = stdev_major_axis_um
    features[f'ventricle minor axis mean (um)'] = ave_minor_axis_um
    features[f'ventricle minor axis stdev (um)'] = stdev_minor_axis_um
    features[f'ventricle axis ratio mean'] = ave_axis_ratio
    features[f'ventricle axis ratio stdev'] = stdev_axis_ratio

    # Distance to surface
    mask = foreground > 0
    verbose_print(args, f'Made foreground mask: {mask.shape}')

    # Find surface coordinates
    eroded = binary_erosion(mask)
    surface = np.logical_and(mask, np.logical_not(eroded))
    coords = np.asarray(np.where(surface)).T
    surface_points = coords * np.asarray(voxel_down)

    # Load cell centers
    centroids_um = np.load(os.path.join(args.input, 'dataset/centroids_um.npy'))

    # Query nearest surface point for each cell center
    print('Surface distances')
    nbrs = NearestNeighbors(n_neighbors=1).fit(surface_points)
    for n, niche_name in enumerate(niches):
        print('neighborhood', n)
        niche_idx = np.where(niche_labels == n)[0]  # This is an index into centroids_um
        niche_centroids_um = centroids_um[niche_idx]
        gate_labels_niche = gate_labels[niche_idx]
        for c, celltype_name in enumerate(celltypes):
            print('cell type', c)
            celltype_idx = np.where(gate_labels_niche[:, c] == 1)[0]
            if len(celltype_idx) > 0:
                niche_celltype_centroids_um = niche_centroids_um[celltype_idx]
                surface_dist, _ = nbrs.kneighbors(niche_celltype_centroids_um)
                ave_surface_dist = surface_dist.mean()
                stdev_surface_dist = surface_dist.std()
            else:
                ave_surface_dist = np.nan
                stdev_surface_dist = np.nan
            print(f'Ave. surface dist: {ave_surface_dist:.3f} ({stdev_surface_dist:.3f}, n = {len(celltype_idx)})')
            features[f'{niche_name} nbrhd, {celltype_name} surface distance mean (um)'] = ave_surface_dist
            features[f'{niche_name} nbrhd, {celltype_name} surface distance stdev (um)'] = stdev_surface_dist

    return features