def foreground_main(args):
    verbose_print(args, f'Segmenting foreground from {args.input}')

    # Load the input image
    data = io.imread(args.input)

    # Smoothing
    if args.g is not None:
        data = gaussian_blur(data, args.g).astype(data.dtype)

    # Threshold image
    foreground = (data > args.t)  # .astype(np.uint8)

    # Fill holes
    # This is done slice-by-slice for now since there could be imaging problems where
    # a part of a ventricle is actually in the image at z = 0 or z = -1
    output = np.empty(foreground.shape, dtype=np.uint8)
    for i, img in enumerate(foreground):
        output[i] = binary_fill_holes(img)
    output *= 255

    # Save the result to TIFF
    io.imsave(args.output, output, compress=3)
    verbose_print(args, f'Segmentation written to {args.output}')

    verbose_print(args, f'Foreground segmentation done!')
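# Sketch (illustrative only, not part of the CLI): the slice-by-slice hole filling used
# in foreground_main, demonstrated on a synthetic boolean volume. The function name and
# toy data are hypothetical; the scipy call is the same binary_fill_holes used above.
def _example_fill_holes_slicewise():
    import numpy as np
    from scipy.ndimage import binary_fill_holes

    foreground = np.zeros((3, 16, 16), dtype=bool)
    foreground[:, 4:12, 4:12] = True   # a filled square in each slice
    foreground[:, 7:9, 7:9] = False    # punch a hole to be filled

    output = np.empty(foreground.shape, dtype=np.uint8)
    for i, img in enumerate(foreground):
        output[i] = binary_fill_holes(img)
    output *= 255  # store the binary mask as 0/255 for easy viewing in TIFF readers
    return output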
def read_downsample_write(path, factor, output_dir, filename, compress=1):
    # Read a single TIFF, downsample it by the given factor(s), and write the result
    arr = io.imread(path)
    if isinstance(factor, int):
        factors = tuple(factor for _ in range(arr.ndim))
    else:
        factors = tuple(factor)
    data = downsample(arr, factors)
    output = os.path.join(output_dir, filename)
    io.imsave(output, data, compress=compress)
def rescale_image(path, threshold, max_val, output, filename, compress):
    img = io.imread(path).astype(np.float32)  # load image as float

    # Subtract threshold and remove background by clipping negative values
    img -= threshold
    img = np.clip(img, 0, None)

    # Divide by max_val (accounting for threshold) to scale to [0, 1]
    img = img / (max_val - threshold)

    # Save result
    output_path = os.path.join(output, filename)
    io.imsave(output_path, img, compress=compress)
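# Sketch (illustrative only): the rescaling in rescale_image maps intensities in
# [threshold, max_val] onto [0, 1], with everything below threshold clipped to 0.
# The numbers here are hypothetical; numpy only.
def _example_rescale():
    import numpy as np

    img = np.array([50., 100., 300., 500.], dtype=np.float32)
    threshold, max_val = 100.0, 500.0

    img = np.clip(img - threshold, 0, None) / (max_val - threshold)
    # -> [0.0, 0.0, 0.5, 1.0]
    return img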
def stack_main(args):
    verbose_print(args, f'Stacking images in {args.input}')

    # Find all TIFFs in the input folder
    paths, filenames = utils.tifs_in_dir(args.input)
    verbose_print(args, f'Found {len(paths)} images')

    # Use the first slice to determine the 2D shape and dtype of the volume
    img0 = io.imread(paths[0])
    shape2d, dtype = img0.shape, img0.dtype

    # Preallocate the (Z, Y, X) volume and fill it slice-by-slice
    img = np.empty((len(paths), *shape2d), dtype)
    for z, path in tqdm(enumerate(paths), total=len(paths)):
        img[z] = io.imread(path)

    io.imsave(args.output, img, compress=1)

    verbose_print(args, f'Stacking done!')
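# Sketch (illustrative only): stack_main builds a (Z, Y, X) volume by reading one 2D
# TIFF per z-slice. With small data the same result is just np.stack; the preallocated
# array in stack_main avoids holding every slice in a Python list first.
def _example_stack():
    import numpy as np

    slices = [np.full((4, 4), z, dtype=np.uint16) for z in range(3)]  # stand-ins for TIFF slices
    volume = np.stack(slices, axis=0)  # shape (3, 4, 4)
    return volume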
def ventricle_main(args):
    verbose_print(args, f'Segmenting ventricles in {args.input}')

    # Load the input image
    data = io.imread(args.input)

    # Load the model
    if args.model.endswith('.pt'):
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # device = torch.device("cpu")
        model = load_model(args.model, device)
        model = model.eval()
        verbose_print(args, f'PyTorch model successfully loaded from {args.model} to {device} device')

        # Segment the input image
        verbose_print(args, f'Segmentation progress:')
        output = segment_ventricles(model, data, args.t, device)
    elif args.model.endswith('.h5'):
        model = load_keras_model(args.model)
        verbose_print(args, f'Keras model successfully loaded from {args.model}')

        # Segment the input image
        verbose_print(args, f'Segmentation progress:')
        output = segment_ventricles_keras(model, data, args.t)
    else:
        raise ValueError(f'Unrecognized model extension in {args.model}; expected .pt (PyTorch) or .h5 (Keras)')

    # Remove border regions
    if args.exclude_border:
        verbose_print(args, f'Removing regions connected to image border')
        # This could also be done in 3D instead of slice-by-slice
        # I'm not sure if images will start in ventricle, so doing slice-by-slice to be safe
        img = np.zeros_like(output)
        for i, zslice in tqdm(enumerate(output), total=len(output)):
            img[i] = clear_border(zslice)
        output = img

    # Save the result to TIFF
    io.imsave(args.output, output, compress=3)
    verbose_print(args, f'Segmentation written to {args.output}')

    verbose_print(args, f'Ventricle segmentation done!')
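# Sketch (illustrative only): skimage.segmentation.clear_border on a single slice, as in
# the slice-by-slice border cleanup above. Regions touching the slice edge are removed;
# interior regions are kept. The toy mask below is hypothetical.
def _example_clear_border():
    import numpy as np
    from skimage.segmentation import clear_border

    mask = np.zeros((8, 8), dtype=np.uint8)
    mask[0:3, 0:3] = 1   # region touching the border -> removed
    mask[4:6, 4:6] = 1   # interior region -> kept
    return clear_border(mask)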
def downsample_main(args):
    # Determine how many worker processes to use
    if args.n is None:
        nb_workers = multiprocessing.cpu_count()
    else:
        nb_workers = args.n

    verbose_print(args, f'Downsampling {args.input} with factors {args.factor}')

    if args.tiff:
        # Downsample each 2D TIFF in the input folder in parallel
        os.makedirs(args.output, exist_ok=True)
        paths, filenames = utils.tifs_in_dir(args.input)
        args_list = []
        for path, filename in zip(paths, filenames):
            args_list.append((path, args.factor, args.output, filename))
        with multiprocessing.Pool(nb_workers) as pool:
            pool.starmap(read_downsample_write, args_list)
    else:
        # Downsample a single volume
        arr = io.open(args.input, mode='r')
        if isinstance(args.factor, int):
            factors = tuple(args.factor for _ in range(arr.ndim))
        else:
            factors = tuple(args.factor)
        data = downsample(arr, factors)
        verbose_print(args, f'Writing result to {args.output}')
        io.imsave(args.output, data, compress=3)

    verbose_print(args, f'Downsampling done!')
def contrast_main(args):
    # Initial setup
    nb_workers = _check_workers(args)

    if args.k is None:
        verbose_print(args, f"Performing histogram equalization with default kernel size")
        kernel_size = None
    else:
        verbose_print(args, f"Performing histogram equalization with kernel size {args.k}")
        kernel_size = args.k

    # Find all TIFFs
    paths, filenames = tifs_in_dir(args.input)
    verbose_print(args, f"Found {len(paths)} TIFFs")

    # Make output folder
    os.makedirs(args.output, exist_ok=True)

    for path, filename in tqdm.tqdm(zip(paths, filenames), total=len(paths)):
        img = io.imread(path)
        adjusted = equalize_adapthist(img, kernel_size=kernel_size).astype(np.float32)
        io.imsave(os.path.join(args.output, filename), adjusted, compress=args.c)

    verbose_print(args, f"Contrast done!")
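# Sketch (illustrative only): adaptive histogram equalization (CLAHE) as used in
# contrast_main, on a synthetic 2D image. skimage.exposure.equalize_adapthist returns a
# float image in [0, 1]; kernel_size=None lets skimage choose a default tile size.
def _example_clahe():
    import numpy as np
    from skimage.exposure import equalize_adapthist

    rng = np.random.default_rng(0)
    img = (rng.random((128, 128)) * 4000).astype(np.uint16)  # dim, low-contrast image

    adjusted = equalize_adapthist(img, kernel_size=None).astype(np.float32)
    return adjusted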
def process_write(img, f, output, compress=3):
    # Apply the processing function to the image and write the result to disk
    result = f(img)
    io.imsave(output, result, compress=compress)