def Workflow_cardio_npm1_100x(
    struct_img: np.ndarray,
    rescale_ratio: float = -1,
    output_type: str = "default",
    output_path: Union[str, Path] = None,
    fn: Union[str, Path] = None,
    output_func=None,
):
    """
    classic segmentation workflow wrapper for structure Cardio NPM1 100x

    Parameter:
    -----------
    struct_img: np.ndarray
        the 3D image to be segmented
    rescale_ratio: float
        an optional parameter to allow rescale the image before running the
        segmentation functions, default is no rescaling
    output_type: str
        select how to handle output. Currently, four types are supported:
        1. default: the result will be saved at output_path whose filename is
            original name without extention + "_struct_segmentaiton.tiff"
        2. array: the segmentation result will be simply returned as a numpy array
        3. array_with_contour: segmentation result will be returned together with
            the contour of the segmentation
        4. customize: pass in an extra output_func to do a special save. All the
            intermediate results, names of these results, the output_path, and the
            original filename (without extension) will be passed in to output_func.
    """
    ##########################################################################
    # PARAMETERS:
    #   note that these parameters are supposed to be fixed for the structure
    #   and work well accross different datasets
    intensity_norm_param = [0.5, 2.5]
    gaussian_smoothing_sigma = 1
    gaussian_smoothing_truncate_range = 3.0
    dot_2d_sigma = 2
    # dot_2d_sigma_extra = 1
    # dot_2d_cutoff = 0.025
    minArea = 1
    low_level_min_size = 1000
    ##########################################################################

    out_img_list = []
    out_name_list = []

    ###################
    # PRE_PROCESSING
    ###################
    # intenisty normalization (min/max)
    struct_img = intensity_normalization(struct_img, scaling_param=intensity_norm_param)

    out_img_list.append(struct_img.copy())
    out_name_list.append("im_norm")

    # rescale if needed
    if rescale_ratio > 0:
        struct_img = zoom(struct_img, (1, rescale_ratio, rescale_ratio), order=2)

        struct_img = (struct_img - struct_img.min() + 1e-8) / (
            struct_img.max() - struct_img.min() + 1e-8
        )
        gaussian_smoothing_truncate_range = (
            gaussian_smoothing_truncate_range * rescale_ratio
        )

    # smoothing with gaussian filter
    structure_img_smooth = image_smoothing_gaussian_3d(
        struct_img,
        sigma=gaussian_smoothing_sigma,
        truncate_range=gaussian_smoothing_truncate_range,
    )

    out_img_list.append(structure_img_smooth.copy())
    out_name_list.append("im_smooth")

    ###################
    # core algorithm
    ###################

    # step 1: low level thresholding
    # global_otsu = threshold_otsu(structure_img_smooth)
    global_tri = threshold_triangle(structure_img_smooth)
    global_median = np.percentile(structure_img_smooth, 50)

    th_low_level = (global_tri + global_median) / 2
    # print(global_median)
    # print(global_tri)
    # print(th_low_level)
    # imsave('img_smooth.tiff', structure_img_smooth)

    bw_low_level = structure_img_smooth > th_low_level
    bw_low_level = remove_small_objects(
        bw_low_level, min_size=low_level_min_size, connectivity=1, in_place=True
    )
    bw_low_level = dilation(bw_low_level, selem=ball(2))

    # step 2: high level thresholding — per-object Otsu within each coarse object
    local_cutoff = 0.333 * threshold_otsu(structure_img_smooth)
    bw_high_level = np.zeros_like(bw_low_level)
    lab_low, num_obj = label(bw_low_level, return_num=True, connectivity=1)
    for idx in range(num_obj):
        single_obj = lab_low == (idx + 1)
        local_otsu = threshold_otsu(structure_img_smooth[single_obj])
        if local_otsu > local_cutoff:
            bw_high_level[
                np.logical_and(structure_img_smooth > 1.2 * local_otsu, single_obj)
            ] = 1

    # imsave('seg_coarse.tiff', bw_high_level.astype(np.uint8))

    out_img_list.append(bw_high_level.copy())
    out_name_list.append("bw_coarse")

    # extra spot detection (restricted to the coarse low-level mask)
    response_bright = dot_slice_by_slice(structure_img_smooth, log_sigma=dot_2d_sigma)

    bw_extra = response_bright > 0.03  # dot_2d_cutoff
    bw_extra[~bw_low_level] = 0

    bw_final = np.logical_or(bw_extra, bw_high_level)
    # bw_final[holes]=0

    ###################
    # POST-PROCESSING
    ###################
    seg = remove_small_objects(bw_final, min_size=minArea, connectivity=1, in_place=True)

    # output: binarize and scale to 8-bit 0/255
    seg = seg > 0
    seg = seg.astype(np.uint8)
    seg[seg > 0] = 255

    out_img_list.append(seg.copy())
    out_name_list.append("bw_final")

    if output_type == "default":
        # the default final output, simply save it to the output path
        save_segmentation(seg, False, Path(output_path), fn)
    elif output_type == "customize":
        # the hook for passing in a customized output function
        # use "out_img_list" and "out_name_list" in your hook to
        # customize your output functions
        output_func(out_img_list, out_name_list, Path(output_path), fn)
    elif output_type == "array":
        return seg
    elif output_type == "array_with_contour":
        return (seg, generate_segmentation_contour(seg))
    else:
        # fix: original message was missing the f-prefix, so it printed the
        # literal "{output_type}" instead of the actual value
        raise NotImplementedError(f"invalid output type: {output_type}")
def Workflow_cardio_npm1_100x(struct_img, rescale_ratio, output_type, output_path, fn, output_func=None):
    """
    classic segmentation workflow wrapper for structure Cardio NPM1 100x
    (legacy variant with positional arguments and extra pipeline/RnD output hooks)

    Parameter:
    -----------
    struct_img: np.ndarray
        the 3D image to be segmented
    rescale_ratio: float
        rescale the image before segmentation; values <= 0 mean no rescaling
    output_type: str
        one of 'default', 'AICS_pipeline', 'customize', 'array',
        'array_with_contour', or a pre-defined RnD type handled by NPM1_output
        (e.g. 'QCB')
    output_path: the directory to save results into
    fn: original filename (without extension) used to build output names
    output_func: callable, only used when output_type == 'customize'
    """
    ##########################################################################
    # PARAMETERS:
    #   note that these parameters are supposed to be fixed for the structure
    #   and work well accross different datasets
    intensity_norm_param = [0.5, 2.5]
    gaussian_smoothing_sigma = 1
    gaussian_smoothing_truncate_range = 3.0
    dot_2d_sigma = 2
    dot_2d_sigma_extra = 1
    dot_2d_cutoff = 0.025
    minArea = 1
    low_level_min_size = 1000
    ##########################################################################

    out_img_list = []
    out_name_list = []

    ###################
    # PRE_PROCESSING
    ###################
    # intenisty normalization (min/max)
    struct_img = intensity_normalization(struct_img, scaling_param=intensity_norm_param)

    out_img_list.append(struct_img.copy())
    out_name_list.append('im_norm')

    # rescale if needed
    if rescale_ratio > 0:
        struct_img = resize(struct_img, [1, rescale_ratio, rescale_ratio], method="cubic")

        struct_img = (struct_img - struct_img.min() + 1e-8) / (struct_img.max() - struct_img.min() + 1e-8)
        gaussian_smoothing_truncate_range = gaussian_smoothing_truncate_range * rescale_ratio

    # smoothing with gaussian filter
    structure_img_smooth = image_smoothing_gaussian_3d(
        struct_img,
        sigma=gaussian_smoothing_sigma,
        truncate_range=gaussian_smoothing_truncate_range)

    out_img_list.append(structure_img_smooth.copy())
    out_name_list.append('im_smooth')

    ###################
    # core algorithm
    ###################

    # step 1: low level thresholding
    # global_otsu = threshold_otsu(structure_img_smooth)
    global_tri = threshold_triangle(structure_img_smooth)
    global_median = np.percentile(structure_img_smooth, 50)

    th_low_level = (global_tri + global_median) / 2
    # print(global_median)
    # print(global_tri)
    # print(th_low_level)
    # imsave('img_smooth.tiff', structure_img_smooth)

    bw_low_level = structure_img_smooth > th_low_level
    bw_low_level = remove_small_objects(bw_low_level, min_size=low_level_min_size,
                                        connectivity=1, in_place=True)
    bw_low_level = dilation(bw_low_level, selem=ball(2))

    # step 2: high level thresholding — per-object Otsu within each coarse object
    local_cutoff = 0.333 * threshold_otsu(structure_img_smooth)
    bw_high_level = np.zeros_like(bw_low_level)
    lab_low, num_obj = label(bw_low_level, return_num=True, connectivity=1)
    for idx in range(num_obj):
        single_obj = (lab_low == (idx + 1))
        local_otsu = threshold_otsu(structure_img_smooth[single_obj])
        if local_otsu > local_cutoff:
            bw_high_level[np.logical_and(
                structure_img_smooth > 1.2 * local_otsu, single_obj)] = 1

    # imsave('seg_coarse.tiff', bw_high_level.astype(np.uint8))

    out_img_list.append(bw_high_level.copy())
    out_name_list.append('bw_coarse')

    response_bright = dot_slice_by_slice(structure_img_smooth, log_sigma=dot_2d_sigma)

    # NOTE(review): disabled dark-dot / hole detection experiment, kept for reference:
    # response_dark = dot_slice_by_slice(1 - structure_img_smooth, log_sigma=dot_2d_sigma)
    # response_dark_extra = dot_slice_by_slice(1 - structure_img_smooth, log_sigma=dot_2d_sigma_extra)
    # imsave('res_dark_1.tiff', response_dark)
    # imsave('res_dark_2.tiff', response_dark_extra)
    # inner_mask = bw_high_level.copy()
    # for zz in range(inner_mask.shape[0]):
    #     inner_mask[zz,:,:] = binary_fill_holes(inner_mask[zz,:,:])
    # holes = np.logical_or(response_dark>dot_2d_cutoff, response_dark_extra>dot_2d_cutoff)
    # holes[~inner_mask] = 0

    bw_extra = response_bright > 0.03  # dot_2d_cutoff
    bw_extra[~bw_low_level] = 0

    bw_final = np.logical_or(bw_extra, bw_high_level)
    # bw_final[holes]=0

    ###################
    # POST-PROCESSING
    ###################
    seg = remove_small_objects(bw_final, min_size=minArea, connectivity=1, in_place=True)

    # output: binarize and scale to 8-bit 0/255
    seg = seg > 0
    seg = seg.astype(np.uint8)
    seg[seg > 0] = 255

    out_img_list.append(seg.copy())
    out_name_list.append('bw_final')

    if output_type == 'default':
        # the default final output
        save_segmentation(seg, False, output_path, fn)
    elif output_type == 'AICS_pipeline':
        # pre-defined output function for pipeline data
        save_segmentation(seg, True, output_path, fn)
    elif output_type == 'customize':
        # the hook for passing in a customized output function
        # fix: was `output_fun(...)` which raised NameError — the
        # parameter is named `output_func`
        output_func(out_img_list, out_name_list, output_path, fn)
    elif output_type == 'array':
        return seg
    elif output_type == 'array_with_contour':
        return (seg, generate_segmentation_contour(seg))
    else:
        # the hook for pre-defined RnD output functions (AICS internal)
        img_list, name_list = NPM1_output(out_img_list, out_name_list, output_type, output_path, fn)
        if output_type == 'QCB':
            return img_list, name_list
def Workflow_ctnnb1(
    struct_img: np.ndarray,
    rescale_ratio: float = -1,
    output_type: str = "default",
    output_path: Union[str, Path] = None,
    fn: Union[str, Path] = None,
    output_func=None,
):
    """
    classic segmentation workflow wrapper for structure CTNNB1

    Parameter:
    -----------
    struct_img: np.ndarray
        the 3D image to be segmented
    rescale_ratio: float
        an optional parameter to allow rescale the image before running the
        segmentation functions, default is no rescaling
    output_type: str
        select how to handle output. Currently, four types are supported:
        1. default: the result will be saved at output_path whose filename is
            original name without extention + "_struct_segmentaiton.tiff"
        2. array: the segmentation result will be simply returned as a numpy array
        3. array_with_contour: segmentation result will be returned together with
            the contour of the segmentation
        4. customize: pass in an extra output_func to do a special save. All the
            intermediate results, names of these results, the output_path, and the
            original filename (without extension) will be passed in to output_func.
    """
    ##########################################################################
    # PARAMETERS:
    #   note that these parameters are supposed to be fixed for the structure
    #   and work well accross different datasets
    intensity_norm_param = [4, 27]
    gaussian_smoothing_sigma = 1
    gaussian_smoothing_truncate_range = 3.0
    dot_2d_sigma = 1.5
    dot_2d_cutoff = 0.01
    minArea = 10
    ##########################################################################

    out_img_list = []
    out_name_list = []

    ###################
    # PRE_PROCESSING
    ###################
    # intenisty normalization (min/max)
    struct_img = intensity_normalization(struct_img, scaling_param=intensity_norm_param)

    out_img_list.append(struct_img.copy())
    out_name_list.append("im_norm")

    # rescale if needed
    if rescale_ratio > 0:
        struct_img = zoom(struct_img, (1, rescale_ratio, rescale_ratio), order=2)

        struct_img = (struct_img - struct_img.min() + 1e-8) / (
            struct_img.max() - struct_img.min() + 1e-8
        )
        gaussian_smoothing_truncate_range = (
            gaussian_smoothing_truncate_range * rescale_ratio
        )

    # smoothing
    structure_img_smooth = image_smoothing_gaussian_3d(
        struct_img,
        sigma=gaussian_smoothing_sigma,
        truncate_range=gaussian_smoothing_truncate_range,
    )

    out_img_list.append(structure_img_smooth.copy())
    out_name_list.append("im_smooth")

    ###################
    # core algorithm: 2D spot filter applied slice by slice
    ###################
    response = dot_slice_by_slice(structure_img_smooth, log_sigma=dot_2d_sigma)
    bw = response > dot_2d_cutoff

    ###################
    # POST-PROCESSING
    ###################
    seg = remove_small_objects(bw, min_size=minArea, connectivity=1, in_place=False)

    # output: binarize and scale to 8-bit 0/255
    seg = seg > 0
    seg = seg.astype(np.uint8)
    seg[seg > 0] = 255

    out_img_list.append(seg.copy())
    out_name_list.append("bw_final")

    if output_type == "default":
        # the default final output, simply save it to the output path
        save_segmentation(seg, False, Path(output_path), fn)
    elif output_type == "customize":
        # the hook for passing in a customized output function
        # use "out_img_list" and "out_name_list" in your hook to
        # customize your output functions
        output_func(out_img_list, out_name_list, Path(output_path), fn)
    elif output_type == "array":
        return seg
    elif output_type == "array_with_contour":
        return (seg, generate_segmentation_contour(seg))
    else:
        # fix: original message was missing the f-prefix, so it printed the
        # literal "{output_type}" instead of the actual value
        raise NotImplementedError(f"invalid output type: {output_type}")
def Workflow_lamp1(
    struct_img: np.ndarray,
    rescale_ratio: float = -1,
    output_type: str = "default",
    output_path: Union[str, Path] = None,
    fn: Union[str, Path] = None,
    output_func=None,
):
    """
    classic segmentation workflow wrapper for structure LAMP1

    Parameter:
    -----------
    struct_img: np.ndarray
        the 3D image to be segmented
    rescale_ratio: float
        an optional parameter to allow rescale the image before running the
        segmentation functions, default is no rescaling
    output_type: str
        select how to handle output. Currently, four types are supported:
        1. default: the result will be saved at output_path whose filename is
            original name without extention + "_struct_segmentaiton.tiff"
        2. array: the segmentation result will be simply returned as a numpy array
        3. array_with_contour: segmentation result will be returned together with
            the contour of the segmentation
        4. customize: pass in an extra output_func to do a special save. All the
            intermediate results, names of these results, the output_path, and the
            original filename (without extension) will be passed in to output_func.
    """
    ##########################################################################
    # PARAMETERS:
    #   note that these parameters are supposed to be fixed for the structure
    #   and work well accross different datasets
    intensity_scaling_param = [3, 19]
    gaussian_smoothing_sigma = 1
    gaussian_smoothing_truncate_range = 3.0
    minArea = 15
    # ves_th_2d = 0.1

    vesselness_sigma = [1]
    vesselness_cutoff = 0.15

    # hole_min = 60
    hole_max = 1600

    log_sigma_1 = 5
    log_cutoff_1 = 0.09
    log_sigma_2 = 2.5
    log_cutoff_2 = 0.07
    log_sigma_3 = 1
    log_cutoff_3 = 0.01
    ##########################################################################

    out_img_list = []
    out_name_list = []

    # intenisty normalization
    struct_img = intensity_normalization(
        struct_img, scaling_param=intensity_scaling_param
    )

    out_img_list.append(struct_img.copy())
    out_name_list.append("im_norm")

    # rescale if needed
    if rescale_ratio > 0:
        struct_img = zoom(struct_img, (1, rescale_ratio, rescale_ratio), order=2)

        struct_img = (struct_img - struct_img.min() + 1e-8) / (
            struct_img.max() - struct_img.min() + 1e-8
        )
        gaussian_smoothing_truncate_range = (
            gaussian_smoothing_truncate_range * rescale_ratio
        )

    structure_img_smooth = image_smoothing_gaussian_slice_by_slice(
        struct_img,
        sigma=gaussian_smoothing_sigma,
        truncate_range=gaussian_smoothing_truncate_range,
    )

    out_img_list.append(structure_img_smooth.copy())
    out_name_list.append("im_smooth")

    # spot detection at three scales, OR-ed together
    response1 = dot_slice_by_slice(structure_img_smooth, log_sigma=log_sigma_1)
    bw1 = response1 > log_cutoff_1
    response2 = dot_slice_by_slice(structure_img_smooth, log_sigma=log_sigma_2)
    bw2 = response2 > log_cutoff_2
    bw_spot = np.logical_or(bw1, bw2)
    response3 = dot_slice_by_slice(structure_img_smooth, log_sigma=log_sigma_3)
    bw3 = response3 > log_cutoff_3
    bw_spot = np.logical_or(bw_spot, bw3)

    # ring/filament detection
    ves = vesselnessSliceBySlice(
        structure_img_smooth, sigmas=vesselness_sigma, tau=1, whiteonblack=True
    )
    bw_ves = ves > vesselness_cutoff

    # fill holes: slice by slice, keep background components no larger than
    # hole_max as "holes" and merge them into the mask
    partial_fill = np.logical_or(bw_spot, bw_ves)

    out_img_list.append(partial_fill.copy())
    out_name_list.append("interm_before_hole")

    holes = np.zeros_like(partial_fill)
    for zz in range(partial_fill.shape[0]):
        background_lab = label(~partial_fill[zz, :, :], connectivity=1)

        out = np.copy(background_lab)
        component_sizes = np.bincount(background_lab.ravel())
        too_big = component_sizes > hole_max
        too_big_mask = too_big[background_lab]
        out[too_big_mask] = 0
        # too_small = component_sizes <hole_min
        # too_small_mask = too_small[background_lab]
        # out[too_small_mask] = 0

        holes[zz, :, :] = out

    full_fill = np.logical_or(partial_fill, holes)

    seg = remove_small_objects(
        full_fill, min_size=minArea, connectivity=1, in_place=False
    )

    # output: binarize and scale to 8-bit 0/255
    seg = seg > 0
    seg = seg.astype(np.uint8)
    seg[seg > 0] = 255

    out_img_list.append(seg.copy())
    out_name_list.append("bw_final")

    if output_type == "default":
        # the default final output, simply save it to the output path
        save_segmentation(seg, False, Path(output_path), fn)
    elif output_type == "customize":
        # the hook for passing in a customized output function
        # use "out_img_list" and "out_name_list" in your hook to
        # customize your output functions
        output_func(out_img_list, out_name_list, Path(output_path), fn)
    elif output_type == "array":
        return seg
    elif output_type == "array_with_contour":
        return (seg, generate_segmentation_contour(seg))
    else:
        # fix: original message was missing the f-prefix, so it printed the
        # literal "{output_type}" instead of the actual value
        raise NotImplementedError(f"invalid output type: {output_type}")