def main(image, clipping_mask, plot=False):
    '''Clips a labeled image using another image as a mask, such that
    intersecting pixels/voxels are set to background.

    Parameters
    ----------
    image: numpy.ndarray
        image that should be clipped
    clipping_mask: numpy.ndarray[numpy.int32 or numpy.bool]
        image that should be used as clipping mask
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.clip_objects.Output

    Raises
    ------
    ValueError
        when `image` and `clipping_mask` don't have the same dimensions
    '''
    if image.shape != clipping_mask.shape:
        raise ValueError(
            '"image" and "clipping_mask" must have the same dimensions')

    # Reduce the clipping mask to boolean and blank out every pixel of
    # the input that falls inside it.
    binary_mask = clipping_mask > 0
    clipped_image = image.copy()
    clipped_image[binary_mask] = 0

    if not plot:
        return Output(clipped_image, str())

    from jtlib import plotting
    if str(image.dtype).startswith('uint'):
        # Grayscale input: show intensities before and after clipping.
        plots = [
            plotting.create_intensity_image_plot(image, 'ul', clip=True),
            plotting.create_mask_image_plot(binary_mask, 'ur'),
            plotting.create_intensity_image_plot(
                clipped_image, 'll', clip=True)
        ]
    else:
        # Label input: one color per object, background excluded.
        n_objects = len(np.unique(image)[1:])
        colorscale = plotting.create_colorscale(
            'Spectral', n=n_objects, permute=True, add_background=True)
        plots = [
            plotting.create_mask_image_plot(
                image, 'ul', colorscale=colorscale),
            plotting.create_mask_image_plot(binary_mask, 'ur'),
            plotting.create_mask_image_plot(
                clipped_image, 'll', colorscale=colorscale)
        ]
    figure = plotting.create_figure(plots, title='clipped image')
    return Output(clipped_image, figure)
def main(image, mask, plot=False, plot_type='objects'):
    '''Masks `image` such that pixels are set to zero wherever `mask`
    is background (zero).
    '''
    masked = np.copy(image)
    masked[mask == 0] = 0
    if plot:
        logger.info('create plot')
        from jtlib import plotting
        if plot_type == 'objects':
            # Render both images as label masks with a shared colorscale.
            colorscale = plotting.create_colorscale(
                'Spectral', n=image.max(), permute=True, add_background=True)
            panels = [
                plotting.create_mask_image_plot(
                    mask, 'ul', colorscale=colorscale),
                plotting.create_mask_image_plot(
                    masked, 'ur', colorscale=colorscale)
            ]
            figure = plotting.create_figure(
                panels, title='Masked label image')
        elif plot_type == 'intensity':
            # Render the masked result as a grayscale intensity image.
            panels = [
                plotting.create_mask_image_plot(mask, 'ul'),
                plotting.create_intensity_image_plot(masked, 'ur')
            ]
            figure = plotting.create_figure(
                panels, title='Masked intensity image')
    else:
        figure = str()
    return Output(masked, figure)
def main(image, n, plot=False):
    '''Expands or shrinks objects in `image` by `n` pixels along each axis.

    Parameters
    ----------
    image: numpy.ndarray[numpy.int32]
        2D label image with objects that should be expanded or shrunk
    n: int
        number of pixels by which each connected component should be
        expanded (``n > 0``) or shrunk (``n < 0``)
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.expand_objects.Output
    '''
    # NOTE: code from CellProfiler module "expandorshrink"
    # NOTE (S.B. 25.1.2018): renamed from "expand" to "expand_or_shrink"
    expanded_image = image.copy()
    if (n > 0):
        logger.info('expanding objects by %d pixels', n)
        # Give each background pixel within distance `n` of an object the
        # label of its nearest object pixel (via EDT nearest-index maps).
        background = image == 0
        distance, (i, j) = ndi.distance_transform_edt(
            background, return_indices=True)
        mask = background & (distance < n)
        expanded_image[mask] = image[i[mask], j[mask]]
    elif (n < 0):
        logger.info('shrinking objects by %d pixels', abs(n))
        # BUGFIX: removed stray Python 2 debug statement ``print 'shrinking'``
        # (a SyntaxError under Python 3, and redundant with the log call).
        objects = image != 0
        distance = ndi.distance_transform_edt(objects, return_indices=False)
        # Erase object pixels that lie within abs(n) of the object boundary.
        mask = np.invert(distance > abs(n))
        expanded_image[mask] = 0
    if plot:
        from jtlib import plotting
        n_objects = len(np.unique(expanded_image)[1:])
        colorscale = plotting.create_colorscale(
            'Spectral', n=n_objects, permute=True, add_background=True)
        plots = [
            plotting.create_mask_image_plot(
                image, 'ul', colorscale=colorscale),
            plotting.create_mask_image_plot(
                expanded_image, 'ur', colorscale=colorscale)
        ]
        figure = plotting.create_figure(plots, title='expanded image')
    else:
        figure = str()
    return Output(expanded_image, figure)
def main(image, plot=False):
    '''Optionally renders a plot of the given label image; the image
    itself is not modified.
    '''
    figure = str()
    if plot:
        logger.info('create plot')
        from jtlib import plotting
        # One color per label value, with a distinct background color.
        n_labels = image.max()
        colorscale = plotting.create_colorscale(
            'Spectral', n=n_labels, permute=True, add_background=True)
        panels = [
            plotting.create_mask_image_plot(
                image, 'ul', colorscale=colorscale)
        ]
        figure = plotting.create_figure(
            panels,
            title='LabelImage with "{0}" objects'.format(n_labels))
    return Output(figure)
def main(image, n, plot=False):
    '''Expands objects in `image` by `n` pixels along each axis.

    Parameters
    ----------
    image: numpy.ndarray[numpy.int32]
        2D label image with objects that should be expanded
    n: int
        number of pixels by which each connected component should be
        expanded
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.expand_objects.Output
    '''
    # NOTE: code from CellProfiler module "expandorshrink"
    is_background = image == 0
    # The EDT's nearest-index maps tell us, for every background pixel,
    # which object pixel it is closest to.
    distance, nearest = distance_transform_edt(
        is_background, return_indices=True)
    row_idx, col_idx = nearest
    expanded_image = image.copy()
    grow = is_background & (distance < n)
    expanded_image[grow] = image[row_idx[grow], col_idx[grow]]
    if plot:
        from jtlib import plotting
        n_objects = len(np.unique(expanded_image)[1:])
        colorscale = plotting.create_colorscale(
            'Spectral', n=n_objects, permute=True, add_background=True
        )
        panels = [
            plotting.create_mask_image_plot(
                image, 'ul', colorscale=colorscale
            ),
            plotting.create_mask_image_plot(
                expanded_image, 'ur', colorscale=colorscale
            )
        ]
        figure = plotting.create_figure(panels, title='expanded image')
    else:
        figure = str()
    return Output(expanded_image, figure)
def main(image, mask, plot=False, plot_type='objects'):
    '''Sets every pixel of `image` to zero where `mask` is zero and
    returns the masked copy.
    '''
    result = np.copy(image)
    result[mask == 0] = 0
    if not plot:
        return Output(result, str())
    logger.info('create plot')
    from jtlib import plotting
    if plot_type == 'objects':
        # Label rendering: shared colorscale for mask and result.
        colorscale = plotting.create_colorscale(
            'Spectral', n=image.max(), permute=True, add_background=True
        )
        data = [
            plotting.create_mask_image_plot(
                mask, 'ul', colorscale=colorscale
            ),
            plotting.create_mask_image_plot(
                result, 'ur', colorscale=colorscale
            )
        ]
        figure = plotting.create_figure(data, title='Masked label image')
    elif plot_type == 'intensity':
        # Intensity rendering of the masked result.
        data = [
            plotting.create_mask_image_plot(mask, 'ul'),
            plotting.create_intensity_image_plot(result, 'ur')
        ]
        figure = plotting.create_figure(data, title='Masked intensity image')
    return Output(result, figure)
def main(image, output_type='16-bit', plot=False):
    '''Converts an arbitrary Image to an IntensityImage

    Parameters
    ----------
    image: numpy.ndarray
        image to be converted
    output_type: str, optional
        desired output data type, either ``"8-bit"`` or ``"16-bit"``
        (default: ``"16-bit"``)
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.convert_to_intensity.Output
    '''
    if output_type == '8-bit':
        bit_depth = np.uint8
        max_value = pow(2, 8)
    elif output_type == '16-bit':
        bit_depth = np.uint16
        max_value = pow(2, 16)
    else:
        # Unknown type: fall back to 16-bit instead of failing.
        # BUGFIX: logger.warn() is a deprecated alias of logger.warning().
        logger.warning(
            'unrecognised requested output data-type %s, using 16-bit',
            output_type
        )
        bit_depth = np.uint16
        max_value = pow(2, 16)

    if image.dtype == np.int32:
        logger.info('Converting label image to intensity image')
        if (np.amax(image) < max_value):
            intensity_image = image.astype(dtype=bit_depth)
        else:
            # Label values would overflow the target dtype; keep the
            # input unchanged rather than corrupting labels.
            logger.warning(
                '%d objects in input label image exceeds maximum (%d)',
                np.amax(image), max_value
            )
            intensity_image = image
    else:
        logger.info('Converting non-label image to intensity image')
        intensity_image = image.astype(dtype=bit_depth)

    if plot:
        from jtlib import plotting
        n_objects = len(np.unique(image)[1:])
        colorscale = plotting.create_colorscale(
            'Spectral', n=n_objects, permute=True, add_background=True
        )
        plots = [
            plotting.create_mask_image_plot(
                image, 'ul', colorscale=colorscale
            ),
            plotting.create_intensity_image_plot(
                intensity_image, 'ur'
            )
        ]
        figure = plotting.create_figure(plots, title='convert_to_intensity_image')
    else:
        figure = str()
    return Output(intensity_image, figure)
def main(mask, intensity_image, min_area, max_area, min_cut_area,
         max_circularity, max_convexity, plot=False,
         selection_test_mode=False):
    '''Detects clumps in `mask` given criteria provided by the user
    and cuts them along the borders of watershed regions, which are
    determined based on the distance transform of `mask`.

    Parameters
    ----------
    mask: numpy.ndarray[Union[numpy.int32, numpy.bool]]
        2D binary or labeled image encoding potential clumps
    intensity_image: numpy.ndarray[numpy.uint8 or numpy.uint16]
        2D grayscale image with intensity values of the objects that
        should be detected
    min_area: int
        minimal area an object must have to be considered a clump
    max_area: int
        maximal area an object can have to be considered a clump
    min_cut_area: int
        minimal area an object must have
        (useful to prevent cuts that would result in too small objects)
    max_circularity: float
        maximal circularity an object can have to be considered a clump
    max_convexity: float
        maximal convexity an object can have to be considered a clump
    plot: bool, optional
        whether a plot should be generated
    selection_test_mode: bool, optional
        whether, instead of the normal plot, heatmaps should be
        generated that display values of the selection criteria *area*,
        *circularity* and *convexity* for each individual object in
        `mask` as well as the selected "clumps" based on the criteria
        provided by the user

    Returns
    -------
    jtmodules.separate_clumps.Output
    '''
    separated_mask = separate_clumped_objects(
        mask, min_cut_area, min_area, max_area,
        max_circularity, max_convexity)
    if plot:
        from jtlib import plotting
        # Mark the initial objects that were actually cut: an object is a
        # "clump" if its pixels map to more than one separated label.
        # BUGFIX: this mask was previously built only in the normal-plot
        # branch, so selection test mode raised a NameError on
        # `clumps_mask`; build it up front for both branches.
        clumps_mask = np.zeros(mask.shape, bool)
        initial_objects_label_image, n_initial_objects = mh.label(mask > 0)
        for i in range(1, n_initial_objects + 1):
            index = initial_objects_label_image == i
            if len(np.unique(separated_mask[index])) > 1:
                clumps_mask[index] = True
        if selection_test_mode:
            logger.info('create plot for selection test mode')
            labeled_mask, n_objects = mh.label(mask)
            f = Morphology(labeled_mask)
            values = f.extract()
            area_img = create_feature_image(
                values['Morphology_Area'].values, labeled_mask)
            convexity_img = create_feature_image(
                values['Morphology_Convexity'].values, labeled_mask)
            circularity_img = create_feature_image(
                values['Morphology_Circularity'].values, labeled_mask)
            area_colorscale = plotting.create_colorscale(
                'Greens', n_objects, add_background=True,
                background_color='white')
            circularity_colorscale = plotting.create_colorscale(
                'Blues', n_objects, add_background=True,
                background_color='white')
            convexity_colorscale = plotting.create_colorscale(
                'Reds', n_objects, add_background=True,
                background_color='white')
            plots = [
                plotting.create_float_image_plot(
                    area_img, 'ul', colorscale=area_colorscale),
                plotting.create_float_image_plot(
                    convexity_img, 'ur', colorscale=convexity_colorscale),
                plotting.create_float_image_plot(
                    circularity_img, 'll', colorscale=circularity_colorscale),
                plotting.create_mask_image_plot(clumps_mask, 'lr'),
            ]
            figure = plotting.create_figure(
                plots,
                title=('Selection criteria: "area" (green), "convexity" (red) '
                       'and "circularity" (blue)'))
        else:
            logger.info('create plot')
            # Foreground pixels that became background are the cut lines.
            # BUGFIX: subtracting boolean arrays is a TypeError in modern
            # NumPy; use a logical expression instead.
            cut_mask = (mask > 0) & (separated_mask == 0)
            n_objects = len(np.unique(separated_mask[separated_mask > 0]))
            colorscale = plotting.create_colorscale(
                'Spectral', n=n_objects, permute=True, add_background=True)
            outlines = mh.morph.dilate(mh.labeled.bwperim(separated_mask > 0))
            cutlines = mh.morph.dilate(mh.labeled.bwperim(cut_mask))
            plots = [
                plotting.create_mask_image_plot(
                    separated_mask, 'ul', colorscale=colorscale),
                plotting.create_intensity_overlay_image_plot(
                    intensity_image, outlines, 'ur'),
                plotting.create_mask_overlay_image_plot(
                    clumps_mask, cutlines, 'll')
            ]
            figure = plotting.create_figure(plots, title='separated clumps')
    else:
        figure = str()
    return Output(separated_mask, figure)
def main(image, mask, threshold=1, min_area=3, mean_area=5, max_area=1000,
         clip_percentile=99.999, plot=False):
    '''Detects blobs in `image` using an implementation of `SExtractor
    <http://www.astromatic.net/software/sextractor>`_ [1].
    The `image` is first convolved with a Laplacian of Gaussian filter of
    size `mean_area` to enhance blob-like structures. The enhanced image
    is then thresholded at `threshold` level and connected pixel
    components are subsequently deblended.

    Parameters
    ----------
    image: numpy.ndarray[Union[numpy.uint8, numpy.uint16]]
        grayscale image in which blobs should be detected
    mask: numpy.ndarray[Union[numpy.int32, numpy.bool]]
        binary or labeled image that specifies pixel regions of interest
        in which blobs should be detected
    threshold: int, optional
        threshold level for pixel values in the convolved image
        (default: ``1``)
    min_area: int, optional
        minimal size a blob is allowed to have (default: ``3``)
    mean_area: int, optional
        estimated average size of a blob (default: ``5``)
    max_area: int, optional
        maximal size a blob is allowed to have to be subject to deblending;
        no attempt will be made to deblend blobs larger than `max_area`
        (default: ``1000``)
    clip_percentile: float, optional
        clip intensity values in `image` above the given percentile; this
        may help in attenuating artifacts
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.detect_blobs.Output[Union[numpy.ndarray, str]]

    References
    ----------
    .. [1] Bertin, E. & Arnouts, S. 1996: SExtractor: Software for source
    extraction, Astronomy & Astrophysics Supplement 317, 393
    '''
    logger.info('detect blobs above threshold {0}'.format(threshold))

    # Clip extreme intensities to the given percentile so isolated bright
    # artifacts do not dominate the filter response.
    detect_image = image.copy()
    p = np.percentile(image, clip_percentile)
    detect_image[image > p] = p

    # Enhance the image for blob detection by convolving it with a LOG filter
    f = -1 * log_2d(size=mean_area, sigma=float(mean_area - 1) / 3)
    detect_image = mh.convolve(detect_image.astype(float), f)
    # Negative filter responses carry no blob signal; zero them out.
    detect_image[detect_image < 0] = 0

    # Mask regions of too big blobs
    # NOTE(review): `min_size=max_area` presumably selects components of at
    # least `max_area` pixels, i.e. the blobs considered too big to deblend
    # — confirm against mahotas `filter_labeled` semantics.
    pre_blobs = mh.label(detect_image > threshold)[0]
    bad_blobs, n_bad = mh.labeled.filter_labeled(pre_blobs, min_size=max_area)
    logger.info(
        'remove {0} blobs because they are bigger than {1} pixels'.format(
            n_bad, max_area))
    # Restrict detection to the region of interest, excluding over-sized
    # blobs both from the search mask and from the filtered image.
    detect_mask = np.invert(mask > 0)
    detect_mask[bad_blobs > 0] = True
    detect_image[bad_blobs > 0] = 0

    logger.info('deblend blobs')
    blobs, centroids = detect_blobs(image=detect_image, mask=detect_mask,
                                    threshold=threshold, min_area=min_area)

    # Number of distinct (non-background) blob labels.
    n = len(np.unique(blobs[blobs > 0]))
    logger.info('{0} blobs detected'.format(n))

    if plot:
        logger.info('create plot')
        from jtlib import plotting
        colorscale = plotting.create_colorscale('Spectral', n=n,
                                                permute=True,
                                                add_background=True)
        plots = [
            plotting.create_float_image_plot(detect_image, 'ul', clip=True),
            plotting.create_mask_image_plot(blobs, 'ur',
                                            colorscale=colorscale)
        ]
        figure = plotting.create_figure(
            plots, title=('detected #{0} blobs above threshold {1}'
                          ' in LOG filtered image'.format(n, threshold)))
    else:
        figure = str()

    return Output(centroids, blobs, figure)
def main(mask, intensity_image, min_area, max_area, min_cut_area,
         max_circularity, max_convexity, plot=False,
         selection_test_mode=False):
    '''Detects clumps in `mask` given criteria provided by the user
    and cuts them along the borders of watershed regions, which are
    determined based on the distance transform of `mask`.

    Parameters
    ----------
    mask: numpy.ndarray[Union[numpy.int32, numpy.bool]]
        2D binary or labeled image encoding potential clumps
    intensity_image: numpy.ndarray[numpy.uint8 or numpy.uint16]
        2D grayscale image with intensity values of the objects that
        should be detected
    min_area: int
        minimal area an object must have to be considered a clump
    max_area: int
        maximal area an object can have to be considered a clump
    min_cut_area: int
        minimal area an object must have
        (useful to prevent cuts that would result in too small objects)
    max_circularity: float
        maximal circularity an object can have to be considered a clump
    max_convexity: float
        maximal convexity an object can have to be considered a clump
    plot: bool, optional
        whether a plot should be generated
    selection_test_mode: bool, optional
        whether, instead of the normal plot, heatmaps should be
        generated that display values of the selection criteria *area*,
        *circularity* and *convexity* for each individual object in
        `mask` as well as the selected "clumps" based on the criteria
        provided by the user

    Returns
    -------
    jtmodules.separate_clumps.Output
    '''
    separated_mask = separate_clumped_objects(
        mask, min_cut_area, min_area, max_area,
        max_circularity, max_convexity
    )

    if plot:
        from jtlib import plotting
        # BUGFIX: `clumps_mask` was previously only built in the
        # normal-plot branch, so selection test mode crashed with a
        # NameError; build it up front so both branches can use it.
        # An initial object is a "clump" if its pixels map to more than
        # one label after separation.
        clumps_mask = np.zeros(mask.shape, bool)
        initial_objects_label_image, n_initial_objects = mh.label(mask > 0)
        for i in range(1, n_initial_objects+1):
            index = initial_objects_label_image == i
            if len(np.unique(separated_mask[index])) > 1:
                clumps_mask[index] = True
        if selection_test_mode:
            logger.info('create plot for selection test mode')
            labeled_mask, n_objects = mh.label(mask)
            f = Morphology(labeled_mask)
            values = f.extract()
            area_img = create_feature_image(
                values['Morphology_Area'].values, labeled_mask
            )
            convexity_img = create_feature_image(
                values['Morphology_Convexity'].values, labeled_mask
            )
            circularity_img = create_feature_image(
                values['Morphology_Circularity'].values, labeled_mask
            )
            area_colorscale = plotting.create_colorscale(
                'Greens', n_objects,
                add_background=True, background_color='white'
            )
            circularity_colorscale = plotting.create_colorscale(
                'Blues', n_objects,
                add_background=True, background_color='white'
            )
            convexity_colorscale = plotting.create_colorscale(
                'Reds', n_objects,
                add_background=True, background_color='white'
            )
            plots = [
                plotting.create_float_image_plot(
                    area_img, 'ul', colorscale=area_colorscale
                ),
                plotting.create_float_image_plot(
                    convexity_img, 'ur', colorscale=convexity_colorscale
                ),
                plotting.create_float_image_plot(
                    circularity_img, 'll', colorscale=circularity_colorscale
                ),
                plotting.create_mask_image_plot(
                    clumps_mask, 'lr'
                ),
            ]
            figure = plotting.create_figure(
                plots,
                title=(
                    'Selection criteria: "area" (green), "convexity" (red) '
                    'and "circularity" (blue)'
                )
            )
        else:
            logger.info('create plot')
            # Foreground pixels that became background are the cut lines.
            # BUGFIX: subtraction of boolean arrays is a TypeError in
            # modern NumPy; use a logical expression instead.
            cut_mask = (mask > 0) & (separated_mask == 0)
            n_objects = len(np.unique(separated_mask[separated_mask > 0]))
            colorscale = plotting.create_colorscale(
                'Spectral', n=n_objects, permute=True, add_background=True
            )
            outlines = mh.morph.dilate(mh.labeled.bwperim(separated_mask > 0))
            cutlines = mh.morph.dilate(mh.labeled.bwperim(cut_mask))
            plots = [
                plotting.create_mask_image_plot(
                    separated_mask, 'ul', colorscale=colorscale
                ),
                plotting.create_intensity_overlay_image_plot(
                    intensity_image, outlines, 'ur'
                ),
                plotting.create_mask_overlay_image_plot(
                    clumps_mask, cutlines, 'll'
                )
            ]
            figure = plotting.create_figure(
                plots, title='separated clumps'
            )
    else:
        figure = str()
    return Output(separated_mask, figure)
def main(mask, intensity_image, min_area, max_area, min_cut_area,
         max_circularity, max_convexity, plot=False,
         selection_test_mode=False, selection_test_show_remaining=False,
         trimming=True):
    '''Detects clumps in `mask` given criteria provided by the user
    and cuts them along the borders of watershed regions, which are
    determined based on the distance transform of `mask`.

    Parameters
    ----------
    mask: numpy.ndarray[Union[numpy.int32, numpy.bool]]
        2D binary or labeled image encoding potential clumps
    intensity_image: numpy.ndarray[numpy.uint8 or numpy.uint16]
        2D grayscale image with intensity values of the objects that
        should be detected
    min_area: int
        minimal area an object must have to be considered a clump
    max_area: int
        maximal area an object can have to be considered a clump
    min_cut_area: int
        minimal area an object must have
        (useful to prevent cuts that would result in too small objects)
    max_circularity: float
        maximal circularity an object can have to be considered a clump
    max_convexity: float
        maximal convexity an object can have to be considered a clump
    plot: bool, optional
        whether a plot should be generated
    selection_test_mode: bool, optional
        whether, instead of the normal plot, heatmaps should be
        generated that display values of the selection criteria *area*,
        *circularity* and *convexity* for each individual object in
        `mask` as well as the selected "clumps" based on the criteria
        provided by the user
    selection_test_show_remaining: bool, optional
        whether the selection test plot should be made on the remaining
        image after the cuts were performed (helps to see why some
        objects were not cut, especially if there are complicated clumps
        that require multiple cuts). Defaults to false, thus showing the
        values in the original image
    trimming: bool
        some cuts may create a tiny third object. If this boolean is
        true, tertiary objects < trimming_threshold (10) pixels will be
        removed

    Returns
    -------
    jtmodules.separate_clumps.Output
    '''
    separated_label_image = separate_clumped_objects(
        mask, min_cut_area, min_area, max_area,
        max_circularity, max_convexity, allow_trimming=trimming)

    if plot:
        from jtlib import plotting
        # An initial object is a "clump" if its pixels map to more than
        # one label after separation.
        clumps_mask = np.zeros(mask.shape, bool)
        initial_objects_label_image, n_initial_objects = mh.label(mask > 0)
        for n in range(1, n_initial_objects + 1):
            obj = (initial_objects_label_image == n)
            if len(np.unique(separated_label_image[obj])) > 1:
                clumps_mask[obj] = True
        # Foreground pixels that became background are where cuts were made.
        cut_mask = (mask > 0) & (separated_label_image == 0)
        cutlines = mh.morph.dilate(mh.labeled.bwperim(cut_mask))
        if selection_test_mode:
            logger.info('create plot for selection test mode')
            # Check if selection_test_show_remaining is active
            # If so, show values on processed image, not original
            if selection_test_show_remaining:
                labeled_mask, n_objects = mh.label(separated_label_image > 0)
                logger.info('Selection test mode plot with processed image')
            else:
                labeled_mask, n_objects = mh.label(mask)
            # Compute per-object morphology features and render each of
            # the three selection criteria as a heatmap.
            f = Morphology(labeled_mask)
            values = f.extract()
            area_img = create_feature_image(
                values['Morphology_Area'].values, labeled_mask)
            convexity_img = create_feature_image(
                values['Morphology_Convexity'].values, labeled_mask)
            circularity_img = create_feature_image(
                values['Morphology_Circularity'].values, labeled_mask)
            plots = [
                plotting.create_float_image_plot(area_img, 'ul'),
                plotting.create_float_image_plot(convexity_img, 'ur'),
                plotting.create_float_image_plot(circularity_img, 'll'),
                plotting.create_mask_overlay_image_plot(
                    clumps_mask, cutlines, 'lr'),
            ]
            figure = plotting.create_figure(
                plots, title=('Selection criteria:'
                              ' "area" (top left),'
                              ' "convexity" (top-right),'
                              ' and "circularity" (bottom-left);'
                              ' cuts made (bottom right).'))
        else:
            logger.info('create plot')
            n_objects = len(
                np.unique(separated_label_image[separated_label_image > 0]))
            colorscale = plotting.create_colorscale('Spectral',
                                                    n=n_objects,
                                                    permute=True,
                                                    add_background=True)
            outlines = mh.morph.dilate(
                mh.labeled.bwperim(separated_label_image > 0))
            plots = [
                plotting.create_mask_image_plot(separated_label_image, 'ul',
                                                colorscale=colorscale),
                plotting.create_intensity_overlay_image_plot(
                    intensity_image, outlines, 'ur'),
                plotting.create_mask_overlay_image_plot(
                    clumps_mask, cutlines, 'll')
            ]
            figure = plotting.create_figure(plots, title='separated clumps')
    else:
        figure = str()
    return Output(separated_label_image, figure)
def main(primary_label_image, intensity_image, contrast_threshold,
         min_threshold=None, max_threshold=None, plot=False):
    '''Detects secondary objects in an image by expanding the primary
    objects encoded in `primary_label_image`. The outlines of secondary
    objects are determined based on the watershed transform of
    `intensity_image` using the primary objects in `primary_label_image`
    as seeds.

    Parameters
    ----------
    primary_label_image: numpy.ndarray[numpy.int32]
        2D labeled array encoding primary objects, which serve as seeds for
        watershed transform
    intensity_image: numpy.ndarray[numpy.uint8 or numpy.uint16]
        2D grayscale array that serves as gradient for watershed transform;
        optimally this image is enhanced with a low-pass filter
    contrast_threshold: int
        contrast threshold for automatic separation of forground from
        background based on locally adaptive thresholding (when ``0``
        threshold defaults to `min_threshold` manual thresholding)
    min_threshold: int, optional
        minimal foreground value; pixels below `min_threshold` are considered
        background
    max_threshold: int, optional
        maximal foreground value; pixels above `max_threshold` are considered
        foreground
    plot: bool, optional
        whether a plot should be generated

    Returns
    -------
    jtmodules.segment_secondary.Output

    Note
    ----
    Setting `min_threshold` and `max_threshold` to the same value reduces
    to manual thresholding.
    '''
    if np.any(primary_label_image == 0):
        has_background = True
    else:
        has_background = False

    if not has_background:
        # No background pixels left to expand into.
        secondary_label_image = primary_label_image
    else:
        # A simple, fixed threshold doesn't work for SE stains. Therefore, we
        # use adaptive thresholding to determine background regions,
        # i.e. regions in the intensity_image that should not be covered by
        # secondary objects.
        # BUGFIX: was ``len(np.unique(primary_label_image[1:]))``, which
        # sliced off the first image row before counting labels and could
        # therefore miss objects confined to that row; count unique labels
        # (background included) over the whole image.
        n_objects = len(np.unique(primary_label_image))
        logger.info(
            'primary label image has %d objects', n_objects - 1
        )
        # SB: Added a catch for images with no primary objects
        # note that background is an 'object'
        if n_objects > 1:
            # TODO: consider using contrast_treshold as input parameter
            background_mask = mh.thresholding.bernsen(
                intensity_image, 5, contrast_threshold
            )
            if min_threshold is not None:
                logger.info(
                    'set lower threshold level to %d', min_threshold
                )
                background_mask[intensity_image < min_threshold] = True

            if max_threshold is not None:
                logger.info(
                    'set upper threshold level to %d', max_threshold
                )
                background_mask[intensity_image > max_threshold] = False
            # background_mask = mh.morph.open(background_mask)
            # Offset background labels past the primary labels so the two
            # label ranges don't collide in the watershed seed image.
            background_label_image = mh.label(background_mask)[0]
            background_label_image[background_mask] += n_objects

            logger.info('detect secondary objects via watershed transform')
            secondary_label_image = expand_objects_watershed(
                primary_label_image, background_label_image, intensity_image
            )
        else:
            logger.info('skipping secondary segmentation')
            secondary_label_image = np.zeros(
                primary_label_image.shape, dtype=np.int32
            )

    n_objects = len(np.unique(secondary_label_image)[1:])
    logger.info('identified %d objects', n_objects)

    if plot:
        from jtlib import plotting
        colorscale = plotting.create_colorscale(
            'Spectral', n=n_objects, permute=True, add_background=True
        )
        outlines = mh.morph.dilate(mh.labeled.bwperim(secondary_label_image > 0))
        plots = [
            plotting.create_mask_image_plot(
                primary_label_image, 'ul', colorscale=colorscale
            ),
            plotting.create_mask_image_plot(
                secondary_label_image, 'ur', colorscale=colorscale
            ),
            plotting.create_intensity_overlay_image_plot(
                intensity_image, outlines, 'll'
            )
        ]
        figure = plotting.create_figure(plots, title='secondary objects')
    else:
        figure = str()

    return Output(secondary_label_image, figure)
def main(primary_label_image, intensity_image, contrast_threshold,
         min_threshold=None, max_threshold=None, plot=False):
    '''Detects secondary objects in an image by expanding the primary
    objects encoded in `primary_label_image`. The outlines of secondary
    objects are determined based on the watershed transform of
    `intensity_image` using the primary objects in `primary_label_image`
    as seeds.

    Parameters
    ----------
    primary_label_image: numpy.ndarray[numpy.int32]
        2D labeled array encoding primary objects, which serve as seeds for
        watershed transform
    intensity_image: numpy.ndarray[numpy.uint8 or numpy.uint16]
        2D grayscale array that serves as gradient for watershed transform;
        optimally this image is enhanced with a low-pass filter
    contrast_threshold: int
        contrast threshold for automatic separation of forground from
        background based on locally adaptive thresholding (when ``0``
        threshold defaults to `min_threshold` manual thresholding)
    min_threshold: int, optional
        minimal foreground value; pixels below `min_threshold` are considered
        background
    max_threshold: int, optional
        maximal foreground value; pixels above `max_threshold` are considered
        foreground
    plot: bool, optional
        whether a plot should be generated

    Returns
    -------
    jtmodules.segment_secondary.Output

    Note
    ----
    Setting `min_threshold` and `max_threshold` to the same value reduces
    to manual thresholding.
    '''
    if np.any(primary_label_image == 0):
        has_background = True
    else:
        has_background = False

    if not has_background:
        # Nothing to expand into; primary objects already cover the image.
        secondary_label_image = primary_label_image
    else:
        # A simple, fixed threshold doesn't work for SE stains. Therefore, we
        # use adaptive thresholding to determine background regions,
        # i.e. regions in the intensity_image that should not be covered by
        # secondary objects.
        # BUGFIX: ``np.unique(primary_label_image[1:])`` dropped the first
        # row of the image before collecting labels, miscounting objects
        # that only occur in that row; count over the full image instead.
        n_objects = len(np.unique(primary_label_image))
        logger.info(
            'primary label image has %d objects', n_objects - 1
        )
        # SB: Added a catch for images with no primary objects
        # note that background is an 'object'
        if n_objects > 1:
            # TODO: consider using contrast_treshold as input parameter
            background_mask = mh.thresholding.bernsen(
                intensity_image, 5, contrast_threshold
            )
            if min_threshold is not None:
                logger.info(
                    'set lower threshold level to %d', min_threshold
                )
                background_mask[intensity_image < min_threshold] = True

            if max_threshold is not None:
                logger.info(
                    'set upper threshold level to %d', max_threshold
                )
                background_mask[intensity_image > max_threshold] = False
            # background_mask = mh.morph.open(background_mask)
            # Shift background label values beyond the primary label range
            # so watershed seeds stay distinct.
            background_label_image = mh.label(background_mask)[0]
            background_label_image[background_mask] += n_objects

            logger.info('detect secondary objects via watershed transform')
            secondary_label_image = expand_objects_watershed(
                primary_label_image, background_label_image, intensity_image
            )
        else:
            logger.info('skipping secondary segmentation')
            secondary_label_image = np.zeros(
                primary_label_image.shape, dtype=np.int32
            )

    n_objects = len(np.unique(secondary_label_image)[1:])
    logger.info('identified %d objects', n_objects)

    if plot:
        from jtlib import plotting
        colorscale = plotting.create_colorscale(
            'Spectral', n=n_objects, permute=True, add_background=True
        )
        outlines = mh.morph.dilate(mh.labeled.bwperim(secondary_label_image > 0))
        plots = [
            plotting.create_mask_image_plot(
                primary_label_image, 'ul', colorscale=colorscale
            ),
            plotting.create_mask_image_plot(
                secondary_label_image, 'ur', colorscale=colorscale
            ),
            plotting.create_intensity_overlay_image_plot(
                intensity_image, outlines, 'll'
            )
        ]
        figure = plotting.create_figure(plots, title='secondary objects')
    else:
        figure = str()

    return Output(secondary_label_image, figure)
def main(image, clipping_mask, plot=False):
    '''Clips a labeled image using another image as a mask, such that
    intersecting pixels/voxels are set to background.

    Parameters
    ----------
    image: numpy.ndarray
        image that should be clipped
    clipping_mask: numpy.ndarray[numpy.int32 or numpy.bool]
        image that should be used as clipping mask
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.clip_objects.Output

    Raises
    ------
    ValueError
        when `image` and `clipping_mask` don't have the same dimensions
    '''
    if image.shape != clipping_mask.shape:
        raise ValueError(
            '"image" and "clipping_mask" must have the same dimensions'
        )

    # Reduce the mask to boolean and zero out every pixel of the input
    # that falls inside it.
    clipping_mask = clipping_mask > 0

    clipped_image = image.copy()
    clipped_image[clipping_mask] = 0

    if plot:
        from jtlib import plotting
        # A uint dtype indicates a grayscale image; anything else is
        # treated as a label image and rendered with a per-object colorscale.
        if str(image.dtype).startswith('uint'):
            plots = [
                plotting.create_intensity_image_plot(
                    image, 'ul', clip=True
                ),
                plotting.create_mask_image_plot(
                    clipping_mask, 'ur'
                ),
                plotting.create_intensity_image_plot(
                    clipped_image, 'll', clip=True
                )
            ]
        else:
            # Number of objects excluding background (first unique value).
            n_objects = len(np.unique(image)[1:])
            colorscale = plotting.create_colorscale(
                'Spectral', n=n_objects, permute=True, add_background=True
            )
            plots = [
                plotting.create_mask_image_plot(
                    image, 'ul', colorscale=colorscale
                ),
                plotting.create_mask_image_plot(
                    clipping_mask, 'ur'
                ),
                plotting.create_mask_image_plot(
                    clipped_image, 'll', colorscale=colorscale
                )
            ]
        figure = plotting.create_figure(plots, title='clipped image')
    else:
        figure = str()

    return Output(clipped_image, figure)
def main(image, mask, threshold=1, min_area=3, mean_area=5, max_area=1000,
         clip_percentile=99.999, plot=False):
    '''Detects blobs in `image` using an implementation of `SExtractor
    <http://www.astromatic.net/software/sextractor>`_ [1].
    The `image` is first convolved with a Laplacian of Gaussian filter of
    size `mean_area` to enhance blob-like structures. The enhanced image
    is then thresholded at `threshold` level and connected pixel
    components are subsequently deblended.

    Parameters
    ----------
    image: numpy.ndarray[Union[numpy.uint8, numpy.uint16]]
        grayscale image in which blobs should be detected
    mask: numpy.ndarray[Union[numpy.int32, numpy.bool]]
        binary or labeled image that specifies pixel regions of interest
        in which blobs should be detected
    threshold: int, optional
        threshold level for pixel values in the convolved image
        (default: ``1``)
    min_area: int, optional
        minimal size a blob is allowed to have (default: ``3``)
    mean_area: int, optional
        estimated average size of a blob (default: ``5``)
    max_area: int, optional
        maximal size a blob is allowed to have to be subject to deblending;
        no attempt will be made to deblend blobs larger than `max_area`
        (default: ``1000``)
    clip_percentile: float, optional
        clip intensity values in `image` above the given percentile; this
        may help in attenuating artifacts
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.detect_blobs.Output[Union[numpy.ndarray, str]]

    References
    ----------
    .. [1] Bertin, E. & Arnouts, S. 1996: SExtractor: Software for source
    extraction, Astronomy & Astrophysics Supplement 317, 393
    '''
    logger.info('detect blobs above threshold {0}'.format(threshold))

    # Clip extreme intensities at the given percentile so bright artifacts
    # do not dominate the filter response.
    detect_image = image.copy()
    p = np.percentile(image, clip_percentile)
    detect_image[image > p] = p

    # Enhance the image for blob detection by convolving it with a LOG filter
    f = -1 * log_2d(size=mean_area, sigma=float(mean_area - 1)/3)
    detect_image = mh.convolve(detect_image.astype(float), f)
    # Negative filter responses carry no blob signal; zero them out.
    detect_image[detect_image < 0] = 0

    # Mask regions of too big blobs
    # NOTE(review): `min_size=max_area` presumably selects components of at
    # least `max_area` pixels, i.e. the blobs too big to deblend — confirm
    # against mahotas `filter_labeled` semantics.
    pre_blobs = mh.label(detect_image > threshold)[0]
    bad_blobs, n_bad = mh.labeled.filter_labeled(pre_blobs, min_size=max_area)
    logger.info(
        'remove {0} blobs because they are bigger than {1} pixels'.format(
            n_bad, max_area
        )
    )
    # Restrict detection to the region of interest and exclude over-sized
    # blobs from both the search mask and the filtered image.
    detect_mask = np.invert(mask > 0)
    detect_mask[bad_blobs > 0] = True
    detect_image[bad_blobs > 0] = 0

    logger.info('deblend blobs')
    blobs, centroids = detect_blobs(
        image=detect_image, mask=detect_mask, threshold=threshold,
        min_area=min_area
    )

    # Number of distinct (non-background) blob labels.
    n = len(np.unique(blobs[blobs>0]))
    logger.info('{0} blobs detected'.format(n))

    if plot:
        logger.info('create plot')
        from jtlib import plotting
        colorscale = plotting.create_colorscale(
            'Spectral', n=n, permute=True, add_background=True
        )
        plots = [
            plotting.create_float_image_plot(
                detect_image, 'ul', clip=True
            ),
            plotting.create_mask_image_plot(
                blobs, 'ur', colorscale=colorscale
            )
        ]
        figure = plotting.create_figure(
            plots,
            title=(
                'detected #{0} blobs above threshold {1}'
                ' in LOG filtered image'.format(n, threshold)
            )
        )
    else:
        figure = str()

    return Output(centroids, blobs, figure)