def segment_images(inpDir, outDir, config_data):
    """ Workflow for data with a spotty appearance
    in each 2d frame such as fibrillarin and beta catenin.

    Args:
        inpDir : path to the input directory
        outDir : path to the output directory
        config_data : dictionary of configuration parameters (parsed from the configuration file)
    """

    logging.basicConfig(
        format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
        datefmt='%d-%b-%y %H:%M:%S')
    logger = logging.getLogger("main")
    logger.setLevel(logging.INFO)

    inpDir_files = os.listdir(inpDir)
    for i, f in enumerate(inpDir_files):
        logger.info('Segmenting image : {}'.format(f))

        # Load image
        br = BioReader(os.path.join(inpDir, f))
        image = br.read_image()
        structure_channel = 0
        struct_img0 = image[:, :, :, structure_channel, 0]
        struct_img0 = struct_img0.transpose(2, 0, 1).astype(np.float32)

        # main algorithm
        intensity_scaling_param = config_data['intensity_scaling_param']
        struct_img = intensity_normalization(
            struct_img0, scaling_param=intensity_scaling_param)
        gaussian_smoothing_sigma = config_data['gaussian_smoothing_sigma']
        structure_img_smooth = image_smoothing_gaussian_3d(
            struct_img, sigma=gaussian_smoothing_sigma)
        s2_param = config_data['s2_param']
        bw = dot_2d_slice_by_slice_wrapper(structure_img_smooth, s2_param)
        minArea = config_data['minArea']
        seg = remove_small_objects(bw > 0,
                                   min_size=minArea,
                                   connectivity=1,
                                   in_place=False)
        seg = seg > 0
        out_img = seg.astype(np.uint8)
        out_img[out_img > 0] = 255

        # create output image
        out_img = out_img.transpose(1, 2, 0)
        out_img = out_img.reshape(
            (out_img.shape[0], out_img.shape[1], out_img.shape[2], 1, 1))

        # write image using BFIO
        bw = BioWriter(os.path.join(outDir, f))
        bw.num_x(out_img.shape[1])
        bw.num_y(out_img.shape[0])
        bw.num_z(out_img.shape[2])
        bw.num_c(out_img.shape[3])
        bw.num_t(out_img.shape[4])
        bw.pixel_type(dtype='uint8')
        bw.write_image(out_img)
        bw.close_image()
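# Sketch (assumption): segment_images above indexes config_data like a dictionary,
# so a minimal configuration for this spotty-structure workflow could look like the
# following. The key names match the lookups inside the function; the numeric values
# are illustrative only and should be tuned per dataset.
EXAMPLE_SPOT_CONFIG = {
    "intensity_scaling_param": [0.5, 15],  # bounds for intensity_normalization
    "gaussian_smoothing_sigma": 1,         # sigma for image_smoothing_gaussian_3d
    "s2_param": [[1, 0.04]],               # assumed [scale, cutoff] pairs for the 2D spot filter
    "minArea": 5,                          # smallest object (in voxels) to keep
}
# segment_images(inpDir, outDir, EXAMPLE_SPOT_CONFIG)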
def segment_images(inpDir, outDir, config_data): 
    """ Workflow for data with similar morphology
    as sialyltransferase 1.

    Args:
        inpDir : path to the input directory
        outDir : path to the output directory
        config_data : dictionary of configuration parameters (parsed from the configuration file)
    """

    logging.basicConfig(format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
                        datefmt='%d-%b-%y %H:%M:%S')
    logger = logging.getLogger("main")
    logger.setLevel(logging.INFO)

    inpDir_files = os.listdir(inpDir)
    for i,f in enumerate(inpDir_files):
        logger.info('Segmenting image : {}'.format(f))
        
        # Load image
        br = BioReader(os.path.join(inpDir,f))
        image = br.read_image()
        structure_channel = 0 
        struct_img0 = image[:,:,:,structure_channel,0]
        struct_img0 = struct_img0.transpose(2,0,1).astype(np.float32)

        # main algorithm
        intensity_scaling_param = config_data['intensity_scaling_param']
        struct_img = intensity_normalization(struct_img0, scaling_param=intensity_scaling_param) 
        gaussian_smoothing_sigma = config_data['gaussian_smoothing_sigma'] 
        structure_img_smooth = image_smoothing_gaussian_3d(struct_img, sigma=gaussian_smoothing_sigma)
        global_thresh_method = config_data['global_thresh_method'] 
        object_minArea = config_data['object_minArea'] 
        bw, object_for_debug = MO(structure_img_smooth,
                                  global_thresh_method=global_thresh_method,
                                  object_minArea=object_minArea,
                                  return_object=True)
        thin_dist_preserve = config_data['thin_dist_preserve']
        thin_dist = config_data['thin_dist']
        bw_thin = topology_preserving_thinning(bw>0, thin_dist_preserve, thin_dist)
        s3_param = config_data['s3_param']
        bw_extra = dot_3d_wrapper(structure_img_smooth, s3_param)
        bw_combine = np.logical_or(bw_extra>0, bw_thin)
        minArea = config_data['minArea']
        seg = remove_small_objects(bw_combine>0, min_size=minArea, connectivity=1, in_place=False)
        seg = seg > 0
        out_img=seg.astype(np.uint8)
        out_img[out_img>0]=255
    
        # create output image
        out_img = out_img.transpose(1,2,0)
        out_img = out_img.reshape((out_img.shape[0], out_img.shape[1], out_img.shape[2], 1, 1))

        # write image using BFIO
        bw = BioWriter(os.path.join(outDir,f), metadata=br.read_metadata())
        bw.num_x(out_img.shape[1])
        bw.num_y(out_img.shape[0])
        bw.num_z(out_img.shape[2])
        bw.num_c(out_img.shape[3])
        bw.num_t(out_img.shape[4])
        bw.pixel_type(dtype='uint8')
        bw.write_image(out_img)
        bw.close_image()
def nuc_seg(image, scale, sigma, f2, hol_min, hol_max, minA):
    img = intensity_normalization(image, scale)
    img_s = image_smoothing_gaussian_3d(img, sigma)
    bw = filament_2d_wrapper(img_s, f2)
    bw_2 = hole_filling(bw, hol_min, hol_max)
    seg = remove_small_objects(bw_2 > 0,
                               min_size=minA,
                               connectivity=1,
                               in_place=False)
    seg2, n = label(seg, return_num=True)
    return (img_s, bw_2, seg2, n)
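# Usage sketch (assumption): calling nuc_seg with illustrative parameter values.
# It returns the smoothed image, the hole-filled mask, the labeled nuclei and
# their count; the numbers below are not tuned defaults.
def _example_nuc_seg(nuc_img):
    img_smooth, nuc_mask, nuc_labels, n_nuclei = nuc_seg(
        nuc_img, scale=[0.5, 15], sigma=1, f2=[[2, 0.1]],
        hol_min=400, hol_max=40000, minA=100)
    return nuc_labels, n_nuclei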
def pex_seg(image, img_seg):
    # NOTE: scale, sigma and s3 are assumed to be module-level parameters;
    # they are not arguments of this function.
    Vol = []
    pex_seg_img = []
    pex_seg_n = []
    slices = ndi.find_objects(img_seg)
    img = intensity_normalization(image, scale)
    img = image_smoothing_gaussian_3d(img, sigma)
    for i, c in enumerate(slices):
        cell_V = img_seg[c]
        # estimate the cell volume by meshing the labeled region and summing
        # the signed tetrahedron volumes (det(vt[f]) / 6 per mesh face)
        vt, f, n, val = marching_cubes_lewiner(cell_V)
        Tvol = 1 / 6 * det(vt[f])
        vol = abs(sum(Tvol))
        Vol.append(vol)
        cell = image[c]
        cell_s = img[c]
        bw = dot_3d_wrapper(cell_s, s3)
        mask = remove_small_objects(bw > 0,
                                    min_size=2,
                                    connectivity=1,
                                    in_place=False)
        seed = dilation(peak_local_max(cell,
                                       labels=label(mask),
                                       min_distance=2,
                                       indices=False),
                        selem=ball(1))
        ws_map = -1 * ndi.distance_transform_edt(bw)
        seg = watershed(ws_map, label(seed), mask=mask, watershed_line=True)
        regions = regionprops(seg)
        n = len(regions)
        n = max(0, n)
        pex_seg_img.append(seg)
        pex_seg_n.append(n)
    df = pd.DataFrame({
        "Slices": slices,
        "Cell Vol": Vol,
        "Pex count": pex_seg_n
    })
    return df, pex_seg_img
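# Usage sketch (assumption): pex_seg expects the raw spot channel plus a labeled
# cell/nucleus segmentation (e.g. the output of nuc_seg above) and relies on the
# module-level parameters scale, sigma and s3 noted in its body.
def _example_pex_seg(pex_img, cell_labels):
    df, per_cell_seg = pex_seg(pex_img, cell_labels)
    # df holds one row per labeled region: its bounding slices, mesh volume and spot count
    return df, per_cell_seg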
def Workflow_cardio_npm1_100x(
    struct_img: np.ndarray,
    rescale_ratio: float = -1,
    output_type: str = "default",
    output_path: Union[str, Path] = None,
    fn: Union[str, Path] = None,
    output_func=None,
):
    """
    classic segmentation workflow wrapper for structure Cardio NPM1 100x

    Parameters
    ----------
    struct_img: np.ndarray
        the 3D image to be segmented
    rescale_ratio: float
        an optional parameter to allow rescaling the image before running the
        segmentation functions; default is no rescaling
    output_type: str
        select how to handle the output. Currently, four types are supported:
        1. default: the result will be saved at output_path with a filename of
            the original name without extension + "_struct_segmentation.tiff"
        2. array: the segmentation result will simply be returned as a numpy array
        3. array_with_contour: the segmentation result will be returned together
            with the contour of the segmentation
        4. customize: pass in an extra output_func to do a special save. All the
            intermediate results, names of these results, the output_path, and the
            original filename (without extension) will be passed in to output_func.
    """
    ##########################################################################
    # PARAMETERS:
    #   note that these parameters are supposed to be fixed for the structure
    #   and work well across different datasets

    intensity_norm_param = [0.5, 2.5]
    gaussian_smoothing_sigma = 1
    gaussian_smoothing_truncate_range = 3.0
    dot_2d_sigma = 2
    # dot_2d_sigma_extra = 1
    # dot_2d_cutoff = 0.025
    minArea = 1
    low_level_min_size = 1000
    ##########################################################################

    out_img_list = []
    out_name_list = []

    ###################
    # PRE_PROCESSING
    ###################
    # intensity normalization (min/max)
    struct_img = intensity_normalization(struct_img,
                                         scaling_param=intensity_norm_param)

    out_img_list.append(struct_img.copy())
    out_name_list.append("im_norm")

    # rescale if needed
    if rescale_ratio > 0:
        struct_img = zoom(struct_img, (1, rescale_ratio, rescale_ratio),
                          order=2)

        struct_img = (struct_img - struct_img.min() +
                      1e-8) / (struct_img.max() - struct_img.min() + 1e-8)
        gaussian_smoothing_truncate_range = (
            gaussian_smoothing_truncate_range * rescale_ratio)

    # smoothing with gaussian filter
    structure_img_smooth = image_smoothing_gaussian_3d(
        struct_img,
        sigma=gaussian_smoothing_sigma,
        truncate_range=gaussian_smoothing_truncate_range,
    )

    out_img_list.append(structure_img_smooth.copy())
    out_name_list.append("im_smooth")

    ###################
    # core algorithm
    ###################

    # step 1: low level thresholding
    # global_otsu = threshold_otsu(structure_img_smooth)
    global_tri = threshold_triangle(structure_img_smooth)
    global_median = np.percentile(structure_img_smooth, 50)

    th_low_level = (global_tri + global_median) / 2

    # print(global_median)
    # print(global_tri)
    # print(th_low_level)
    # imsave('img_smooth.tiff', structure_img_smooth)

    bw_low_level = structure_img_smooth > th_low_level
    bw_low_level = remove_small_objects(bw_low_level,
                                        min_size=low_level_min_size,
                                        connectivity=1,
                                        in_place=True)
    bw_low_level = dilation(bw_low_level, selem=ball(2))

    # step 2: high level thresholding
    local_cutoff = 0.333 * threshold_otsu(structure_img_smooth)
    bw_high_level = np.zeros_like(bw_low_level)
    lab_low, num_obj = label(bw_low_level, return_num=True, connectivity=1)
    for idx in range(num_obj):
        single_obj = lab_low == (idx + 1)
        local_otsu = threshold_otsu(structure_img_smooth[single_obj])
        if local_otsu > local_cutoff:
            bw_high_level[np.logical_and(
                structure_img_smooth > 1.2 * local_otsu, single_obj)] = 1

    # imsave('seg_coarse.tiff', bw_high_level.astype(np.uint8))

    out_img_list.append(bw_high_level.copy())
    out_name_list.append("bw_coarse")

    response_bright = dot_slice_by_slice(structure_img_smooth,
                                         log_sigma=dot_2d_sigma)

    bw_extra = response_bright > 0.03  # dot_2d_cutoff
    bw_extra[~bw_low_level] = 0

    bw_final = np.logical_or(bw_extra, bw_high_level)
    # bw_final[holes]=0

    ###################
    # POST-PROCESSING
    ###################
    seg = remove_small_objects(bw_final,
                               min_size=minArea,
                               connectivity=1,
                               in_place=True)

    # output
    seg = seg > 0
    seg = seg.astype(np.uint8)
    seg[seg > 0] = 255

    out_img_list.append(seg.copy())
    out_name_list.append("bw_final")

    if output_type == "default":
        # the default final output, simply save it to the output path
        save_segmentation(seg, False, Path(output_path), fn)
    elif output_type == "customize":
        # the hook for passing in a customized output function
        # use "out_img_list" and "out_name_list" in your hook to
        # customize your output functions
        output_func(out_img_list, out_name_list, Path(output_path), fn)
    elif output_type == "array":
        return seg
    elif output_type == "array_with_contour":
        return (seg, generate_segmentation_contour(seg))
    else:
        raise NotImplementedError(f"invalid output type: {output_type}")
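# Usage sketch (assumption): running the wrapper above in-memory. With
# output_type="array" the binary mask (uint8, 0/255) is returned directly and
# nothing is written to disk.
def _example_cardio_npm1_array(raw_3d):
    seg = Workflow_cardio_npm1_100x(raw_3d, rescale_ratio=-1,
                                    output_type="array",
                                    output_path=None, fn=None)
    return seg > 0  # back to a boolean mask if preferred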
def Workflow_ctnnb1(
    struct_img: np.ndarray,
    rescale_ratio: float = -1,
    output_type: str = "default",
    output_path: Union[str, Path] = None,
    fn: Union[str, Path] = None,
    output_func=None,
):
    """
    classic segmentation workflow wrapper for structure CTNNB1

    Parameters
    ----------
    struct_img: np.ndarray
        the 3D image to be segmented
    rescale_ratio: float
        an optional parameter to allow rescaling the image before running the
        segmentation functions; default is no rescaling
    output_type: str
        select how to handle the output. Currently, four types are supported:
        1. default: the result will be saved at output_path with a filename of
            the original name without extension + "_struct_segmentation.tiff"
        2. array: the segmentation result will simply be returned as a numpy array
        3. array_with_contour: the segmentation result will be returned together
            with the contour of the segmentation
        4. customize: pass in an extra output_func to do a special save. All the
            intermediate results, names of these results, the output_path, and the
            original filename (without extension) will be passed in to output_func.
    """
    ##########################################################################
    # PARAMETERS:
    #   note that these parameters are supposed to be fixed for the structure
    #   and work well across different datasets

    intensity_norm_param = [4, 27]
    gaussian_smoothing_sigma = 1
    gaussian_smoothing_truncate_range = 3.0
    dot_2d_sigma = 1.5
    dot_2d_cutoff = 0.01
    minArea = 10
    ##########################################################################

    out_img_list = []
    out_name_list = []

    ###################
    # PRE_PROCESSING
    ###################
    # intensity normalization (min/max)
    struct_img = intensity_normalization(struct_img,
                                         scaling_param=intensity_norm_param)

    out_img_list.append(struct_img.copy())
    out_name_list.append("im_norm")

    # rescale if needed
    if rescale_ratio > 0:
        struct_img = zoom(struct_img, (1, rescale_ratio, rescale_ratio),
                          order=2)

        struct_img = (struct_img - struct_img.min() +
                      1e-8) / (struct_img.max() - struct_img.min() + 1e-8)
        gaussian_smoothing_truncate_range = (
            gaussian_smoothing_truncate_range * rescale_ratio)

    # smoothing
    structure_img_smooth = image_smoothing_gaussian_3d(
        struct_img,
        sigma=gaussian_smoothing_sigma,
        truncate_range=gaussian_smoothing_truncate_range,
    )

    out_img_list.append(structure_img_smooth.copy())
    out_name_list.append("im_smooth")

    ###################
    # core algorithm
    ###################

    response = dot_slice_by_slice(structure_img_smooth, log_sigma=dot_2d_sigma)
    bw = response > dot_2d_cutoff

    ###################
    # POST-PROCESSING
    ###################
    seg = remove_small_objects(bw,
                               min_size=minArea,
                               connectivity=1,
                               in_place=False)

    # output
    seg = seg > 0
    seg = seg.astype(np.uint8)
    seg[seg > 0] = 255

    out_img_list.append(seg.copy())
    out_name_list.append("bw_final")

    if output_type == "default":
        # the default final output, simply save it to the output path
        save_segmentation(seg, False, Path(output_path), fn)
    elif output_type == "customize":
        # the hook for passing in a customized output function
        # use "out_img_list" and "out_name_list" in your hook to
        # customize your output functions
        output_func(out_img_list, out_name_list, Path(output_path), fn)
    elif output_type == "array":
        return seg
    elif output_type == "array_with_contour":
        return (seg, generate_segmentation_contour(seg))
    else:
        raise NotImplementedError(f"invalid output type: {output_type}")
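# Sketch (assumption): a customized output hook for output_type="customize". Per
# the docstring above, it receives the intermediate images, their names, the
# output path and the original filename (without extension); what it does with
# them is up to the caller.
def _example_output_func(out_img_list, out_name_list, output_path, fn):
    for img, name in zip(out_img_list, out_name_list):
        print("{} / {}: shape={}, dtype={}".format(fn, name, img.shape, img.dtype))
# Workflow_ctnnb1(raw_3d, output_type="customize", output_path="results",
#                 fn="img_0001", output_func=_example_output_func)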
def segment_images(inpDir, outDir, config_data):
    """ Workflow for data with shell like shapes 
    such as lamin B1 (interphase-specific)

    Args:
        inpDir : path to the input directory
        outDir : path to the output directory
        config_data : dictionary of configuration parameters (parsed from the configuration file)
    """

    logging.basicConfig(
        format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
        datefmt='%d-%b-%y %H:%M:%S')
    logger = logging.getLogger("main")
    logger.setLevel(logging.INFO)

    inpDir_files = os.listdir(inpDir)
    for i, f in enumerate(inpDir_files):
        logger.info('Segmenting image : {}'.format(f))

        # Load image
        br = BioReader(os.path.join(inpDir, f))
        image = br.read_image()
        structure_channel = 0
        struct_img0 = image[:, :, :, structure_channel, 0]
        struct_img0 = struct_img0.transpose(2, 0, 1).astype(np.float32)

        # main algorithm
        intensity_scaling_param = config_data['intensity_scaling_param']
        struct_img = intensity_normalization(
            struct_img0, scaling_param=intensity_scaling_param)
        gaussian_smoothing_sigma = config_data['gaussian_smoothing_sigma']
        structure_img_smooth = image_smoothing_gaussian_3d(
            struct_img, sigma=gaussian_smoothing_sigma)
        middle_frame_method = config_data['middle_frame_method']
        mid_z = get_middle_frame(structure_img_smooth,
                                 method=middle_frame_method)
        f2_param = config_data['f2_param']
        bw_mid_z = filament_2d_wrapper(structure_img_smooth[mid_z, :, :],
                                       f2_param)
        hole_max = config_data['hole_max']
        hole_min = config_data['hole_min']
        bw_fill_mid_z = hole_filling(bw_mid_z, hole_min, hole_max)
        seed = get_3dseed_from_mid_frame(
            np.logical_xor(bw_fill_mid_z, bw_mid_z), struct_img.shape, mid_z,
            hole_min)
        bw_filled = watershed(
            struct_img, seed.astype(int), watershed_line=True) > 0
        seg = np.logical_xor(bw_filled, dilation(bw_filled, selem=ball(1)))
        seg = seg > 0
        out_img = seg.astype(np.uint8)
        out_img[out_img > 0] = 255

        # create output image
        out_img = out_img.transpose(1, 2, 0)
        out_img = out_img.reshape(
            (out_img.shape[0], out_img.shape[1], out_img.shape[2], 1, 1))

        # write image using BFIO
        bw = BioWriter(os.path.join(outDir, f))
        bw.num_x(out_img.shape[1])
        bw.num_y(out_img.shape[0])
        bw.num_z(out_img.shape[2])
        bw.num_c(out_img.shape[3])
        bw.num_t(out_img.shape[4])
        bw.pixel_type(dtype='uint8')
        bw.write_image(out_img)
        bw.close_image()
def Workflow_lmnb1_mitotic(struct_img,
                           rescale_ratio,
                           output_type,
                           output_path,
                           fn,
                           output_func=None):
    ##########################################################################
    # PARAMETERS:
    #   note that these parameters are supposed to be fixed for the structure
    #   and work well across different datasets

    intensity_norm_param = [4000]
    gaussian_smoothing_sigma = 1
    gaussian_smoothing_truncate_range = 3.0
    f2_param = [[0.5, 0.01]]
    minArea = 5
    ##########################################################################

    out_img_list = []
    out_name_list = []

    ###################
    # PRE_PROCESSING
    ###################
    # intensity normalization (min/max)
    struct_img = intensity_normalization(struct_img,
                                         scaling_param=intensity_norm_param)

    out_img_list.append(struct_img.copy())
    out_name_list.append('im_norm')

    # rescale if needed
    if rescale_ratio > 0:
        struct_img = resize(struct_img, [1, rescale_ratio, rescale_ratio],
                            method="cubic")
        struct_img = (struct_img - struct_img.min() +
                      1e-8) / (struct_img.max() - struct_img.min() + 1e-8)

    # smoothing with gaussian filter
    structure_img_smooth = image_smoothing_gaussian_3d(
        struct_img,
        sigma=gaussian_smoothing_sigma,
        truncate_range=gaussian_smoothing_truncate_range)

    out_img_list.append(structure_img_smooth.copy())
    out_name_list.append('im_smooth')

    ###################
    # core algorithm
    ###################

    # 2D filament filter (applied slice by slice)
    bw = filament_2d_wrapper(structure_img_smooth, f2_param)

    ###################
    # POST-PROCESSING
    ###################
    bw = remove_small_objects(bw > 0,
                              min_size=minArea,
                              connectivity=1,
                              in_place=False)

    # output
    seg = bw > 0
    seg = seg.astype(np.uint8)
    seg[seg > 0] = 255

    out_img_list.append(seg.copy())
    out_name_list.append('bw_final')

    if output_type == 'default':
        # the default final output
        save_segmentation(seg, False, output_path, fn)
    elif output_type == 'array':
        return seg
    elif output_type == 'array_with_contour':
        return (seg, generate_segmentation_contour(seg))
    else:
        print('you can implement your output hook here, but it is not supported yet')
        quit()
def Workflow_fbl_labelfree_4dn(
    struct_img: np.ndarray,
    rescale_ratio: float = -1,
    output_type: str = "default",
    output_path: Union[str, Path] = None,
    fn: Union[str, Path] = None,
    output_func=None,
):
    """
    classic segmentation workflow wrapper for structure FBL Labelfree 4dn

    Parameters
    ----------
    struct_img: np.ndarray
        the 3D image to be segmented
    rescale_ratio: float
        an optional parameter to allow rescaling the image before running the
        segmentation functions; default is no rescaling
    output_type: str
        select how to handle the output. Currently, four types are supported:
        1. default: the result will be saved at output_path with a filename of
            the original name without extension + "_struct_segmentation.tiff"
        2. array: the segmentation result will simply be returned as a numpy array
        3. array_with_contour: the segmentation result will be returned together
            with the contour of the segmentation
        4. customize: pass in an extra output_func to do a special save. All the
            intermediate results, names of these results, the output_path, and the
            original filename (without extension) will be passed in to output_func.
    """
    ##########################################################################
    # PARAMETERS:
    minArea = 5
    low_level_min_size = 7000
    s2_param = [[0.5, 0.1]]
    intensity_scaling_param = [0.5, 19.5]
    gaussian_smoothing_sigma = 1

    ##########################################################################
    out_img_list = []
    out_name_list = []

    ###################
    # PRE_PROCESSING
    ###################
    # intensity normalization
    struct_norm = intensity_normalization(
        struct_img, scaling_param=intensity_scaling_param)
    out_img_list.append(struct_norm.copy())
    out_name_list.append("im_norm")

    # smoothing
    struct_smooth = image_smoothing_gaussian_3d(struct_norm,
                                                sigma=gaussian_smoothing_sigma)

    out_img_list.append(struct_smooth.copy())
    out_name_list.append("im_smooth")

    ###################
    # core algorithm
    ###################
    # step 1: low level thresholding
    # global_otsu = threshold_otsu(structure_img_smooth)
    global_tri = threshold_triangle(struct_smooth)
    global_median = np.percentile(struct_smooth, 50)

    th_low_level = (global_tri + global_median) / 2
    bw_low_level = struct_smooth > th_low_level
    bw_low_level = remove_small_objects(bw_low_level,
                                        min_size=low_level_min_size,
                                        connectivity=1,
                                        in_place=True)

    # step 2: high level thresholding
    bw_high_level = np.zeros_like(bw_low_level)
    lab_low, num_obj = label(bw_low_level, return_num=True, connectivity=1)
    for idx in range(num_obj):
        single_obj = lab_low == (idx + 1)
        local_otsu = threshold_otsu(struct_smooth[single_obj])
        bw_high_level[np.logical_and(struct_smooth > local_otsu * 1.2,
                                     single_obj)] = 1

    # step 3: finer segmentation
    response2d = dot_2d_slice_by_slice_wrapper(struct_smooth, s2_param)
    bw_finer = remove_small_objects(response2d,
                                    min_size=minArea,
                                    connectivity=1,
                                    in_place=True)

    # merge finer level detection into high level coarse segmentation
    # to include outside dim parts
    bw_high_level[bw_finer > 0] = 1

    ###################
    # POST-PROCESSING
    # make sure the variable name of final segmentation is 'seg'
    ###################
    seg = remove_small_objects(bw_high_level,
                               min_size=minArea,
                               connectivity=1,
                               in_place=True)

    # output
    seg = seg > 0
    seg = seg.astype(np.uint8)
    seg[seg > 0] = 255

    out_img_list.append(seg.copy())
    out_name_list.append("bw_final")

    if output_type == "default":
        # the default final output, simply save it to the output path
        save_segmentation(seg, False, Path(output_path), fn)
    elif output_type == "customize":
        # the hook for passing in a customized output function
        # use "out_img_list" and "out_name_list" in your hook to
        # customize your output functions
        # TODO CREATE OUT_IMG_LIST/OUT_NAMELIST?
        output_func(out_img_list, out_name_list, Path(output_path), fn)
    elif output_type == "array":
        return seg
    elif output_type == "array_with_contour":
        return (seg, generate_segmentation_contour(seg))
    else:
        raise NotImplementedError(f"invalid output type: {output_type}")
# get voxel dimensions
# (mtree is assumed to be the XML metadata tree parsed in an earlier step,
#  e.g. CZI metadata loaded with xml.etree.ElementTree)
dim_iter = mtree.find('Metadata').find('Scaling').find('Items').findall('Distance')
dims = {}
for dimension in dim_iter:
    dim_id = dimension.get('Id')
    dims.update({dim_id:float(dimension.find('Value').text)*1.E6}) # [um]
voxel_vol = dims['X'] * dims['Y'] * dims['Z']  # [um^3]
dims_xyz = np.array([dims['X'], dims['Y'], dims['Z']])


#--  SEGMENT SPOTS  -----------------------------------------------------------#
# genes, imgs_rna and t_spots (per-gene spot thresholds) are assumed to have
# been defined in earlier steps of the script.
for i, g in enumerate(genes):
    print('\nProcessing gene ' + g + '...\n')
    print('Smoothing image...')
    img_rna_smooth = image_smoothing_gaussian_3d(imgs_rna[i], sigma=1)
    s3_param = [[1, 0.75*t_spots[i]], [2, 0.75*t_spots[i]]]
    print('Identifying spots...')
    bw = dot_3d_wrapper(img_rna_smooth, s3_param)

    # watershed
    minArea = 4
    Mask = morphology.remove_small_objects(bw>0, min_size=minArea, connectivity=1, in_place=False) 
    labeled_mask = measure.label(Mask)
    
    print('Performing watershed segmentation...')
    peaks = feature.peak_local_max(imgs_rna[i],labels=labeled_mask, min_distance=2, indices=False)
    Seed = morphology.binary_dilation(peaks, selem=morphology.ball(1))
    Watershed_Map = -1*distance_transform_edt(bw)
    seg = morphology.watershed(Watershed_Map, measure.label(Seed), mask=Mask, watershed_line=True)
    seg = morphology.remove_small_objects(seg>0, min_size=minArea, connectivity=1, in_place=False)
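    # Sketch (assumption): report a simple spot count for this gene from the
    # final watershed-separated mask.
    n_spots = measure.label(seg, return_num=True)[1]
    print('Gene ' + g + ': ' + str(n_spots) + ' spots detected')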
def Workflow_cardio_npm1_100x(struct_img,
                              rescale_ratio,
                              output_type,
                              output_path,
                              fn,
                              output_func=None):
    ##########################################################################
    # PARAMETERS:
    #   note that these parameters are supposed to be fixed for the structure
    #   and work well across different datasets

    intensity_norm_param = [0.5, 2.5]
    gaussian_smoothing_sigma = 1
    gaussian_smoothing_truncate_range = 3.0
    dot_2d_sigma = 2
    dot_2d_sigma_extra = 1
    dot_2d_cutoff = 0.025
    minArea = 1
    low_level_min_size = 1000
    ##########################################################################

    out_img_list = []
    out_name_list = []

    ###################
    # PRE_PROCESSING
    ###################
    # intensity normalization (min/max)
    struct_img = intensity_normalization(struct_img,
                                         scaling_param=intensity_norm_param)

    out_img_list.append(struct_img.copy())
    out_name_list.append('im_norm')

    # rescale if needed
    if rescale_ratio > 0:
        struct_img = resize(struct_img, [1, rescale_ratio, rescale_ratio],
                            method="cubic")
        struct_img = (struct_img - struct_img.min() +
                      1e-8) / (struct_img.max() - struct_img.min() + 1e-8)
        gaussian_smoothing_truncate_range = gaussian_smoothing_truncate_range * rescale_ratio

    # smoothing with gaussian filter
    structure_img_smooth = image_smoothing_gaussian_3d(
        struct_img,
        sigma=gaussian_smoothing_sigma,
        truncate_range=gaussian_smoothing_truncate_range)

    out_img_list.append(structure_img_smooth.copy())
    out_name_list.append('im_smooth')

    ###################
    # core algorithm
    ###################

    # step 1: low level thresholding
    #global_otsu = threshold_otsu(structure_img_smooth)
    global_tri = threshold_triangle(structure_img_smooth)
    global_median = np.percentile(structure_img_smooth, 50)

    th_low_level = (global_tri + global_median) / 2

    #print(global_median)
    #print(global_tri)
    #print(th_low_level)
    #imsave('img_smooth.tiff', structure_img_smooth)

    bw_low_level = structure_img_smooth > th_low_level
    bw_low_level = remove_small_objects(bw_low_level,
                                        min_size=low_level_min_size,
                                        connectivity=1,
                                        in_place=True)
    bw_low_level = dilation(bw_low_level, selem=ball(2))

    # step 2: high level thresholding
    local_cutoff = 0.333 * threshold_otsu(structure_img_smooth)
    bw_high_level = np.zeros_like(bw_low_level)
    lab_low, num_obj = label(bw_low_level, return_num=True, connectivity=1)
    for idx in range(num_obj):
        single_obj = (lab_low == (idx + 1))
        local_otsu = threshold_otsu(structure_img_smooth[single_obj])
        if local_otsu > local_cutoff:
            bw_high_level[np.logical_and(
                structure_img_smooth > 1.2 * local_otsu, single_obj)] = 1

    #imsave('seg_coarse.tiff', bw_high_level.astype(np.uint8))

    out_img_list.append(bw_high_level.copy())
    out_name_list.append('bw_coarse')

    response_bright = dot_slice_by_slice(structure_img_smooth,
                                         log_sigma=dot_2d_sigma)
    '''
    response_dark = dot_slice_by_slice(1 - structure_img_smooth, log_sigma=dot_2d_sigma)
    response_dark_extra = dot_slice_by_slice(1 - structure_img_smooth, log_sigma=dot_2d_sigma_extra)

    imsave('res_dark_1.tiff', response_dark)
    imsave('res_dark_2.tiff', response_dark_extra)

    #inner_mask = bw_high_level.copy()
    #for zz in range(inner_mask.shape[0]):
    #    inner_mask[zz,:,:] = binary_fill_holes(inner_mask[zz,:,:])

    holes = np.logical_or(response_dark>dot_2d_cutoff , response_dark_extra>dot_2d_cutoff)
    #holes[~inner_mask] = 0
    '''

    bw_extra = response_bright > 0.03  # dot_2d_cutoff
    bw_extra[~bw_low_level] = 0

    bw_final = np.logical_or(bw_extra, bw_high_level)
    #bw_final[holes]=0

    ###################
    # POST-PROCESSING
    ###################
    seg = remove_small_objects(bw_final,
                               min_size=minArea,
                               connectivity=1,
                               in_place=True)

    # output
    seg = seg > 0
    seg = seg.astype(np.uint8)
    seg[seg > 0] = 255

    out_img_list.append(seg.copy())
    out_name_list.append('bw_final')

    if output_type == 'default':
        # the default final output
        save_segmentation(seg, False, output_path, fn)
    elif output_type == 'AICS_pipeline':
        # pre-defined output function for pipeline data
        save_segmentation(seg, True, output_path, fn)
    elif output_type == 'customize':
        # the hook for passing in a customized output function
        output_func(out_img_list, out_name_list, output_path, fn)
    elif output_type == 'array':
        return seg
    elif output_type == 'array_with_contour':
        return (seg, generate_segmentation_contour(seg))
    else:
        # the hook for pre-defined RnD output functions (AICS internal)
        img_list, name_list = NPM1_output(out_img_list, out_name_list,
                                          output_type, output_path, fn)
        if output_type == 'QCB':
            return img_list, name_list
def segment_images(inpDir, outDir, config_data):
    """ Workflow for dot like shapes such as
    Centrin-2, Desmoplakin, PMP34. 

    Args:
        inpDir : path to the input directory
        outDir : path to the output directory
        config_data : dictionary of configuration parameters (parsed from the configuration file)
    """

    logging.basicConfig(
        format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
        datefmt='%d-%b-%y %H:%M:%S')
    logger = logging.getLogger("main")
    logger.setLevel(logging.INFO)

    inpDir_files = os.listdir(inpDir)
    for i, f in enumerate(inpDir_files):
        logger.info('Segmenting image : {}'.format(f))

        # Load an image
        br = BioReader(os.path.join(inpDir, f))
        image = br.read_image()
        structure_channel = 0
        struct_img0 = image[:, :, :, structure_channel, 0]
        struct_img0 = struct_img0.transpose(2, 0, 1).astype(np.float32)

        # main algorithm
        intensity_scaling_param = config_data['intensity_scaling_param']
        struct_img = intensity_normalization(
            struct_img0, scaling_param=intensity_scaling_param)

        gaussian_smoothing_sigma = config_data['gaussian_smoothing_sigma']
        if config_data["gaussian_smoothing"] == "gaussian_slice_by_slice":
            structure_img_smooth = image_smoothing_gaussian_slice_by_slice(
                struct_img, sigma=gaussian_smoothing_sigma)
        else:
            structure_img_smooth = image_smoothing_gaussian_3d(
                struct_img, sigma=gaussian_smoothing_sigma)
        s3_param = config_data['s3_param']
        bw = dot_3d_wrapper(structure_img_smooth, s3_param)
        minArea = config_data['minArea']
        Mask = remove_small_objects(bw > 0,
                                    min_size=minArea,
                                    connectivity=1,
                                    in_place=False)
        Seed = dilation(peak_local_max(struct_img,
                                       labels=label(Mask),
                                       min_distance=2,
                                       indices=False),
                        selem=ball(1))
        Watershed_Map = -1 * distance_transform_edt(bw)
        seg = watershed(Watershed_Map,
                        label(Seed),
                        mask=Mask,
                        watershed_line=True)
        seg = remove_small_objects(seg > 0,
                                   min_size=minArea,
                                   connectivity=1,
                                   in_place=False)
        seg = seg > 0
        out_img = seg.astype(np.uint8)
        out_img[out_img > 0] = 255

        # create output image
        out_img = out_img.transpose(1, 2, 0)
        out_img = out_img.reshape(
            (out_img.shape[0], out_img.shape[1], out_img.shape[2], 1, 1))

        # write image using BFIO
        bw = BioWriter(os.path.join(outDir, f))
        bw.num_x(out_img.shape[1])
        bw.num_y(out_img.shape[0])
        bw.num_z(out_img.shape[2])
        bw.num_c(out_img.shape[3])
        bw.num_t(out_img.shape[4])
        bw.pixel_type(dtype='uint8')
        bw.write_image(out_img)
        bw.close_image()
def Workflow_lmnb1_interphase(struct_img,
                              rescale_ratio,
                              output_type,
                              output_path,
                              fn,
                              output_func=None):
    ##########################################################################
    # PARAMETERS:
    #   note that these parameters are supposed to be fixed for the structure
    #   and work well across different datasets

    intensity_norm_param = [4000]
    gaussian_smoothing_sigma = 1
    gaussian_smoothing_truncate_range = 3.0
    f2_param = [[1, 0.01], [2, 0.01], [3, 0.01]]
    hole_max = 40000
    hole_min = 400
    ##########################################################################

    out_img_list = []
    out_name_list = []

    ###################
    # PRE_PROCESSING
    ###################
    # intensity normalization (min/max)
    struct_img = intensity_normalization(struct_img,
                                         scaling_param=intensity_norm_param)

    out_img_list.append(struct_img.copy())
    out_name_list.append('im_norm')

    # rescale if needed
    if rescale_ratio > 0:
        struct_img = resize(struct_img, [1, rescale_ratio, rescale_ratio],
                            method="cubic")
        struct_img = (struct_img - struct_img.min() +
                      1e-8) / (struct_img.max() - struct_img.min() + 1e-8)

    # smoothing with gaussian filter
    structure_img_smooth = image_smoothing_gaussian_3d(
        struct_img,
        sigma=gaussian_smoothing_sigma,
        truncate_range=gaussian_smoothing_truncate_range)

    out_img_list.append(structure_img_smooth.copy())
    out_name_list.append('im_smooth')

    ###################
    # core algorithm
    ###################

    # get the mid frame
    mid_z = get_middle_frame(structure_img_smooth, method='intensity')

    # 2d filament filter on the middle frame
    bw_mid_z = filament_2d_wrapper(structure_img_smooth[mid_z, :, :], f2_param)

    # hole filling
    bw_fill_mid_z = hole_filling(bw_mid_z, hole_min, hole_max)
    seed = get_3dseed_from_mid_frame(np.logical_xor(bw_fill_mid_z, bw_mid_z),
                                     struct_img.shape,
                                     mid_z,
                                     hole_min,
                                     bg_seed=True)
    seg_filled = watershed(
        struct_img, seed.astype(int),
        watershed_line=True) > 1  # in watershed result, 1 is for background

    # get the actual shell
    seg = find_boundaries(seg_filled, connectivity=1, mode='thick')

    # output
    seg = seg.astype(np.uint8)
    seg[seg > 0] = 255

    out_img_list.append(seg.copy())
    out_name_list.append('bw_final')

    if output_type == 'default':
        # the default final output
        save_segmentation(seg, False, output_path, fn)
    elif output_type == 'array':
        return seg
    elif output_type == 'array_with_contour':
        return (seg, generate_segmentation_contour(seg))
    else:
        print('you can implement your output hook here, but it is not supported yet')
        quit()
def cell_mask(image, scale, sigma):
    img1 = intensity_normalization(image, scale)
    img_s = image_smoothing_gaussian_3d(img1, sigma)
    thr = threshold_otsu(img_s)
    bw = img_s > thr * 0.8
    return (img_s, bw)
def Workflow_lmnb1_interphase(
    struct_img: np.ndarray,
    rescale_ratio: float = -1,
    output_type: str = "default",
    output_path: Union[str, Path] = None,
    fn: Union[str, Path] = None,
    output_func=None,
):
    """
    classic segmentation workflow wrapper for structure LMNB1 interphase

    Parameters
    ----------
    struct_img: np.ndarray
        the 3D image to be segmented
    rescale_ratio: float
        an optional parameter to allow rescaling the image before running the
        segmentation functions; default is no rescaling
    output_type: str
        select how to handle the output. Currently, four types are supported:
        1. default: the result will be saved at output_path with a filename of
            the original name without extension + "_struct_segmentation.tiff"
        2. array: the segmentation result will simply be returned as a numpy array
        3. array_with_contour: the segmentation result will be returned together
            with the contour of the segmentation
        4. customize: pass in an extra output_func to do a special save. All the
            intermediate results, names of these results, the output_path, and the
            original filename (without extension) will be passed in to output_func.
    """
    ##########################################################################
    # PARAMETERS:
    #   note that these parameters are supposed to be fixed for the structure
    #   and work well across different datasets

    intensity_norm_param = [4000]
    gaussian_smoothing_sigma = 1
    gaussian_smoothing_truncate_range = 3.0
    f2_param = [[1, 0.01], [2, 0.01], [3, 0.01]]
    hole_max = 40000
    hole_min = 400
    ##########################################################################

    out_img_list = []
    out_name_list = []

    ###################
    # PRE_PROCESSING
    ###################
    # intensity normalization (min/max)
    struct_img = intensity_normalization(struct_img,
                                         scaling_param=intensity_norm_param)

    out_img_list.append(struct_img.copy())
    out_name_list.append("im_norm")

    # rescale if needed
    if rescale_ratio > 0:
        struct_img = zoom(struct_img, (1, rescale_ratio, rescale_ratio),
                          order=2)

        struct_img = (struct_img - struct_img.min() +
                      1e-8) / (struct_img.max() - struct_img.min() + 1e-8)

    # smoothing with gaussian filter
    structure_img_smooth = image_smoothing_gaussian_3d(
        struct_img,
        sigma=gaussian_smoothing_sigma,
        truncate_range=gaussian_smoothing_truncate_range,
    )

    out_img_list.append(structure_img_smooth.copy())
    out_name_list.append("im_smooth")

    ###################
    # core algorithm
    ###################

    # get the mid frame
    mid_z = get_middle_frame(structure_img_smooth, method="intensity")

    # 2d filament filter on the middle frame
    bw_mid_z = filament_2d_wrapper(structure_img_smooth[mid_z, :, :], f2_param)

    # hole filling
    bw_fill_mid_z = hole_filling(bw_mid_z, hole_min, hole_max)
    seed = get_3dseed_from_mid_frame(
        np.logical_xor(bw_fill_mid_z, bw_mid_z),
        struct_img.shape,
        mid_z,
        hole_min,
        bg_seed=True,
    )
    seg_filled = (
        watershed(struct_img, seed.astype(int), watershed_line=True) > 1
    )  # in watershed result, 1 is for background

    # get the actual shell
    seg = find_boundaries(seg_filled, connectivity=1, mode="thick")

    # output
    seg = seg.astype(np.uint8)
    seg[seg > 0] = 255

    out_img_list.append(seg.copy())
    out_name_list.append("bw_final")

    if output_type == "default":
        # the default final output, simply save it to the output path
        save_segmentation(seg, False, Path(output_path), fn)
    elif output_type == "customize":
        # the hook for passing in a customized output function
        # use "out_img_list" and "out_name_list" in your hook to
        # customize your output functions
        output_func(out_img_list, out_name_list, Path(output_path), fn)
    elif output_type == "array":
        return seg
    elif output_type == "array_with_contour":
        return (seg, generate_segmentation_contour(seg))
    else:
        raise NotImplementedError(f"invalid output type: {output_type}")
def Workflow_st6gal1(
    struct_img: np.ndarray,
    rescale_ratio: float = -1,
    output_type: str = "default",
    output_path: Union[str, Path] = None,
    fn: Union[str, Path] = None,
    output_func=None,
):
    """
    classic segmentation workflow wrapper for structure ST6GAL1

    Parameters
    ----------
    struct_img: np.ndarray
        the 3D image to be segmented
    rescale_ratio: float
        an optional parameter to allow rescaling the image before running the
        segmentation functions; default is no rescaling
    output_type: str
        select how to handle the output. Currently, four types are supported:
        1. default: the result will be saved at output_path with a filename of
            the original name without extension + "_struct_segmentation.tiff"
        2. array: the segmentation result will simply be returned as a numpy array
        3. array_with_contour: the segmentation result will be returned together
            with the contour of the segmentation
        4. customize: pass in an extra output_func to do a special save. All the
            intermediate results, names of these results, the output_path, and the
            original filename (without extension) will be passed in to output_func.
    """
    ##########################################################################
    # PARAMETERS:
    #   note that these parameters are supposed to be fixed for the structure
    #   and work well across different datasets

    intensity_norm_param = [9, 19]
    gaussian_smoothing_sigma = 1
    gaussian_smoothing_truncate_range = 3.0
    cell_wise_min_area = 1200
    dot_3d_sigma = 1.6
    dot_3d_cutoff = 0.02
    minArea = 10
    thin_dist = 1
    thin_dist_preserve = 1.6
    ##########################################################################

    out_img_list = []
    out_name_list = []

    ###################
    # PRE_PROCESSING
    ###################
    # intensity normalization (min/max)
    struct_img = intensity_normalization(struct_img, scaling_param=intensity_norm_param)

    out_img_list.append(struct_img.copy())
    out_name_list.append("im_norm")

    # rescale if needed
    if rescale_ratio > 0:
        struct_img = zoom(struct_img, (1, rescale_ratio, rescale_ratio), order=2)

        struct_img = (struct_img - struct_img.min() + 1e-8) / (
            struct_img.max() - struct_img.min() + 1e-8
        )
        gaussian_smoothing_truncate_range = (
            gaussian_smoothing_truncate_range * rescale_ratio
        )

    # smoothing with gaussian filter
    structure_img_smooth = image_smoothing_gaussian_3d(
        struct_img,
        sigma=gaussian_smoothing_sigma,
        truncate_range=gaussian_smoothing_truncate_range,
    )

    out_img_list.append(structure_img_smooth.copy())
    out_name_list.append("im_smooth")

    ###################
    # core algorithm
    ###################

    # cell-wise local adaptive thresholding
    th_low_level = threshold_triangle(structure_img_smooth)

    bw_low_level = structure_img_smooth > th_low_level
    bw_low_level = remove_small_objects(
        bw_low_level, min_size=cell_wise_min_area, connectivity=1, in_place=True
    )
    bw_low_level = dilation(bw_low_level, selem=ball(2))

    bw_high_level = np.zeros_like(bw_low_level)
    lab_low, num_obj = label(bw_low_level, return_num=True, connectivity=1)

    for idx in range(num_obj):
        single_obj = lab_low == (idx + 1)
        local_otsu = threshold_otsu(structure_img_smooth[single_obj > 0])
        bw_high_level[
            np.logical_and(structure_img_smooth > local_otsu * 0.98, single_obj)
        ] = 1

    # LOG 3d to capture spots
    response = dot_3d(structure_img_smooth, log_sigma=dot_3d_sigma)
    bw_extra = response > dot_3d_cutoff

    # thinning
    bw_high_level = topology_preserving_thinning(
        bw_high_level, thin_dist_preserve, thin_dist
    )

    # combine the two parts
    bw = np.logical_or(bw_high_level, bw_extra)

    ###################
    # POST-PROCESSING
    ###################
    seg = remove_small_objects(bw > 0, min_size=minArea, connectivity=1, in_place=False)

    # output
    seg = seg > 0
    seg = seg.astype(np.uint8)
    seg[seg > 0] = 255

    out_img_list.append(seg.copy())
    out_name_list.append("bw_final")

    if output_type == "default":
        # the default final output, simply save it to the output path
        save_segmentation(seg, False, Path(output_path), fn)
    elif output_type == "customize":
        # the hook for passing in a customized output function
        # use "out_img_list" and "out_name_list" in your hook to
        # customize your output functions
        output_func(out_img_list, out_name_list, Path(output_path), fn)
    elif output_type == "array":
        return seg
    elif output_type == "array_with_contour":
        return (seg, generate_segmentation_contour(seg))
    else:
        raise NotImplementedError(f"invalid output type: {output_type}")
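# Usage sketch (assumption): retrieving both the mask and its contour in memory
# via output_type="array_with_contour", as described in the docstring above.
def _example_st6gal1_with_contour(raw_3d):
    seg, contour = Workflow_st6gal1(raw_3d, output_type="array_with_contour")
    return seg, contour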