Example #1

# Assumed imports for this snippet:
import numpy as np
from skimage.morphology import cube, diamond, octahedron, square
def _get_axial_shifts(ndim=2, include_diagonals=False):
    r'''
    Helper function to generate the axial shifts that will be performed on
    the image to identify bordering pixels/voxels
    '''
    if ndim == 2:
        if include_diagonals:
            neighbors = square(3)
        else:
            neighbors = diamond(1)
        neighbors[1, 1] = 0
        x, y = np.where(neighbors)
        x -= 1
        y -= 1
        return np.vstack((x, y)).T
    else:
        if include_diagonals:
            neighbors = cube(3)
        else:
            neighbors = octahedron(1)
        neighbors[1, 1, 1] = 0
        x, y, z = np.where(neighbors)
        x -= 1
        y -= 1
        z -= 1
        return np.vstack((x, y, z)).T
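
A minimal usage sketch (illustrative only, using the assumed imports above): roll a binary image along each returned shift and flag foreground pixels that touch the background.

im = np.zeros((5, 5), dtype=bool)
im[1:4, 1:4] = True
borders = np.zeros_like(im)
for shift in _get_axial_shifts(ndim=2):
    rolled = np.roll(im, tuple(shift), axis=(0, 1))
    borders |= im & ~rolled  # foreground pixel with a background neighbor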
Example #2

# Assumed imports for this snippet:
import numpy as np
import scipy.ndimage as ndi
from skimage.morphology import octahedron
from skimage.segmentation import watershed
def _watershed_split(image,
                     labels,
                     points,
                     compactness=200,
                     connectivity_octahedron=7):
    """
    Split labels using points as markers for watershed.
    """
    connectivity = octahedron(connectivity_octahedron)
    points = np.round(points).astype(int)
    coords = tuple([points[:, i] for i in range(points.shape[1])])
    p_lab = labels[coords]
    p_lab = np.unique(p_lab)
    p_lab = p_lab[p_lab != 0]
    # generate a mask corresponding to the labels that need to be split
    mask = np.zeros(labels.shape, dtype=bool)
    for lab in p_lab:
        mask |= labels == lab
    # split the labels using the points (in the masked image)
    markers_bool = np.zeros(labels.shape, dtype=bool)
    markers_bool[coords] = True
    markers, _ = ndi.label(markers_bool, output=labels.dtype)
    new_labels = watershed(
        image,
        markers=markers,
        mask=mask,
        compactness=compactness,
        connectivity=connectivity,
    )
    labels[mask] = new_labels[mask] + labels.max()
    return labels
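
A minimal sketch of calling _watershed_split; the synthetic volume and seed points are illustrative only.

image = np.zeros((20, 40, 40))
image[10, 12, 20] = image[10, 28, 20] = 1.0
image = ndi.gaussian_filter(image, sigma=6)       # two blobs blurred together
labels = (image > image.mean()).astype(np.int32)  # a single merged object
points = np.array([[10, 12, 20], [10, 28, 20]])   # one marker per blob
labels = _watershed_split(image, labels, points)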
Example #3

# Assumed imports; `config` and `setup_channels` are project-level helpers
# (magmap) not shown here.
from typing import Optional, Sequence

import numpy as np
from skimage import filters, morphology, restoration
def denoise_roi(roi: np.ndarray,
                channel: Optional[Sequence[int]] = None) -> np.ndarray:
    """Denoise and further preprocess an image.
    
    Applies saturation, denoising, unsharp filtering, and erosion as image
    preprocessing for blob detection.

    Each step can be configured, including being turned off, through
    :attr:`magmap.settings.config.roi_profiles`.
    
    Args:
        roi: Region of interest as a 3D (z, y, x) array. 4D arrays with a
            channel dimension are not allowed because the scikit-image
            Gaussian filter only accepts exactly 3 channels, presumably RGB.
        channel: Sequence of channel indices in ``roi`` to
            saturate. Defaults to None to use all channels.
    
    Returns:
        Denoised region of interest.
    
    """
    multichannel, channels = setup_channels(roi, channel, 3)
    roi_out = None
    for chl in channels:
        # get single channel
        roi_show = roi[..., chl] if multichannel else roi
        settings = config.get_roi_profile(chl)
        # find gross density
        saturated_mean = np.mean(roi_show)

        # further saturation
        denoised = np.clip(roi_show, settings["clip_min"],
                           settings["clip_max"])

        tot_var_denoise = settings["tot_var_denoise"]
        if tot_var_denoise:
            # total variation denoising
            denoised = restoration.denoise_tv_chambolle(denoised,
                                                        weight=tot_var_denoise)

        # sharpening
        unsharp_strength = settings["unsharp_strength"]
        if unsharp_strength:
            blur_size = 8
            blurred = filters.gaussian(denoised, blur_size)
            high_pass = denoised - unsharp_strength * blurred
            denoised = denoised + high_pass

        # further erode denser regions to decrease overlap among blobs
        thresh_eros = settings["erosion_threshold"]
        if thresh_eros and saturated_mean > thresh_eros:
            #print("denoising for saturated mean of {}".format(saturated_mean))
            denoised = morphology.erosion(denoised, morphology.octahedron(1))
        if multichannel:
            if roi_out is None:
                roi_out = np.zeros(roi.shape, dtype=denoised.dtype)
            roi_out[..., chl] = denoised
        else:
            roi_out = denoised
    return roi_out
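
The unsharp step above amounts to adding back a high-pass residual; a standalone sketch of just that step (scikit-image also provides filters.unsharp_mask with a similar radius/amount parameterization):

def unsharp(img, strength, blur_size=8):
    # result = img + (img - strength * blurred), as in denoise_roi above
    blurred = filters.gaussian(img, blur_size)
    return img + (img - strength * blurred)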
Example #4

# Assumed imports; fun_cilindro, fun_estrella_cil_vol and fun_estrella_rot_vol
# are project-specific shape generators not shown here.
import random as rnd

import numpy as np
from skimage import morphology
def fun_generar_vol_random(tipo_forma, tam_limite, transp_forma):
    # Base volume generation
    if (tipo_forma == 1):
        # Random size
        tam_aux = rnd.randint(tam_limite[0], tam_limite[1])
        # Shape creation
        vol_forma = morphology.cube(tam_aux)

    elif (tipo_forma == 2):
        # Random size
        tam_aux = rnd.randint(tam_limite[0], tam_limite[1])
        # Shape creation
        vol_forma = morphology.ball(tam_aux)

    elif (tipo_forma == 3):
        # Random size
        tam_forma = (rnd.randint(tam_limite[0], tam_limite[1]),
                     rnd.randint(tam_limite[0], tam_limite[1]),
                     rnd.randint(tam_limite[0] * 2, tam_limite[1] * 2))
        # Shape creation
        vol_forma = fun_cilindro(tam_forma)

    elif (tipo_forma == 4):
        # Random size
        tam_aux = rnd.randint(tam_limite[0], tam_limite[1])
        # Shape creation
        vol_forma = morphology.octahedron(tam_aux)

    elif (tipo_forma == 5):
        # Random size
        tam_forma = (rnd.randint(tam_limite[0], tam_limite[1]),
                     rnd.randint(tam_limite[0], tam_limite[1]),
                     rnd.randint(tam_limite[0] * 2, tam_limite[1] * 2))
        # Shape creation
        vol_forma = fun_estrella_cil_vol(tam_forma)

    elif (tipo_forma == 6):
        # Random size
        tam_aux = rnd.randint(tam_limite[0], tam_limite[1])
        # Shape creation
        vol_forma = fun_estrella_rot_vol(tam_aux)

    else:
        # Guard: vol_forma would otherwise be undefined for unknown types
        raise ValueError('tipo_forma must be between 1 and 6')

    # Shape label population
    label_forma = np.multiply(vol_forma, tipo_forma)
    # Transparency
    vol_forma = np.multiply(vol_forma, transp_forma)
    tam_forma = vol_forma.shape

    return (tam_forma, vol_forma, label_forma)
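
A usage sketch with illustrative values (the project-specific shape helpers are still required for types 3, 5 and 6):

tam, vol, labels = fun_generar_vol_random(
    tipo_forma=2,        # ball
    tam_limite=(3, 8),   # size range to draw from
    transp_forma=0.5,    # transparency value applied to the volume
)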
Example #5

# Assumed imports for this snippet:
import numpy as np
from scipy.ndimage import distance_transform_edt
from skimage.filters import threshold_otsu
from skimage.measure import label
from skimage.morphology import cube, octahedron, opening
from skimage.segmentation import watershed
def watershed_3d(feature, coherence, thresh_method, static):
    # kernel = cube(3)  # try other forms
    kernel = octahedron(1)
    assert thresh_method in ['otsu', 'static', 'static-inverted',
                             'binary'], 'unknown thresholding method'
    if thresh_method == 'otsu':
        try:
            thresh = feature < threshold_otsu(
                feature)  # mask=True for background
        except ValueError:
            # if the feature is monochromatic
            return np.zeros_like(feature, dtype=bool)
    elif thresh_method == 'static-inverted':
        thresh = feature > static
    elif thresh_method == 'static':
        thresh = feature < static
    else:  # if the image is already binary
        thresh = (feature == 0)
    opened = opening(thresh, kernel)
    # opened = opening(opened, kernel)
    # opened = opening(opened, kernel)

    # sure background area
    # sure_bg = dilation(opened, kernel)
    # sure_bg = dilation(sure_bg, kernel)
    # sure_bg = dilation(sure_bg, kernel)
    # Finding sure foreground area
    dist_transform = distance_transform_edt(opened)
    sure_fg = dist_transform > coherence * dist_transform.max()

    # Finding unknown region
    unknown = opened ^ sure_fg

    # Marker labelling
    markers = label(sure_fg, background=0)
    # Add one to all labels so that sure background is not 0, but 1
    markers += 1
    # Now, mark the region of unknown with zero
    markers[unknown] = 0
    water = watershed(-dist_transform, markers, mask=thresh)
    return (water == 1)
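
A minimal sketch on a synthetic volume; under 'static', voxels below the threshold count as foreground, and the returned mask is the region flooded from marker 1:

vol = np.ones((20, 20, 20))
vol[5:15, 5:15, 5:15] = 0.0   # dark cube becomes foreground
bg_mask = watershed_3d(vol, coherence=0.5, thresh_method='static', static=0.5)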
Example #6

# Assumed import for this snippet:
import matplotlib.pyplot as plt
def objects():
    from skimage.morphology import (square, rectangle, diamond, disk, cube,
                                    octahedron, ball, octagon, star)
    from mpl_toolkits.mplot3d import Axes3D
    # Generate 2D and 3D structuring elements.
    struc_2d = {
        "square(15)": square(15),
        "rectangle(15, 10)": rectangle(15, 10),
        "diamond(7)": diamond(7),
        "disk(7)": disk(7),
        "octagon(7, 4)": octagon(7, 4),
        "star(5)": star(5)
    }

    struc_3d = {
        "cube(11)": cube(11),
        "octahedron(5)": octahedron(5),
        "ball(5)": ball(5)
    }

    # Visualize the elements.
    fig = plt.figure(figsize=(8, 8))

    idx = 1
    for title, struc in struc_2d.items():
        ax = fig.add_subplot(3, 3, idx)
        ax.imshow(struc, cmap="Paired", vmin=0, vmax=12)
        for i in range(struc.shape[0]):
            for j in range(struc.shape[1]):
                ax.text(j, i, struc[i, j], ha="center", va="center", color="w")
        ax.set_axis_off()
        ax.set_title(title)
        idx += 1

    for title, struc in struc_3d.items():
        ax = fig.add_subplot(3, 3, idx, projection=Axes3D.name)
        ax.voxels(struc)
        ax.set_title(title)
        idx += 1

    fig.tight_layout()
    plt.show()
Example #7

    # Method of a NIfTI segmentation class; assumed imports:
    #   import nibabel as nib
    #   import numpy as np
    #   from skimage.measure import label
    #   from skimage.morphology import (binary_closing, octahedron,
    #                                   remove_small_objects)
    def SegmentationByTH(self, Imin, skeleton=False):
        """
        Save the segmentation binarized between the min and max thresholds.
        If the min threshold is the final one for a case (``skeleton=True``),
        small objects are removed from the segmentation and holes are closed
        with binary closing, both via skimage.morphology.

        :param Imin: the minimum intensity threshold.
        :param skeleton: if True, save the segmentation after removing small
            objects and applying binary closing.
        :return: Imin and the number of connected components.
        """
        nifti_file = nib.load(self.path_to_nifti)
        img_data = nifti_file.get_fdata()
        self.Imin = Imin
        # clip above Imax first; binarizing first would make the upper-bound
        # comparison a no-op
        img_data[img_data > self.Imax] = 0
        img_data[img_data < Imin] = 0
        img_data[img_data >= Imin] = 1
        self.img_data = img_data
        if not skeleton:
            try:
                new_nifti = nib.Nifti1Image(img_data, nifti_file.affine)
                nib.save(new_nifti,
                         f'{self.name}_seg_<{Imin}>_<{self.Imax}>.nii.gz')
                number_of_components = label(img_data).max()
                return Imin, number_of_components
            except Exception:
                print(f'Something went wrong with {self.name}')
                return
        else:
            img_data = label(img_data)
            # connectivity is ignored by remove_small_objects for labeled input
            img_data = remove_small_objects(img_data,
                                            min_size=64,
                                            connectivity=24)
            img_data = binary_closing(img_data, octahedron(2))
            new_nifti = nib.Nifti1Image(img_data.astype(float),
                                        nifti_file.affine)
            nib.save(new_nifti, f'{self.name}_SkeletonSegmentation.nii.gz')
            return
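
The skeleton clean-up reduces to a standalone sequence; a sketch on synthetic data (shapes and sizes are illustrative):

import numpy as np
from skimage.measure import label
from skimage.morphology import binary_closing, octahedron, remove_small_objects

seg = np.random.rand(32, 32, 32) > 0.95          # sparse binary volume
lab = label(seg)                                 # connected components
lab = remove_small_objects(lab, min_size=64)     # drop speckles
closed = binary_closing(lab > 0, octahedron(2))  # bridge small gaps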
Example #8

# Assumed imports; `config`, `libmag` and `segmenter` are project-level
# modules not shown here.
import numpy as np
from skimage import filters, morphology
def threshold(roi):
    """Thresholds the ROI, with options for various techniques as well as
    post-thresholding morphological filtering.
    
    Args:
        roi: Region of interest, given as [z, y, x].
    
    Returns:
        The thresholded region.
    """
    settings = config.roi_profile
    thresh_type = settings["thresholding"]
    size = settings["thresholding_size"]
    thresholded = roi
    roi_thresh = 0

    # various thresholding models
    if thresh_type == "otsu":
        try:
            roi_thresh = filters.threshold_otsu(roi, size)
            thresholded = roi > roi_thresh
        except ValueError as e:
            # np.histogram may raise an error if the ROI contains NaNs; as a
            # workaround, set all elements to False
            print(e)
            thresholded = roi > np.max(roi)
    elif thresh_type == "local":
        roi_thresh = np.copy(roi)
        for i in range(roi_thresh.shape[0]):
            roi_thresh[i] = filters.threshold_local(roi_thresh[i],
                                                    size,
                                                    mode="wrap")
        thresholded = roi > roi_thresh
    elif thresh_type == "local-otsu":
        # TODO: not working yet
        selem = morphology.disk(15)
        print(np.min(roi), np.max(roi))
        roi_thresh = np.copy(roi)
        roi_thresh = libmag.normalize(roi_thresh, -1.0, 1.0)
        print(roi_thresh)
        print(np.min(roi_thresh), np.max(roi_thresh))
        for i in range(roi.shape[0]):
            roi_thresh[i] = filters.rank.otsu(roi_thresh[i], selem)
        thresholded = roi > roi_thresh
    elif thresh_type == "random_walker":
        thresholded = segmenter.segment_rw(roi, size)

    # dilation/erosion, adjusted based on overall intensity
    thresh_mean = np.mean(thresholded)
    print("thresh_mean: {}".format(thresh_mean))
    selem_dil = None
    selem_eros = None
    if thresh_mean > 0.45:
        thresholded = morphology.erosion(thresholded, morphology.cube(1))
        selem_dil = morphology.ball(1)
        selem_eros = morphology.octahedron(1)
    elif thresh_mean > 0.35:
        thresholded = morphology.erosion(thresholded, morphology.cube(2))
        selem_dil = morphology.ball(2)
        selem_eros = morphology.octahedron(1)
    elif thresh_mean > 0.3:
        selem_dil = morphology.ball(1)
        selem_eros = morphology.cube(5)
    elif thresh_mean > 0.1:
        selem_dil = morphology.ball(1)
        selem_eros = morphology.cube(4)
    elif thresh_mean > 0.05:
        selem_dil = morphology.octahedron(2)
        selem_eros = morphology.octahedron(2)
    else:
        selem_dil = morphology.octahedron(1)
        selem_eros = morphology.octahedron(2)
    if selem_dil is not None:
        thresholded = morphology.dilation(thresholded, selem_dil)
    if selem_eros is not None:
        thresholded = morphology.erosion(thresholded, selem_eros)
    return thresholded
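
The adaptive morphology above condenses to: threshold, then pick footprints from the foreground fraction. A standalone sketch with illustrative cutoffs:

roi = np.random.rand(8, 64, 64)
binary = roi > filters.threshold_otsu(roi)
if binary.mean() > 0.3:       # denser masks get a larger erosion footprint
    selem_eros = morphology.cube(5)
else:
    selem_eros = morphology.octahedron(1)
binary = morphology.dilation(binary, morphology.ball(1))
binary = morphology.erosion(binary, selem_eros)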
Example #9

# Assumed imports; `plot_3d`, `_markers_from_blobs` and `_carve_segs` are
# project-level helpers not shown here.
import numpy as np
from skimage import measure, morphology, segmentation
def segment_rw(roi,
               channel,
               beta=50.0,
               vmin=0.6,
               vmax=0.65,
               remove_small=None,
               erosion=None,
               blobs=None,
               get_labels=False):
    """Segments an image using the Random-Walker algorithm.
    
    Args:
        roi: Region of interest to segment.
        channel: Channel to pass to :func:``plot_3d.setup_channels``.
        beta: Random-Walker beta term.
        vmin: Values under which to exclude in markers; defaults to 0.6. 
            Ignored if ``blobs`` is given.
        vmax: Values above which to exclude in markers; defaults to 0.65. 
            Ignored if ``blobs`` is given.
        remove_small: Threshold size of small objects to remove; defaults 
            to None to ignore.
        erosion: Structuring element size for erosion; defaults 
            to None to ignore.
        blobs: Blobs to use for markers; defaults to None, in which 
            case markers will be determined based on ``vmin``/``vmax`` 
            thresholds.
        get_labels: True to measure and return labels from the 
            resulting segmentation instead of returning the segmentations 
            themselves; defaults to False.
    
    Returns:
        List of the Random-Walker segmentations for the given channels.
        If ``get_labels`` is True, the measured labels for the segmented 
        regions will be returned instead of the segmentations themselves.
    """
    print("Random-Walker based segmentation...")
    labels = []
    walkers = []
    multichannel, channels = plot_3d.setup_channels(roi, channel, 3)
    for i in channels:
        roi_segment = roi[..., i] if multichannel else roi
        if blobs is None:
            # mark unknown pixels as 0 by distinguishing known background
            # and foreground
            markers = np.zeros(roi_segment.shape, dtype=np.uint8)
            markers[roi_segment < vmin] = 2
            markers[roi_segment >= vmax] = 1
        else:
            # derive markers from blobs
            markers = _markers_from_blobs(roi_segment, blobs)

        # perform the segmentation; conjugate gradient with multigrid
        # preconditioner option (cg_mg), which is faster but req pyamg
        walker = segmentation.random_walker(roi_segment,
                                            markers,
                                            beta=beta,
                                            mode="cg_mg")

        # clean up segmentation

        #lib_clrbrain.show_full_arrays()
        walker = _carve_segs(walker, blobs)
        if remove_small:
            # remove artifacts
            walker = morphology.remove_small_objects(walker, remove_small)
        if erosion:
            # attempt to reduce label connections by eroding
            walker = morphology.erosion(walker, morphology.octahedron(erosion))

        if get_labels:
            # label neighboring pixels to segmented regions
            # TODO: check if necessary; useful only if blobs not given?
            label = measure.label(walker, background=0)
            labels.append(label)
            #print("label:\n", label)

        walkers.append(walker)
        #print("walker:\n", walker)

    if get_labels:
        return labels
    return walkers
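
The marker construction and walker call reduce to this minimal sketch (mode="cg_mg" requires pyamg; thresholds are illustrative):

roi = np.random.rand(10, 32, 32)
markers = np.zeros(roi.shape, dtype=np.uint8)
markers[roi < 0.2] = 2   # known background
markers[roi > 0.8] = 1   # known foreground
walker = segmentation.random_walker(roi, markers, beta=50.0, mode="cg_mg")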
Example #11

# Assumed imports; `check_type` and `check_iterable_type` are project-level
# validation helpers not shown here.
from typing import Tuple

import numpy as np
import pandas as pd
from scipy import ndimage
from skimage import filters, measure, morphology
def find_somas(volume: np.ndarray, res: list) -> Tuple[int, np.ndarray, np.ndarray]:
    r"""Find bright neuron somas in an input volume.

    This simple soma detector assumes that somas are brighter than the
    rest of the objects contained in the input volume.

    To detect somas, these steps are performed:

    #. **Check input volume shape.** This detector requires the `x` and `y` dimensions of the input volumes to be larger than `20` pixels.

    #. **Zoom volume.** We found that this simple soma detector works best when the input volume has size `160 x 160 x 50`. We use `ndimage.zoom <https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.zoom.html>`_ to scale the input volume size to the desired shape.

    #. **Binarize volume.** We use `Otsu thresholding <https://scikit-image.org/docs/dev/api/skimage.filters.html#skimage.filters.threshold_otsu>`_ to binarize the image.

    #. **Erode the binarized image.** We erode the binarized image with a structuring element which size is directly proportional to the maximum zoom factor applied to the input volume.

    #. **Remove unreasonable connected components.** After erosion, we compute the equivalent diameter `d` of each connected component, and only keep those ones such that `5\mu m \leq d < 21 \mu m`

    #. **Find relative centroids.** Finally, we compute the centroids of the remaining connected components. The centroids are in voxel units, relative to the input volume.

    Parameters
    ----------
    volume : numpy.ndarray
        The 3D image array to run the detector on.

    res : list
        A `1 x 3` list containing the resolution of each voxel in `nm`.

    Returns
    -------
    label : bool
        A boolean value indicating whether the detector found any somas in the input volume.

    rel_centroids : numpy.ndarray
        A `N x 3` array containing the relative voxel positions of the detected somas.

    out : numpy.ndarray
        A `160 x 160 x 50` array containing the detection mask.

    Examples
    --------
    >>> # download a volume
    >>> dir = "s3://open-neurodata/brainlit/brain1"
    >>> dir_segments = "s3://open-neurodata/brainlit/brain1_segments"
    >>> volume_keys = "4807349.0_3827990.0_2922565.75_4907349.0_3927990.0_3022565.75"
    >>> mip = 1
    >>> ngl_sess = NeuroglancerSession(
    >>>     mip=mip, url=dir, url_segments=dir_segments, use_https=False
    >>> )
    >>> res = ngl_sess.cv_segments.scales[ngl_sess.mip]["resolution"]
    >>> volume_coords = np.array(os.path.basename(volume_keys).split("_")).astype(float)
    >>> volume_vox_min = np.round(np.divide(volume_coords[:3], res)).astype(int)
    >>> volume_vox_max = np.round(np.divide(volume_coords[3:], res)).astype(int)
    >>> bbox = Bbox(volume_vox_min, volume_vox_max)
    >>> img = ngl_sess.pull_bounds_img(bbox)
    >>> # apply soma detector
    >>> label, rel_centroids, out = find_somas(img, res)
    """

    check_type(volume, np.ndarray)
    check_iterable_type(volume.flatten(), np.uint16)
    volume_dim = volume.ndim
    if volume_dim != 3:
        raise ValueError("Input volume must be three-dimensional")
    if volume.shape[0] < 20 or volume.shape[1] < 20:
        raise ValueError("Input volume is too small")

    check_type(res, list)
    check_iterable_type(res, (int, float))
    if len(res) != 3:
        raise ValueError("Resolution must be three-dimensional")
    if np.any([el == 0 for el in res]):
        raise ValueError("Resolution must be non-zero at every position")

    desired_size = np.array([160, 160, 50])
    zoom_factors = np.divide(desired_size, volume.shape)
    res = np.divide(res, zoom_factors)
    out = ndimage.zoom(volume, zoom=zoom_factors)
    out = out / np.max(out.flatten())
    # 1) binarize volume using Otsu's method
    t = filters.threshold_otsu(out)
    out = out > t
    # 2) erode with structuring element proportional to zoom factors
    selem_size = np.amax(np.ceil(zoom_factors)).astype(int)
    clean_selem = morphology.octahedron(selem_size)
    out = morphology.erosion(out, clean_selem)
    # 3) identify connected components
    out, num_labels = morphology.label(out, background=0, return_num=True)
    # 4) remove connected components with diameter not in reasonable range, find centroids of candidate regions
    properties = ["label", "equivalent_diameter"]
    props = measure.regionprops_table(out, properties=properties)
    df_props = pd.DataFrame(props)
    rel_centroids = []
    for _, row in df_props.iterrows():
        l = row["label"]
        d = row["equivalent_diameter"]
        dmu = d * np.mean(res[:1]) / 1000
        if dmu < 5 or dmu >= 21:
            out[out == l] = 0
            num_labels -= 1
        else:
            ids = np.where(out == l)
            centroid = np.round([np.median(u) for u in ids]).astype(int)
            centroid = np.divide(centroid, zoom_factors)
            rel_centroids.append(centroid)
    return num_labels > 0, np.array(rel_centroids), out
Example #12

# Assumed imports; `DataModel` and `log` are project-level objects not shown
# here, and the `measure` name appears to refer to scipy.ndimage statistics.
import numpy as np
from scipy import ndimage as measure  # measure.mean, measure.sum, etc.
from scipy.ndimage import label as splabel
from skimage.morphology import octahedron
from sklearn.decomposition import PCA
def label_objects(dataset=None, labels=None, out=None, out_features=None,
                  source=None, return_labels=False):
    DM = DataModel.instance()

    log.info('+ Loading data into memory')
    data = DM.load_slices(dataset)
    if labels is None:
        data += 1
        labels = set(np.unique(data)) - set([0])
    else:
        data += 1
        labels = np.asarray(labels) + 1

    obj_labels = []

    log.info('+ Extracting individual objects')
    new_labels = np.zeros(data.shape, np.int32)
    total_labels = 0
    num = 0

    for label in labels:
        mask = (data == label)
        tmp_data = data.copy()
        tmp_data[~mask] = 0
        tmp_labels, num = splabel(tmp_data, structure=octahedron(1))
        mask = (tmp_labels > 0)
        new_labels[mask] = tmp_labels[mask] + total_labels
        total_labels += num
        obj_labels += [label] * num

    log.info('+ {} Objects found'.format(total_labels))
    log.info('+ Saving results')
    DM.create_empty_dataset(out, DM.data_shape, new_labels.dtype)
    DM.write_slices(out, new_labels, params=dict(active=True, num_objects=total_labels))

    log.info('+ Loading source to memory')
    data = DM.load_slices(source)
    objs = new_labels
    objects = new_labels
    num_objects = total_labels
    objlabels = np.arange(1, num_objects+1)

    log.info('+ Computing Average intensity')
    feature = measure.mean(data, objs, index=objlabels)
    DM.create_empty_dataset(out_features[0], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[0], feature, params=dict(active=True))

    """log.info('+ Computing Median intensity')
    objs.shape = -1
    data.shape = -1
    feature = binned_statistic(objs, data, statistic='median',
                               bins=num_objects+1)[0]
    feature = feature[objlabels]
    out_features[1].write_direct(feature)
    out_features[1].attrs['active'] = True
    objs.shape = dataset.shape
    data.shape = dataset.shape"""

    log.info('+ Computing Sum of intensity')
    feature = measure.sum(data, objs, index=objlabels)
    DM.create_empty_dataset(out_features[1], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[1], feature, params=dict(active=True))

    log.info('+ Computing Standard Deviation of intensity')
    feature = measure.standard_deviation(data, objs, index=objlabels)
    DM.create_empty_dataset(out_features[2], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[2], feature, params=dict(active=True))

    log.info('+ Computing Variance of intensity')
    feature = measure.variance(data, objs, index=objlabels)
    DM.create_empty_dataset(out_features[3], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[3], feature, params=dict(active=True))

    log.info('+ Computing Area')
    objs.shape = -1
    feature = np.bincount(objs, minlength=num_objects+1)[1:]
    DM.create_empty_dataset(out_features[4], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[4], feature, params=dict(active=True))
    DM.create_empty_dataset(out_features[5], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[5], np.log10(feature), params=dict(active=True))
    objs.shape = data.shape

    log.info('+ Computing Bounding Box')
    obj_windows = measure.find_objects(objs)
    feature = []
    depth = []
    height = []
    width = []
    for w in obj_windows:
        feature.append((w[0].stop - w[0].start) *
                       (w[1].stop - w[1].start) *
                       (w[2].stop - w[2].start))
        depth.append(w[0].stop - w[0].start)
        height.append(w[1].stop - w[1].start)
        width.append(w[2].stop - w[2].start)

    feature = np.asarray(feature, np.float32)
    DM.create_empty_dataset(out_features[6], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[6], feature, params=dict(active=True))
    #depth
    depth = np.asarray(depth, np.float32)
    DM.create_empty_dataset(out_features[7], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[7], depth, params=dict(active=True))
    # height
    height = np.asarray(height, np.float32)
    DM.create_empty_dataset(out_features[8], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[8], height, params=dict(active=True))
    # width
    width = np.asarray(width, np.float32)
    DM.create_empty_dataset(out_features[9], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[9], width, params=dict(active=True))
    # log10
    DM.create_empty_dataset(out_features[10], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[10], np.log10(feature), params=dict(active=True))

    log.info('+ Computing Oriented Bounding Box')
    ori_feature = []
    ori_depth = []
    ori_height = []
    ori_width = []
    for i, w in enumerate(obj_windows):
        z, y, x = np.where(objs[w] == i+1)
        coords = np.c_[z, y, x]
        if coords.shape[0] >= 3:
            coords = PCA(n_components=3).fit_transform(coords)
        cmin, cmax = coords.min(0), coords.max(0)
        zz, yy, xx = (cmax[0] - cmin[0] + 1,
                      cmax[1] - cmin[1] + 1,
                      cmax[2] - cmin[2] + 1)
        ori_feature.append(zz * yy * xx)
        ori_depth.append(zz)
        ori_height.append(yy)
        ori_width.append(xx)

    ori_feature = np.asarray(ori_feature, np.float32)
    DM.create_empty_dataset(out_features[11], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[11], ori_feature, params=dict(active=True))
    #depth
    ori_depth = np.asarray(ori_depth, np.float32)
    DM.create_empty_dataset(out_features[12], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[12], ori_depth, params=dict(active=True))
    # height
    ori_height = np.asarray(ori_height, np.float32)
    DM.create_empty_dataset(out_features[13], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[13], ori_height, params=dict(active=True))
    # width
    ori_width = np.asarray(ori_width, np.float32)
    DM.create_empty_dataset(out_features[14], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[14], ori_width, params=dict(active=True))
    # log10
    DM.create_empty_dataset(out_features[15], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[15], np.log10(ori_feature), params=dict(active=True))

    log.info('+ Computing Positions')
    pos = measure.center_of_mass(objs, labels=objs, index=objlabels)
    pos = np.asarray(pos, dtype=np.float32)
    DM.create_empty_dataset(out_features[16], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[16], pos[:, 2].copy(), params=dict(active=True))
    DM.create_empty_dataset(out_features[17], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[17], pos[:, 1].copy(), params=dict(active=True))
    DM.create_empty_dataset(out_features[18], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[18], pos[:, 0].copy(), params=dict(active=True))

    if return_labels:
        return out, total_labels, np.asarray(obj_labels)

    return out, total_labels
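
The per-label splitting idiom above, standalone (splabel is assumed to be scipy.ndimage.label; octahedron(1) gives 6-connectivity):

mask = np.zeros((10, 10, 10), dtype=np.uint8)
mask[2:4, 2:4, 2:4] = 1
mask[6:8, 6:8, 6:8] = 1
objects, num = splabel(mask, structure=octahedron(1))
assert num == 2  # the two cubes become separate objects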
    "rectangle(7, 11) (separable)": (rectangle(7, 11, decomposition=None),
                                     rectangle(7,
                                               11,
                                               decomposition="separable")),
    "rectangle(7, 11) (sequence)": (rectangle(7, 11, decomposition=None),
                                    rectangle(7, 11,
                                              decomposition="sequence")),
    "diamond(5) (sequence)": (diamond(5, decomposition=None),
                              diamond(5, decomposition="sequence")),
    "octagon(7, 4) (sequence)": (octagon(7, 4, decomposition=None),
                                 octagon(7, 4, decomposition="sequence")),
    "cube(11) (separable)": (cube(11, decomposition=None),
                             cube(11, decomposition="separable")),
    "cube(11) (sequence)": (cube(11, decomposition=None),
                            cube(11, decomposition="sequence")),
    "octahedron(7) (sequence)": (octahedron(7, decomposition=None),
                                 octahedron(7, decomposition="sequence")),
}

# Visualize the elements

# use a similar dark blue for the 2d plots as for the 3d voxel plots
cmap = colors.ListedColormap(['white', (0.1216, 0.4706, 0.70588)])
fontdict = dict(fontsize=16, fontweight='bold')
for title, (footprint, footprint_sequence) in footprint_dict.items():
    fig = plt.figure(figsize=(12, 4))
    ndim = footprint.ndim
    num_seq = len(footprint_sequence)
    if ndim == 2:
        ax = fig.add_subplot(1, num_seq + 1, num_seq + 1)
        ax.imshow(footprint, cmap=cmap, vmin=0, vmax=1)
Example #14

# Assumed imports; `DataModel`, `DatasetManager`, `logger`, `map_blocks`,
# `pass_through`, `apply_rules` and the DataURI/String/Int types are
# project-level objects not shown here; `measure` appears to refer to
# scipy.ndimage helpers (find_objects, center_of_mass).
import ntpath

import numpy as np
import pandas as pd
from scipy import ndimage
from scipy import ndimage as measure
from skimage.morphology import octahedron
from sklearn.decomposition import PCA
def label_splitter(src: DataURI, dst: DataURI, mode: String,
                   pipelines_id: DataURI, analyzers_id: DataURI,
                   annotations_id: DataURI, feature_id: DataURI,
                   split_ops: dict, background_label: Int) -> "SEGMENTATION":

    if mode == '1':
        src = DataModel.g.dataset_uri(ntpath.basename(pipelines_id),
                                      group="pipelines")
        print(f"Analyzer calc on pipeline src {src}")
    elif mode == '2':
        src = DataModel.g.dataset_uri(ntpath.basename(analyzers_id),
                                      group="analyzer")
        print(f"Analyzer calc on analyzer src {src}")
    elif mode == '3':
        src = DataModel.g.dataset_uri(ntpath.basename(annotations_id),
                                      group="annotations")
        print(f"Analyzer calc on annotation src {src}")

    with DatasetManager(src, out=None, dtype="int32", fillvalue=0) as DM:
        seg = DM.sources[0][:]
        logger.debug(f"src_dataset shape {seg[:].shape}")

    seg = seg.astype(np.uint32) & 15

    logger.debug(f"Calculating stats on feature: {feature_id}")
    src = DataModel.g.dataset_uri(ntpath.basename(feature_id),
                                  group="features")
    with DatasetManager(src, out=None, dtype="float32", fillvalue=0) as DM:
        feature_dataset_arr = DM.sources[0][:]

    # if labels==None:
    labels = set(np.unique(seg)) - set([background_label])
    logger.info(f"Labels in segmentation: {labels}")

    new_labels = np.zeros(seg.shape, np.int32)
    total_labels = 0
    obj_labels = []

    for label in labels:
        mask = seg == label
        tmp_data = seg.copy()
        tmp_data[~mask] = 0
        tmp_labels, num = ndimage.measurements.label(tmp_data,
                                                     structure=octahedron(1))
        mask = tmp_labels > 0
        new_labels[mask] = tmp_labels[mask] + total_labels
        total_labels += num
        obj_labels += [label] * num

    print(f"Number of unique labels: {np.unique(new_labels)}")

    objs = new_labels
    #objects = new_labels
    num_objects = total_labels
    logger.debug(f"Number of objects {num_objects}")
    objlabels = np.arange(1, num_objects + 1)

    obj_windows = measure.find_objects(objs)
    feature = []
    depth = []
    height = []
    width = []
    for w in obj_windows:
        feature.append((w[0].stop - w[0].start) * (w[1].stop - w[1].start) *
                       (w[2].stop - w[2].start))
        depth.append(w[0].stop - w[0].start)
        height.append(w[1].stop - w[1].start)
        width.append(w[2].stop - w[2].start)

    ori_feature = []
    ori_depth = []
    ori_height = []
    ori_width = []

    for i, w in enumerate(obj_windows):
        z, y, x = np.where(objs[w] == i + 1)
        coords = np.c_[z, y, x]
        if coords.shape[0] >= 3:
            coords = PCA(n_components=3).fit_transform(coords)
        cmin, cmax = coords.min(0), coords.max(0)
        zz, yy, xx = (
            cmax[0] - cmin[0] + 1,
            cmax[1] - cmin[1] + 1,
            cmax[2] - cmin[2] + 1,
        )
        ori_feature.append(zz * yy * xx)
        ori_depth.append(zz)
        ori_height.append(yy)
        ori_width.append(xx)

    feature_ori = np.asarray(ori_feature, np.float32)
    feature_ori_depth = np.asarray(ori_depth, np.float32)
    feature_ori_height = np.asarray(ori_height, np.float32)
    feature_ori_width = np.asarray(ori_width, np.float32)
    feature_ori_log10 = np.log10(feature_ori)

    feature_bb_vol = np.asarray(feature, np.float32)
    feature_bb_vol_log10 = np.log10(feature_bb_vol)
    feature_bb_depth = np.asarray(depth, np.float32)
    feature_bb_height = np.asarray(height, np.float32)
    feature_bb_width = np.asarray(width, np.float32)

    feature_sum = ndimage.measurements.sum(feature_dataset_arr,
                                           objs,
                                           index=objlabels)
    feature_mean = ndimage.measurements.mean(feature_dataset_arr,
                                             objs,
                                             index=objlabels)
    feature_std = ndimage.measurements.standard_deviation(feature_dataset_arr,
                                                          objs,
                                                          index=objlabels)
    feature_var = ndimage.measurements.variance(feature_dataset_arr,
                                                objs,
                                                index=objlabels)

    feature_pos = measure.center_of_mass(objs, labels=objs, index=objlabels)
    feature_pos = np.asarray(feature_pos, dtype=np.float32)

    features_df = pd.DataFrame({
        "Sum": feature_sum,
        "Mean": feature_mean,
        "Std": feature_std,
        "Var": feature_var,
        "z": feature_pos[:, 0],
        "x": feature_pos[:, 1],
        "y": feature_pos[:, 2],
        "bb_volume": feature_bb_vol,
        "bb_vol_log10": feature_bb_vol_log10,
        "bb_depth": feature_bb_depth,
        "bb_height": feature_bb_height,
        "bb_width": feature_bb_width,
        "ori_volume": feature_ori,
        "ori_vol_log10": feature_ori_log10,
        "ori_depth": feature_ori_depth,
        "ori_height": feature_ori_height,
        "ori_width": feature_ori_width,
    })

    sel_start, sel_end = 0, len(features_df)
    int_cols = ["z", "x", "y"]
    float_cols = ["Sum", "Mean", "Std", "Var", "bb_volume", "bb_vol_log10",
                  "bb_depth", "bb_height", "bb_width", "ori_volume",
                  "ori_vol_log10", "ori_depth", "ori_height", "ori_width"]
    features_array = np.array(
        [[np.int32(np.float32(features_df.iloc[i][c])) for c in int_cols] +
         [np.float32(features_df.iloc[i][c]) for c in float_cols]
         for i in range(sel_start, sel_end)])

    print(split_ops)
    rules = []
    calculate = False  # otherwise unbound when no split ops are provided
    for k in split_ops.keys():
        if k != 'context':
            split_op_card = split_ops[k]
            print(split_op_card)
            split_feature_index = int(split_op_card["split_feature_index"])
            split_op = int(split_op_card["split_op"])
            split_threshold = float(split_op_card["split_threshold"])

            if int(split_op) > 0:
                calculate = True
                s = int(split_op) - 1  # split_op starts at 1
                feature_names = [
                    "z", "x", "y", "Sum", "Mean", "Std", "Var", "bb_volume",
                    "bb_vol_log10", "bb_depth", "bb_height", "bb_width",
                    "ori_volume", "ori_vol_log10", "ori_depth", "ori_height",
                    "ori_width"
                ]
                feature_index = int(
                    split_feature_index
                )  # feature_names.index(split_feature_index)
                rules.append((int(feature_index), s, split_threshold))
                print(
                    f"Adding split rule: {split_feature_index} {split_op} {split_threshold}"
                )
            else:
                calculate = False

    if calculate:
        masked_out, result_features = apply_rules(features_array, -1, rules,
                                                  np.array(objlabels),
                                                  num_objects)
        print(f"Masking out: {masked_out}")
        bg_label = max(np.unique(new_labels))
        print(f"Masking out bg label {bg_label}")
        for i, l in enumerate(masked_out):
            if l != -1:
                new_labels[new_labels == l] = 0

        new_labels[new_labels == bg_label] = 0
        new_labels = (new_labels > 0) * 1.0
    else:
        result_features = features_array

    map_blocks(pass_through, new_labels, out=dst, normalize=False)

    return result_features, features_array
Example #15

# Assumed imports; `DM` (a DataModel instance) and `log` are project-level
# objects not shown here. The binary operators appear to be the scipy.ndimage
# versions, since they are called with a `structure` keyword.
import numpy as np
from scipy.ndimage import (binary_closing, binary_dilation, binary_erosion,
                           binary_fill_holes, binary_opening)
from skimage.morphology import diamond, octahedron
def refine_label(data=None, label=None, method=None, radius=1, slide=None):
    ds = data
    data = DM.load_slices(ds)
    rshape = DM.region_shape()

    zmin = DM.active_roi[0].start
    ymin = DM.active_roi[1].start
    xmin = DM.active_roi[2].start

    if method == 'fill_holes':
        radius = 1

    if isinstance(slide, str):
        mask = (data == label)
        if slide == '3D':
            msize = max(rshape)
            selem = octahedron(radius)
        else:
            msize = max(rshape[1:])
            selem = diamond(radius)
    else:
        msize = max(rshape[1:])
        mask = (data[slide] == label)
        selem = diamond(radius)

    if radius > np.sqrt(msize):
        raise ValueError('Radius too large')

    funcs = {
        'dilation': binary_dilation,
        'erosion': binary_erosion,
        'opening': binary_opening,
        'closing': binary_closing,
        'fill_holes': binary_fill_holes
    }

    if method not in funcs:
        log.info('+ Refinement {} not supported'.format(method))
        return None

    log.info('+ Performing {} refinement ({})'.format(method, slide))
    if slide != '2D' and mask.any():
        result = funcs[method](mask, structure=selem)
    else:
        result = np.zeros(data.shape, dtype=bool)
        f = funcs[method]
        for i in range(data.shape[0]):
            if mask[i].any():
                result[i] = f(mask[i], structure=selem)

    log.info('+ Calculating changes..')
    changes = (result != mask)
    if isinstance(slide, str):
        values = data[changes]
        data[mask] = -1
        data[result] = label
        changes = np.column_stack(np.where(changes)) + np.array(
            [zmin, ymin, xmin], np.int32)
    else:
        values = data[slide, changes]
        data[slide, mask] = -1
        data[slide, result] = label
        changes = np.column_stack(np.where(changes[None, ...])) + np.array(
            [zmin + slide, ymin, xmin], np.int32)

    DM.write_slices(ds, data)

    log.info('+ done.')
    return changes, values
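
A standalone sketch of the refinement dispatch (the binary mask and structuring elements are illustrative):

mask = np.zeros((16, 16), dtype=bool)
mask[4:12, 4:12] = True
mask[7, 7] = False                                   # a hole
filled = binary_fill_holes(mask)                     # 'fill_holes' branch
grown = binary_dilation(mask, structure=diamond(1))  # 'dilation' branch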