Example #1
def seeds(args):
    """
    %prog seeds [pngfile|jpgfile]

    Extract seed metrics from [pngfile|jpgfile]. Use --rows and --cols to crop image.
    """
    p = OptionParser(seeds.__doc__)
    p.set_outfile()
    opts, args, iopts = add_seeds_options(p, args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    pngfile, = args
    pf = opts.prefix or op.basename(pngfile).rsplit(".", 1)[0]
    sigma, kernel = opts.sigma, opts.kernel
    rows, cols = opts.rows, opts.cols
    labelrows, labelcols = opts.labelrows, opts.labelcols
    ff = opts.filter
    calib = opts.calibrate
    outdir = opts.outdir
    if outdir != '.':
        mkdir(outdir)
    if calib:
        calib = json.load(must_open(calib))
        pixel_cm_ratio, tr = calib["PixelCMratio"], calib["RGBtransform"]
        tr = np.array(tr)

    pngfile = convert_background(pngfile)
    resizefile, mainfile, labelfile, exif = \
                      convert_image(pngfile, pf, outdir=outdir,
                                    rotate=opts.rotate,
                                    rows=rows, cols=cols,
                                    labelrows=labelrows, labelcols=labelcols)

    oimg = load_image(resizefile)
    img = load_image(mainfile)

    fig, (ax1, ax2, ax3, ax4) = plt.subplots(ncols=4,
                                             nrows=1,
                                             figsize=(iopts.w, iopts.h))
    # Edge detection
    img_gray = rgb2gray(img)
    logging.debug("Running {0} edge detection ...".format(ff))
    if ff == "canny":
        edges = canny(img_gray, sigma=opts.sigma)
    elif ff == "roberts":
        edges = roberts(img_gray)
    elif ff == "sobel":
        edges = sobel(img_gray)
    edges = clear_border(edges, buffer_size=opts.border)
    selem = disk(kernel)
    closed = closing(edges, selem) if kernel else edges
    filled = binary_fill_holes(closed)

    # Watershed algorithm
    if opts.watershed:
        distance = distance_transform_edt(filled)
        local_maxi = peak_local_max(distance, threshold_rel=.05, indices=False)
        coordinates = peak_local_max(distance, threshold_rel=.05)
        markers, nmarkers = label(local_maxi, return_num=True)
        logging.debug("Identified {0} watershed markers".format(nmarkers))
        labels = watershed(closed, markers, mask=filled)
    else:
        labels = label(filled)

    # Object size filtering
    w, h = img_gray.shape
    canvas_size = w * h
    min_size = int(round(canvas_size * opts.minsize / 100))
    max_size = int(round(canvas_size * opts.maxsize / 100))
    logging.debug("Find objects with pixels between {0} ({1}%) and {2} ({3}%)"\
                    .format(min_size, opts.minsize, max_size, opts.maxsize))

    # Plotting
    ax1.set_title('Original picture')
    ax1.imshow(oimg)

    params = "{0}, $\sigma$={1}, $k$={2}".format(ff, sigma, kernel)
    if opts.watershed:
        params += ", watershed"
    ax2.set_title('Edge detection\n({0})'.format(params))
    closed = gray2rgb(closed)
    ax2_img = labels
    if opts.edges:
        ax2_img = closed
    elif opts.watershed:
        ax2.plot(coordinates[:, 1], coordinates[:, 0], 'g.')
    ax2.imshow(ax2_img, cmap=iopts.cmap)

    ax3.set_title('Object detection')
    ax3.imshow(img)

    filename = op.basename(pngfile)
    if labelfile:
        accession = extract_label(labelfile)
    else:
        accession = pf

    # Calculate region properties
    rp = regionprops(labels)
    rp = [x for x in rp if min_size <= x.area <= max_size]
    nb_labels = len(rp)
    logging.debug("A total of {0} objects identified.".format(nb_labels))
    objects = []
    for i, props in enumerate(rp):
        i += 1
        if i > opts.count:
            break

        y0, x0 = props.centroid
        orientation = props.orientation
        major, minor = props.major_axis_length, props.minor_axis_length
        major_dx = cos(orientation) * major / 2
        major_dy = sin(orientation) * major / 2
        minor_dx = sin(orientation) * minor / 2
        minor_dy = cos(orientation) * minor / 2
        ax2.plot((x0 - major_dx, x0 + major_dx),
                 (y0 + major_dy, y0 - major_dy), 'r-')
        ax2.plot((x0 - minor_dx, x0 + minor_dx),
                 (y0 - minor_dy, y0 + minor_dy), 'r-')

        npixels = int(props.area)
        # Sample the center of the blob for color
        d = min(int(round(minor / 2 * .35)) + 1, 50)
        x0d, y0d = int(round(x0)), int(round(y0))
        square = img[(y0d - d):(y0d + d), (x0d - d):(x0d + d)]
        pixels = []
        for row in square:
            pixels.extend(row)
        logging.debug("Seed #{0}: {1} pixels ({2} sampled) - {3:.2f}%".\
                        format(i, npixels, len(pixels), 100. * npixels / canvas_size))

        rgb = pixel_stats(pixels)
        objects.append(Seed(filename, accession, i, rgb, props, exif))
        minr, minc, maxr, maxc = props.bbox
        rect = Rectangle((minc, minr),
                         maxc - minc,
                         maxr - minr,
                         fill=False,
                         ec='w',
                         lw=1)
        ax3.add_patch(rect)
        mc, mr = (minc + maxc) / 2, (minr + maxr) / 2
        ax3.text(mc,
                 mr,
                 "{0}".format(i),
                 color='w',
                 ha="center",
                 va="center",
                 size=6)

    for ax in (ax2, ax3):
        ax.set_xlim(0, h)
        ax.set_ylim(w, 0)

    # Output identified seed stats
    ax4.text(.1, .92, "File: {0}".format(latex(filename)), color='g')
    ax4.text(.1, .86, "Label: {0}".format(latex(accession)), color='m')
    yy = .8
    fw = must_open(opts.outfile, "w")
    if not opts.noheader:
        print(Seed.header(calibrate=calib), file=fw)
    for o in objects:
        if calib:
            o.calibrate(pixel_cm_ratio, tr)
        print(o, file=fw)
        i = o.seedno
        if i > 7:
            continue
        ax4.text(.01, yy, str(i), va="center", bbox=dict(fc='none', ec='k'))
        ax4.text(.1, yy, o.pixeltag, va="center")
        yy -= .04
        ax4.add_patch(
            Rectangle((.1, yy - .025), .12, .05, lw=0, fc=rgb_to_hex(o.rgb)))
        ax4.text(.27, yy, o.hashtag, va="center")
        yy -= .06
    ax4.text(.1,
             yy,
             "(A total of {0} objects displayed)".format(nb_labels),
             color="darkslategray")
    normalize_axes(ax4)

    for ax in (ax1, ax2, ax3):
        xticklabels = [int(x) for x in ax.get_xticks()]
        yticklabels = [int(x) for x in ax.get_yticks()]
        ax.set_xticklabels(xticklabels, family='Helvetica', size=8)
        ax.set_yticklabels(yticklabels, family='Helvetica', size=8)

    image_name = op.join(outdir, pf + "." + iopts.format)
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
    return objects
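The core of the pipeline above (edge detection, closing and hole filling, distance transform, peak markers, watershed) can be distilled into a short, self-contained sketch. This is a minimal illustration on a synthetic image, not the author's exact code: it assumes a recent scikit-image where peak_local_max returns coordinates, and it floods the conventional negated distance map rather than the closed edge map used above.

import numpy as np
from scipy.ndimage import binary_fill_holes, distance_transform_edt
from skimage.draw import disk as draw_disk
from skimage.feature import canny, peak_local_max
from skimage.segmentation import watershed

# Synthetic test image: two overlapping bright blobs on a dark background.
img = np.zeros((120, 120))
rr, cc = draw_disk((50, 45), 25)
img[rr, cc] = 1.0
rr, cc = draw_disk((60, 80), 25)
img[rr, cc] = 1.0

edges = canny(img, sigma=1)                 # outline of the merged blobs
filled = binary_fill_holes(edges)           # solid foreground mask
distance = distance_transform_edt(filled)   # distance to background
coords = peak_local_max(distance, min_distance=10)  # one peak per blob
markers = np.zeros(img.shape, dtype=int)
markers[tuple(coords.T)] = np.arange(1, len(coords) + 1)
labels = watershed(-distance, markers, mask=filled)
print(labels.max(), "objects found")        # expect 2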
Example #2
def fix_border(spline, sample_points):
    border = (~ get_convex_hull(sample_points, spline.shape))
    ind = distance_transform_edt(border, return_distances = False, 
                                 return_indices = True)
    return spline[tuple(ind)]
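What makes fix_border work is the return_indices mode of distance_transform_edt: for every pixel it yields the coordinates of the nearest zero-valued pixel of the input, so indexing spline with those coordinates copies each outside-the-hull value from its closest inside neighbour. A small demo with stand-in arrays (the real spline and hull mask come from the surrounding code):

import numpy as np
from scipy.ndimage import distance_transform_edt

spline = np.arange(25, dtype=float).reshape(5, 5)
inside = np.zeros((5, 5), dtype=bool)
inside[1:4, 1:4] = True       # stand-in for the convex hull
border = ~inside

# ind has shape (2, 5, 5): nearest in-hull row/col for every pixel
ind = distance_transform_edt(border, return_distances=False,
                             return_indices=True)
fixed = spline[tuple(ind)]
assert np.array_equal(fixed[1:4, 1:4], spline[1:4, 1:4])  # hull untouched
print(fixed[0, 0])  # corner copies its nearest hull pixel: spline[1, 1] == 6.0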
Example #3
def segment_images(inpDir, outDir, config_data):
    """ Workflow for dot like shapes such as
    Centrin-2, Desmoplakin, PMP34. 

    Args:
        inpDir : path to the input directory
        outDir : path to the output directory
        config_data : path to the configuration file
    """

    logging.basicConfig(
        format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
        datefmt='%d-%b-%y %H:%M:%S')
    logger = logging.getLogger("main")
    logger.setLevel(logging.INFO)

    inpDir_files = os.listdir(inpDir)
    for i, f in enumerate(inpDir_files):
        logger.info('Segmenting image : {}'.format(f))

        # Load an image
        br = BioReader(os.path.join(inpDir, f))
        image = br.read_image()
        structure_channel = 0
        struct_img0 = image[:, :, :, structure_channel, 0]
        struct_img0 = struct_img0.transpose(2, 0, 1).astype(np.float32)

        # main algorithm
        intensity_scaling_param = config_data['intensity_scaling_param']
        struct_img = intensity_normalization(
            struct_img0, scaling_param=intensity_scaling_param)

        gaussian_smoothing_sigma = config_data['gaussian_smoothing_sigma']
        if config_data["gaussian_smoothing"] == "gaussian_slice_by_slice":
            structure_img_smooth = image_smoothing_gaussian_slice_by_slice(
                struct_img, sigma=gaussian_smoothing_sigma)
        else:
            structure_img_smooth = image_smoothing_gaussian_3d(
                struct_img, sigma=gaussian_smoothing_sigma)
        s3_param = config_data['s3_param']
        bw = dot_3d_wrapper(structure_img_smooth, s3_param)
        minArea = config_data['minArea']
        Mask = remove_small_objects(bw > 0,
                                    min_size=minArea,
                                    connectivity=1,
                                    in_place=False)
        Seed = dilation(peak_local_max(struct_img,
                                       labels=label(Mask),
                                       min_distance=2,
                                       indices=False),
                        selem=ball(1))
        Watershed_Map = -1 * distance_transform_edt(bw)
        seg = watershed(Watershed_Map,
                        label(Seed),
                        mask=Mask,
                        watershed_line=True)
        seg = remove_small_objects(seg > 0,
                                   min_size=minArea,
                                   connectivity=1,
                                   in_place=False)
        seg = seg > 0
        out_img = seg.astype(np.uint8)
        out_img[out_img > 0] = 255

        # create output image
        out_img = out_img.transpose(1, 2, 0)
        out_img = out_img.reshape(
            (out_img.shape[0], out_img.shape[1], out_img.shape[2], 1, 1))

        # write image using BFIO
        bw = BioWriter(os.path.join(outDir, f))
        bw.num_x(out_img.shape[1])
        bw.num_y(out_img.shape[0])
        bw.num_z(out_img.shape[2])
        bw.num_c(out_img.shape[3])
        bw.num_t(out_img.shape[4])
        bw.pixel_type(dtype='uint8')
        bw.write_image(out_img)
        bw.close_image()
Example #4
    img = pyplot.imread(image_name)

    img_gray = rgb2gray(img)

    thresh = threshold_otsu(img_gray)  # threshold value based on Otsu's method

    if bright_background:
        foreground_mask = img_gray <= thresh  # for bright background
    else:
        foreground_mask = img_gray > thresh  # for dark background

    # compute the Euclidean distance from every binary pixel to the nearest zero pixel
    # and then find peaks in this distance map
    #
    distance = ndimage.distance_transform_edt(foreground_mask)

    # return a boolean array shaped like image, with peaks represented by True values
    localMax = peak_local_max(distance,
                              indices=False,
                              min_distance=30,
                              labels=foreground_mask)

    # perform a connected component analysis on the local peaks using 8-connectivity
    markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]

    # apply the Watershed algorithm
    labels = watershed(-distance, markers, mask=foreground_mask)

    print(' [x] Analyzing image %s' % image_name)
    print(' [x] there are %d segments found' % (len(np.unique(labels)) - 1))
Example #5
def absolute_distance(skeleton, mask):
    distance = ndi.distance_transform_edt(np.invert(skeleton.astype(bool)),
                                          return_distances=True,
                                          return_indices=False)
    return distance
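A quick sanity check of the inversion trick used here: the transform runs on the negated skeleton, so skeleton pixels are the zeros and every other pixel gets its straight-line distance to the skeleton.

import numpy as np
from scipy import ndimage as ndi

skeleton = np.zeros((5, 5))
skeleton[2, :] = 1  # horizontal skeleton along the middle row
dist = ndi.distance_transform_edt(np.invert(skeleton.astype(bool)))
print(dist[:, 2])   # [2. 1. 0. 1. 2.]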
Example #6
# load the image and perform pyramid mean shift filtering
# to aid the thresholding step
shifted = cv2.pyrMeanShiftFiltering(image, 21, 51)
cv2.imshow("Shifted", shifted)

# convert the mean shift image to grayscale, then apply
# Otsu's thresholding
gray = cv2.cvtColor(shifted, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
cv2.imshow("Threshold Image", thresh)

# compute the exact Euclidean distance from every binary
# pixel to the nearest zero pixel, then find peaks in this
# distance map
D = ndimage.distance_transform_edt(thresh)
localMax = peak_local_max(D, indices=False, min_distance=20, labels=thresh)

# perform a connected component analysis on the local peaks,
# using 8-connectivity, then apply the Watershed algorithm
markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
labels = watershed(-D, markers, mask=thresh)
print("[INFO] {} unique segments found".format(len(np.unique(labels)) - 1))

# loop over the unique labels returned by the Watershed
# algorithm
for label in np.unique(labels):
    # if the label is zero, we are examining the 'background'
    # so simply ignore it
    if label == 0:
        continue
Example #7
    for config_file in config_files:
        print('[Data] reading ' + config_folder + config_file)
        config = utils.read_json(config_folder + config_file)

        if config['synthetic_sdf']:
            truncation = config['truncation']

            filled_file = common.filename(config, 'filled_file')
            filled = utils.read_hdf5(filled_file)
            print('[Data] read ' + filled_file)

            sdfs = np.zeros((filled.shape))
            sdfs = np.squeeze(sdfs)

            for n in range(filled.shape[0]):
                positive_df = ndimage.distance_transform_edt(1 - filled[n])
                negative_df = ndimage.distance_transform_edt(filled[n])

                sdf = np.zeros(filled[n].shape)
                mask = (filled[n] == 1)
                sdf[mask] = -negative_df[mask]
                mask = (filled[n] == 0)
                sdf[mask] = positive_df[mask]

                sdfs[n] = sdf
                print('[Data] output sdf %d/%d' % (n + 1, filled.shape[0]))

            tsdfs = sdfs.copy()
            tsdfs[tsdfs > truncation] = truncation
            tsdfs[tsdfs < -truncation] = -truncation
Example #8
 def update(self, obstMap):
     self.map = ndimage.distance_transform_edt(
         (~obstMap.map).astype(int)) * self.cellSize
Example #9
# Loop through all tif files in input folder
Images = (glob.glob(InputPath+"/*.tif"))
for img in Images:

    # Read image
    I = io.imread(img)

    # Processing

    # Gaussian filter
    if IntensityBlurRad >= 1:
        I = 255*gaussian(I, sigma=IntensityBlurRad)

    # Adaptive threshold
    mask = threshold_adaptive(I, RadThr, offset = Thr).astype(np.uint8)

    # Binary watershed
    distance = ndimage.distance_transform_edt(mask)
    distance = gaussian(distance, sigma=DistanceBlurRad)
    local_maxi = peak_local_max(distance, indices=False, footprint=np.ones((3, 3)), labels=mask)
    markers = morphology.label(local_maxi)
    nuclei_labels = watershed(-distance, markers, mask=mask)
    nuclei_labels = nuclei_labels.astype(np.uint16)
    nuclei_labels = remove_small_objects(nuclei_labels, min_size=MinSize)
    nuclei_labels = skimage.segmentation.relabel_sequential(nuclei_labels)[0]
    nuclei_labels = nuclei_labels.astype(np.uint16)

    # Write label mask
    filename = os.path.basename(img)
    io.imsave(OutputPath+"/"+filename, nuclei_labels)
Example #10
def expand_labels(label_image, distance=1):
    """Expand labels in label image by ``distance`` pixels without overlapping.

    Given a label image, ``expand_labels`` grows label regions (connected components)
    outwards by up to ``distance`` pixels without overflowing into neighboring regions.
    More specifically, each background pixel that is within Euclidean distance
    of <= ``distance`` pixels of a connected component is assigned the label of that
    connected component.
    Where multiple connected components are within ``distance`` pixels of a background
    pixel, the label value of the closest connected component will be assigned (see
    Notes for the case of multiple labels at equal distance).

    Parameters
    ----------
    label_image : ndarray of dtype int
        label image
    distance : float
        Euclidean distance in pixels by which to grow the labels. Default is one.

    Returns
    -------
    enlarged_labels : ndarray of dtype int
        Labeled array, where all connected regions have been enlarged

    Notes
    -----
    Where labels are spaced more than ``distance`` pixels apart, this is
    equivalent to a morphological dilation with a disc or hyperball of radius ``distance``.
    However, in contrast to a morphological dilation, ``expand_labels`` will
    not expand a label region into a neighboring region.  

    This implementation of ``expand_labels`` is derived from CellProfiler [1]_, where
    it is known as module "IdentifySecondaryObjects (Distance-N)" [2]_.

    There is an important edge case when a pixel has the same distance to
    multiple regions, as it is not defined which region expands into that
    space. Here, the exact behavior depends on the upstream implementation
    of ``scipy.ndimage.distance_transform_edt``.

    See Also
    --------
    :func:`skimage.measure.label`, :func:`skimage.segmentation.watershed`, :func:`skimage.morphology.dilation`

    References
    ----------
    .. [1] https://cellprofiler.org
    .. [2] https://github.com/CellProfiler/CellProfiler/blob/082930ea95add7b72243a4fa3d39ae5145995e9c/cellprofiler/modules/identifysecondaryobjects.py#L559

    Examples
    --------
    >>> labels = np.array([0, 1, 0, 0, 0, 0, 2])
    >>> expand_labels(labels, distance=1)
    array([1, 1, 1, 0, 0, 2, 2])

    Labels will not overwrite each other:

    >>> expand_labels(labels, distance=3)
    array([1, 1, 1, 1, 2, 2, 2])

    In case of ties, behavior is undefined, but currently resolves to the
    label closest to ``(0,) * ndim`` in lexicographical order.

    >>> labels_tied = np.array([0, 1, 0, 2, 0])
    >>> expand_labels(labels_tied, 1)
    array([1, 1, 1, 2, 2])
    >>> labels2d = np.array(
    ...     [[0, 1, 0, 0],
    ...      [2, 0, 0, 0],
    ...      [0, 3, 0, 0]]
    ... )
    >>> expand_labels(labels2d, 1)
    array([[2, 1, 1, 0],
           [2, 2, 0, 0],
           [2, 3, 3, 0]])
    """

    distances, nearest_label_coords = distance_transform_edt(
        label_image == 0, return_indices=True)
    labels_out = np.zeros_like(label_image)
    dilate_mask = distances <= distance
    # build the coordinates to find nearest labels,
    # in contrast to [1] this implementation supports label arrays
    # of any dimension
    masked_nearest_label_coords = [
        dimension_indices[dilate_mask]
        for dimension_indices in nearest_label_coords
    ]
    nearest_labels = label_image[tuple(masked_nearest_label_coords)]
    labels_out[dilate_mask] = nearest_labels
    return labels_out
Example #11
    def run(self, workspace):
        objects_name = self.objects_name.value
        objects = workspace.object_set.get_objects(objects_name)
        assert isinstance(objects, cpo.Objects)
        labels = objects.segmented
        if self.relabel_option == OPTION_SPLIT:
            output_labels, count = scind.label(labels > 0, np.ones((3, 3), bool))
        else:
            if self.unify_option == UNIFY_DISTANCE:
                mask = labels > 0
                if self.distance_threshold.value > 0:
                    #
                    # Take the distance transform of the reverse of the mask
                    # and figure out what points are less than 1/2 of the
                    # distance from an object.
                    #
                    d = scind.distance_transform_edt(~mask)
                    mask = d < self.distance_threshold.value / 2 + 1
                output_labels, count = scind.label(mask, np.ones((3, 3), bool))
                output_labels[labels == 0] = 0
                if self.wants_image:
                    output_labels = self.filter_using_image(workspace, mask)
            elif self.unify_option == UNIFY_PARENT:
                parents_name = self.parent_object.value
                parents_of = workspace.measurements[
                    objects_name, "_".join((C_PARENT, parents_name))]
                output_labels = labels.copy().astype(np.uint32)
                output_labels[labels > 0] = parents_of[labels[labels > 0] - 1]
                if self.unification_method == UM_CONVEX_HULL:
                    ch_pts, n_pts = morph.convex_hull(output_labels)
                    ijv = morph.fill_convex_hulls(ch_pts, n_pts)
                    output_labels[ijv[:, 0], ijv[:, 1]] = ijv[:, 2]

        output_objects = cpo.Objects()
        output_objects.segmented = output_labels
        if objects.has_small_removed_segmented:
            output_objects.small_removed_segmented = \
                copy_labels(objects.small_removed_segmented, output_labels)
        if objects.has_unedited_segmented:
            output_objects.unedited_segmented = \
                copy_labels(objects.unedited_segmented, output_labels)
        output_objects.parent_image = objects.parent_image
        workspace.object_set.add_objects(output_objects, self.output_objects_name.value)

        measurements = workspace.measurements
        add_object_count_measurements(measurements,
                                      self.output_objects_name.value,
                                      np.max(output_objects.segmented))
        add_object_location_measurements(measurements,
                                         self.output_objects_name.value,
                                         output_objects.segmented)

        #
        # Relate the output objects to the input ones and record
        # the relationship.
        #
        children_per_parent, parents_of_children = \
            objects.relate_children(output_objects)
        measurements.add_measurement(self.objects_name.value,
                                     FF_CHILDREN_COUNT %
                                     self.output_objects_name.value,
                                     children_per_parent)
        measurements.add_measurement(self.output_objects_name.value,
                                     FF_PARENT % self.objects_name.value,
                                     parents_of_children)
        if self.wants_outlines:
            outlines = centrosome.outline.outline(output_labels)
            outline_image = cpi.Image(outlines.astype(bool))
            workspace.image_set.add(self.outlines_name.value,
                                    outline_image)

        if self.show_window:
            workspace.display_data.orig_labels = objects.segmented
            workspace.display_data.output_labels = output_objects.segmented
            if self.unify_option == UNIFY_PARENT:
                workspace.display_data.parent_labels = \
                    workspace.object_set.get_objects(self.parent_object.value).segmented
Example #12
def get_neg_dst_transform(label_unmodified,
                          img,
                          instance,
                          strategy,
                          ignore_classes,
                          old_label=None,
                          dt_method="edt"):
    """
  :param img: input image: this would be used to calculate geodesic distance.
  :param ignore_classes: 
  :param dt_method: 'edt' for euclidean distance and 'geodesic' for geodesic distance.
  :param old_label: old label, if available
  :param label_unmodified: unmodified label which contains all the instances
  :param instance: The instance number to segment
  :param strategy: value in [1,2,3]
          1 - Generate random clicks from the background, which is D pixels away from the object.
          2 - Generate random clicks on each negative object.
          3 - Generate random clicks around the object boundary.
  :return: Negative distance transform map
  """
    label = np.where(label_unmodified == instance, 1, 0)
    g_c = get_image_area_to_sample(label)

    pts = []

    if strategy in [1, 3]:
        if strategy == 1:
            num_neg_clicks = random.sample(list(range(0, Nneg1 + 1)), 1)
            pts = get_sampled_locations(np.where(g_c == 1), g_c,
                                        num_neg_clicks)
        else:
            # First negative click is randomly sampled in g_c
            pts = get_sampled_locations(np.where(g_c == 1), g_c, [1])
            g_c_copy = np.copy(g_c)
            g_c_copy[list(zip(*(val for val in pts)))] = 0
            dt = distance_transform_edt(g_c_copy)
            # Sample successive points using p_next = arg max f(p_ij | s0 U g), where p_ij in g_c, s0 is the set of all
            # sampled points, and 'g' is the complementary set of g_c
            for n_clicks in range(2, Nneg3 + 1):
                if np.max(dt) > 0:
                    row, col = np.where(dt == np.max(dt))
                    row, col = list(zip(row, col))[0]
                    pts.append((row, col))
                    x_min = max(0, row - D)
                    x_max = min(row + D, dt.shape[0])
                    y_min = max(0, col - D)
                    y_max = min(col + D, dt.shape[1])
                    dt[x_min:x_max, y_min:y_max] = 0

    elif strategy == 2:
        # Get all negative object instances.
        instances = np.setdiff1d(np.unique(label_unmodified),
                                 np.append(instance, ignore_classes))

        num_neg_clicks = random.sample(list(range(0, Nneg2 + 1)), 1)
        for i in instances:
            g_c = np.where(label_unmodified == i)
            label = np.where(label_unmodified == i, 1, 0)
            pts_local = get_sampled_locations(g_c, np.copy(label),
                                              num_neg_clicks)
            pts = pts + pts_local

    u0 = get_distance_transform(pts, label, img=img, dt_method=dt_method)

    return u0, pts
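The strategy-3 loop above is greedy farthest-point sampling driven by the distance transform: each new click lands on the pixel farthest from all clicks placed so far, and a D-sized window around it is zeroed so the next maximum falls elsewhere. A self-contained sketch of just that idea; the region, point count, and default D below are made-up stand-ins, not the module's globals:

import numpy as np
from scipy.ndimage import distance_transform_edt

def sample_far_points(region, n_points, D=10):
    """Greedy farthest-point sampling inside a boolean region."""
    pts = []
    dt = distance_transform_edt(region)
    for _ in range(n_points):
        if dt.max() <= 0:
            break
        row, col = np.unravel_index(np.argmax(dt), dt.shape)
        pts.append((row, col))
        # suppress a D-neighbourhood, mirroring the x_min/x_max clipping above
        dt[max(0, row - D):row + D, max(0, col - D):col + D] = 0
    return pts

region = np.zeros((50, 50), dtype=bool)
region[5:45, 5:45] = True
print(sample_far_points(region, 3))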
Example #13
def extract(image_sg, image_og, og_filenames, classlist, nu_filenames):
    #    image_og = cv2.cvtColor(image_og, cv2.COLOR_BGR2RGB)

    #thresholding
    ret, thresh = cv2.threshold(image_sg, 180, 255, cv2.THRESH_BINARY)
    #morphological transformation
    kernel = np.ones((5, 5), np.uint8)
    #class_img = cv2.dilate(class_img,kernel,iterations = 1)
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
    #kernel = np.ones((9,9),np.uint8)
    #class_img = cv2.erode(class_img,kernel,iterations = 1)

    # compute the exact Euclidean distance from every binary
    # pixel to the nearest zero pixel, then find peaks in this
    # distance map
    D = ndimage.distance_transform_edt(thresh)
    kernel = np.ones((5, 5), np.float32) / 25
    D = cv2.filter2D(D, -1, kernel)

    localMax = peak_local_max(D, indices=False, min_distance=10, labels=thresh)
    #im = Image.fromarray(D).convert('1')
    #im.show(D)

    # perform a connected component analysis on the local peaks,
    # using 8-connectivity, then apply the Watershed algorithm
    markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
    labels = watershed(-D, markers, mask=thresh)
    print("[INFO] {} unique segments found".format(len(np.unique(labels)) - 1))

    l = []
    for label in np.unique(labels):
        # if the label is zero, we are examining the 'background'
        # so simply ignore it
        if label == 0:
            continue

        # otherwise, allocate memory for the label region and draw
        # it on the mask
        mask = np.zeros(image_sg.shape, dtype="uint8")
        mask[labels == label] = 255

        # detect contours in the mask and grab the largest one
        #cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        #cnts = imutils.grab_contours(cnts)

        # find contours - the cv2.findContours() signature changed from OpenCV 3 to OpenCV 4: it now returns two values instead of three
        cv2MajorVersion = cv2.__version__.split(".")[0]

        # check for contours on thresh
        if int(cv2MajorVersion) == 4:
            ctrs, hier = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                          cv2.CHAIN_APPROX_SIMPLE)
        else:
            im2, ctrs, hier = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_SIMPLE)

        #sort contours
        sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0])

        for i, ctr in enumerate(sorted_ctrs):
            #get centroid
            M = cv2.moments(ctr)
            if M["m00"] > 0:
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])
                #print(cX, cY)
            else:
                continue

            a = int(64 / 2)
            # Getting ROI
            roi = image_og[(cY - a):(cY + a), (cX - a):(cX + a), :]
            if roi.shape != (a * 2, a * 2, 3):
                continue
            l.append([cX, cY, np.asarray(ctr)])
    print('l=', len(l))

    s = []
    for j, name in enumerate(nu_filenames):
        cX_nu = int(name[-11:-8])
        cY_nu = int(name[-7:-4])
        s.append([cX_nu, cY_nu, classlist[j]])

    for m in range(len(l)):
        for n in range(len(s)):
            if l[m][0] == s[n][0] and l[m][1] == s[n][1]:

                if int(s[n][2]) == 1:
                    # draw the contour and center of the shape on the image
                    cv2.drawContours(image_og, [l[m][2]], -1, (0, 255, 255), 2)

                elif int(s[n][2]) == 0:
                    cv2.drawContours(image_og, [l[m][2]], -1, (255, 0, 0), 2)


#    labels = labels + 50
#    labels[labels==50] = 0
    im = Image.fromarray(image_og).convert('RGB')
    #    im.show(image_og)
    savepath = os.path.join(
        os.path.join(
            'C:/CRC project/CRC_pytorch_chenyu/experiments/base_model/output_imgs_mxif_tumorcell_bottom2last/',
            'marked1'), ('%s.png' % (og_filenames)))
    cv2.imwrite(savepath, image_og)
Example #14
def Workflow_slc25a17(struct_img, rescale_ratio, output_type, output_path, fn, output_func=None):
    ##########################################################################
    # PARAMETERS:
    #   note that these parameters are supposed to be fixed for the structure
    #   and work well across different datasets

    intensity_norm_param = [2, 36]
    gaussian_smoothing_sigma = 1
    gaussian_smoothing_truncate_range = 3.0
    dot_3d_sigma = 1
    dot_3d_cutoff = 0.045 #0.03 #0.04
    minArea = 3 #5
    ##########################################################################

    out_img_list = []
    out_name_list = []

    ###################
    # PRE_PROCESSING
    ###################
    # intensity normalization (min/max)
    struct_img = intensity_normalization(struct_img, scaling_param=intensity_norm_param)

    out_img_list.append(struct_img.copy())
    out_name_list.append('im_norm')
    
    # rescale if needed
    if rescale_ratio>0:
        struct_img = resize(struct_img, [1, rescale_ratio, rescale_ratio], method="cubic")
        struct_img = (struct_img - struct_img.min() + 1e-8)/(struct_img.max() - struct_img.min() + 1e-8)
        gaussian_smoothing_truncate_range = gaussian_smoothing_truncate_range * rescale_ratio

    # smoothing with gaussian filter
    structure_img_smooth = image_smoothing_gaussian_slice_by_slice(struct_img, sigma=gaussian_smoothing_sigma, truncate_range=gaussian_smoothing_truncate_range)

    out_img_list.append(structure_img_smooth.copy())
    out_name_list.append('im_smooth')

    ###################
    # core algorithm
    ###################

    # step 1: LOG 3d 
    response = dot_3d(structure_img_smooth, log_sigma=dot_3d_sigma)
    bw = response > dot_3d_cutoff

    bw = remove_small_objects(bw>0, min_size=minArea, connectivity=1, in_place=False)

    out_img_list.append(bw.copy())
    out_name_list.append('interm_mask')

    # step 2: 'local_maxi + watershed' for cell cutting
    local_maxi = peak_local_max(struct_img,labels=label(bw), min_distance=2, indices=False)

    out_img_list.append(local_maxi.copy())
    out_name_list.append('interm_local_max')

    distance = distance_transform_edt(bw)
    im_watershed = watershed(-1*distance, label(dilation(local_maxi, selem=ball(1))), mask=bw, watershed_line=True)

    ###################
    # POST-PROCESSING
    ###################
    seg = remove_small_objects(im_watershed, min_size=minArea, connectivity=1, in_place=False)

    ###### HACK: Only for 2019 April Release #####
    if np.count_nonzero(seg>0)<50000:
        print('FLAG: please check the meta data of the original CZI for QC')
        
    # output
    seg = seg>0
    seg = seg.astype(np.uint8)
    seg[seg>0]=255

    out_img_list.append(seg.copy())
    out_name_list.append('bw_final')

    if output_type == 'default': 
        # the default final output
        save_segmentation(seg, False, output_path, fn)
    elif output_type == 'AICS_pipeline':
        # pre-defined output function for pipeline data
        save_segmentation(seg, True, output_path, fn)
    elif output_type == 'customize':
        # the hook for passing in a customized output function
        #output_fun(out_img_list, out_name_list, output_path, fn)
        print('please provide custom output function')
    elif output_type == 'array':
        return seg
    elif output_type == 'array_with_contour':
        return (seg, generate_segmentation_contour(seg))
    else:
        # the hook for pre-defined RnD output functions (AICS internal)
        img_list, name_list = SLC25A17_output(out_img_list, out_name_list, output_type, output_path, fn)
        if output_type == 'QCB':
            return img_list, name_list
Example #15
 def filter_using_image(self, workspace, mask):
     '''Filter out connections using local intensity minima between objects
     
     workspace - the workspace for the image set
     mask - mask of background points within the minimum distance
     '''
     #
     # NOTE: This is an efficient implementation and an improvement
     #       in accuracy over the Matlab version. It would be faster and
     #       more accurate to eliminate the line-connecting and instead
     #       do the following:
     #     * Distance transform to get the coordinates of the closest
     #       point in an object for points in the background that are
     #       at most 1/2 of the max distance between objects.
     #     * Take the intensity at this closest point and similarly
     #       label the background point if the background intensity
     #       is at least the minimum intensity fraction
     #     * Assume there is a connection between objects if, after this
     #       labeling, there are adjacent points in each object.
     #
     # As it is, the algorithm duplicates the Matlab version but suffers
     # for cells whose intensity isn't high in the centroid and clearly
     # suffers when two cells touch at some point that's off of the line
     # between the two.
     #
     objects = workspace.object_set.get_objects(self.objects_name.value)
     labels = objects.segmented
     image = self.get_image(workspace)
     if workspace.frame is not None:
         # Save the image for display
         workspace.display_data.image = image
     #
     # Do a distance transform into the background to label points
     # in the background with their closest foreground object
     #
     i, j = scind.distance_transform_edt(labels == 0,
                                         return_indices=True,
                                         return_distances=False)
     confluent_labels = labels[i, j]
     confluent_labels[~mask] = 0
     if self.where_algorithm == CA_CLOSEST_POINT:
         #
         # For the closest point method, find the intensity at
         # the closest point in the object (which will be the point itself
         # for points in the object).
         #
         object_intensity = image[i,
                                  j] * self.minimum_intensity_fraction.value
         confluent_labels[object_intensity > image] = 0
     count, index, c_j = morph.find_neighbors(confluent_labels)
     if len(c_j) == 0:
         # Nobody touches - return the labels matrix
         return labels
     #
     # Make a row of i matching the touching j
     #
     c_i = np.zeros(len(c_j))
     #
     # Eliminate labels without matches
     #
     label_numbers = np.arange(1, len(count) + 1)[count > 0]
     index = index[count > 0]
     count = count[count > 0]
     #
     # Get the differences between labels so we can use a cumsum trick
     # to increment to the next label when they change
     #
     label_numbers[1:] = label_numbers[1:] - label_numbers[:-1]
     c_i[index] = label_numbers
     c_i = np.cumsum(c_i).astype(int)
     if self.where_algorithm == CA_CENTROIDS:
         #
         # Only connect points > minimum intensity fraction
         #
         center_i, center_j = morph.centers_of_labels(labels)
         indexes, counts, i, j = morph.get_line_pts(center_i[c_i - 1],
                                                    center_j[c_i - 1],
                                                    center_i[c_j - 1],
                                                    center_j[c_j - 1])
         #
         # The indexes of the centroids at pt1
         #
         last_indexes = indexes + counts - 1
         #
         # The minimum of the intensities at pt0 and pt1
         #
         centroid_intensities = np.minimum(
             image[i[indexes], j[indexes]], image[i[last_indexes],
                                                  j[last_indexes]])
         #
         # Assign label numbers to each point so we can use
         # scipy.ndimage.minimum. The label numbers are indexes into
         # "connections" above.
         #
         pt_labels = np.zeros(len(i), int)
         pt_labels[indexes[1:]] = 1
         pt_labels = np.cumsum(pt_labels)
         minima = scind.minimum(image[i, j], pt_labels,
                                np.arange(len(indexes)))
         minima = morph.fixup_scipy_ndimage_result(minima)
         #
         # Filter the connections using the image
         #
         mif = self.minimum_intensity_fraction.value
         i = c_i[centroid_intensities * mif <= minima]
         j = c_j[centroid_intensities * mif <= minima]
     else:
         i = c_i
         j = c_j
     #
     # Add in connections from self to self
     #
     unique_labels = np.unique(labels)
     i = np.hstack((i, unique_labels))
     j = np.hstack((j, unique_labels))
     #
     # Run "all_connected_components" to get a component # for
     # objects identified as same.
     #
     new_indexes = morph.all_connected_components(i, j)
     new_labels = np.zeros(labels.shape, int)
     new_labels[labels != 0] = new_indexes[labels[labels != 0]]
     return new_labels
Example #16
data_array = cv2.imread("./CIRA/dehaze_example.tif", 1)

b, g, r = cv2.split(data_array)

# Canny edge detection on the red channel

binary_global = canny(r)

fill_masks = ndi.binary_fill_holes(binary_global)
cells_cleaned = morphology.remove_small_objects(fill_masks, 70)
labeled_cells, num = ndi.label(cells_cleaned)

print("Number of Cells detected: " + str(num))
# Now we want to separate the two objects in image
# Generate the markers as local maxima of the distance to the background
distance = ndi.distance_transform_edt(cells_cleaned)
local_maxi = peak_local_max(distance,
                            indices=False,
                            footprint=np.ones((100, 100)),
                            labels=labeled_cells)
markers = ndi.label(local_maxi)[0]
labels = watershed(-distance, markers, mask=cells_cleaned)

fig, axes = plt.subplots(ncols=4, figsize=(9, 3), sharex=True, sharey=True)
ax = axes.ravel()

ax[0].imshow(cells_cleaned, cmap=plt.cm.gray)
ax[0].set_title('Overlapping objects')
ax[1].imshow(-distance, cmap=plt.cm.gray)
ax[1].set_title('Distances')
ax[2].imshow(labels, cmap=plt.cm.nipy_spectral)
Example #17
def regions_to_network(im, dt=None, voxel_size=1):
    r"""
    Analyzes an image that has been partitioned into pore regions and extracts
    the pore and throat geometry as well as network connectivity.

    Parameters
    ----------
    im : ND-array
        An image of the pore space partitioned into individual pore regions.
        Note that this image must have zeros indicating the solid phase.

    dt : ND-array
        The distance transform of the pore space.  If not given it will be
        calculated, but it can save time to provide one if available.

    voxel_size : scalar
        The resolution of the image, expressed as the length of one side of a
        voxel, so the volume of a voxel would be **voxel_size**-cubed.  The
        default is 1, which is useful when overlaying the PNM on the original
        image since the scale of the image is always 1 unit length per voxel.

    Returns
    -------
    A dictionary containing all the pore and throat size data, as well as the
    network topological information.  The dictionary names use the OpenPNM
    convention (i.e. 'pore.coords', 'throat.conns') so it may be converted
    directly to an OpenPNM network object using the ``update`` command.

    """
    print('_' * 60)
    print('Extracting pore and throat information from image')
    from skimage.morphology import disk, ball
    struc_elem = disk if im.ndim == 2 else ball

    # if ~sp.any(im == 0):
    #     raise Exception('The received image has no solid phase (0\'s)')

    if dt is None:
        dt = spim.distance_transform_edt(im > 0)
        dt = spim.gaussian_filter(input=dt, sigma=0.5)

    # Get 'slices' into im for each pore region
    slices = spim.find_objects(im)

    # Initialize arrays
    Ps = sp.arange(1, sp.amax(im) + 1)
    Np = sp.size(Ps)
    p_coords = sp.zeros((Np, im.ndim), dtype=float)
    p_volume = sp.zeros((Np, ), dtype=float)
    p_dia_local = sp.zeros((Np, ), dtype=float)
    p_dia_global = sp.zeros((Np, ), dtype=float)
    p_label = sp.zeros((Np, ), dtype=int)
    p_area_surf = sp.zeros((Np, ), dtype=int)
    t_conns = []
    t_dia_inscribed = []
    t_area = []
    t_perimeter = []
    t_coords = []
    # dt_shape = sp.array(dt.shape)

    # Start extracting size information for pores and throats
    for i in tqdm(Ps):
        pore = i - 1
        if slices[pore] is None:
            continue
        s = extend_slice(slices[pore], im.shape)
        sub_im = im[s]
        sub_dt = dt[s]
        pore_im = sub_im == i
        padded_mask = sp.pad(pore_im, pad_width=1, mode='constant')
        pore_dt = spim.distance_transform_edt(padded_mask)
        s_offset = sp.array([i.start for i in s])
        p_label[pore] = i
        p_coords[pore, :] = spim.center_of_mass(pore_im) + s_offset
        p_volume[pore] = sp.sum(pore_im)
        p_dia_local[pore] = 2 * sp.amax(pore_dt)
        p_dia_global[pore] = 2 * sp.amax(sub_dt)
        p_area_surf[pore] = sp.sum(pore_dt == 1)
        im_w_throats = spim.binary_dilation(input=pore_im,
                                            structure=struc_elem(1))
        im_w_throats = im_w_throats * sub_im
        Pn = sp.unique(im_w_throats)[1:] - 1
        for j in Pn:
            if j > pore:
                t_conns.append([pore, j])
                vx = sp.where(im_w_throats == (j + 1))
                t_dia_inscribed.append(2 * sp.amax(sub_dt[vx]))
                t_perimeter.append(sp.sum(sub_dt[vx] < 2))
                t_area.append(sp.size(vx[0]))
                t_inds = tuple([i + j for i, j in zip(vx, s_offset)])
                temp = sp.where(dt[t_inds] == sp.amax(dt[t_inds]))[0][0]
                if im.ndim == 2:
                    t_coords.append(tuple((t_inds[0][temp], t_inds[1][temp])))
                else:
                    t_coords.append(
                        tuple((t_inds[0][temp], t_inds[1][temp],
                               t_inds[2][temp])))
    # Clean up values
    Nt = len(t_dia_inscribed)  # Get number of throats
    if im.ndim == 2:  # If 2D, add 0's in 3rd dimension
        p_coords = sp.vstack((p_coords.T, sp.zeros((Np, )))).T
        t_coords = sp.vstack((sp.array(t_coords).T, sp.zeros((Nt, )))).T

    net = {}
    net['pore.all'] = sp.ones((Np, ), dtype=bool)
    net['throat.all'] = sp.ones((Nt, ), dtype=bool)
    net['pore.coords'] = sp.copy(p_coords) * voxel_size
    net['pore.centroid'] = sp.copy(p_coords) * voxel_size
    net['throat.centroid'] = sp.array(t_coords) * voxel_size
    net['throat.conns'] = sp.array(t_conns)
    net['pore.label'] = sp.array(p_label)
    net['pore.volume'] = sp.copy(p_volume) * (voxel_size**3)
    net['throat.volume'] = sp.zeros((Nt, ), dtype=float)
    net['pore.diameter'] = sp.copy(p_dia_local) * voxel_size
    net['pore.inscribed_diameter'] = sp.copy(p_dia_local) * voxel_size
    net['pore.equivalent_diameter'] = 2 * (
        (3 / 4 * net['pore.volume'] / sp.pi)**(1 / 3))
    net['pore.extended_diameter'] = sp.copy(p_dia_global) * voxel_size
    net['pore.surface_area'] = sp.copy(p_area_surf) * (voxel_size)**2
    net['throat.diameter'] = sp.array(t_dia_inscribed) * voxel_size
    net['throat.inscribed_diameter'] = sp.array(t_dia_inscribed) * voxel_size
    net['throat.area'] = sp.array(t_area) * (voxel_size**2)
    net['throat.perimeter'] = sp.array(t_perimeter) * voxel_size
    net['throat.equivalent_diameter'] = (sp.array(t_area) *
                                         (voxel_size**2))**0.5
    P12 = net['throat.conns']
    PT1 = sp.sqrt(
        sp.sum(((p_coords[P12[:, 0]] - t_coords) * voxel_size)**2, axis=1))
    PT2 = sp.sqrt(
        sp.sum(((p_coords[P12[:, 1]] - t_coords) * voxel_size)**2, axis=1))
    net['throat.total_length'] = PT1 + PT2
    PT1 = PT1 - p_dia_local[P12[:, 0]] / 2 * voxel_size
    PT2 = PT2 - p_dia_local[P12[:, 1]] / 2 * voxel_size
    net['throat.length'] = PT1 + PT2
    dist = (p_coords[P12[:, 0]] - p_coords[P12[:, 1]]) * voxel_size
    net['throat.direct_length'] = sp.sqrt(sp.sum(dist**2, axis=1))
    # Make a dummy openpnm network to get the conduit lengths
    pn = op.network.GenericNetwork()
    pn.update(net)
    pn.add_model(propname='throat.endpoints',
                 model=op_gm.throat_endpoints.spherical_pores,
                 pore_diameter='pore.inscribed_diameter',
                 throat_diameter='throat.inscribed_diameter')
    pn.add_model(propname='throat.conduit_lengths',
                 model=op_gm.throat_length.conduit_lengths)
    pn.add_model(propname='pore.area', model=op_gm.pore_area.sphere)
    net['throat.endpoints.head'] = pn['throat.endpoints.head']
    net['throat.endpoints.tail'] = pn['throat.endpoints.tail']
    net['throat.conduit_lengths.pore1'] = pn['throat.conduit_lengths.pore1']
    net['throat.conduit_lengths.pore2'] = pn['throat.conduit_lengths.pore2']
    net['throat.conduit_lengths.throat'] = pn['throat.conduit_lengths.throat']
    net['pore.area'] = pn['pore.area']
    prj = pn.project
    prj.clear()
    wrk = op.Workspace()
    wrk.close_project(prj)

    return net
Example #18
len_y = np.array(final_rect).shape[1]
print(np.array(final_rect).shape)

#invert: non-zero pixels become 0 and zero pixels become 1
for i in range(len_x):
    for j in range(len_y):
        if final_rect[i][j] != 0:
            final_rect[i][j] = 0
        else:
            final_rect[i][j] = 1

#show image
show_image(final_rect)

#Exact euclidean distance transform.
distance = ndimage.distance_transform_edt(final_rect)
print(distance.shape)
#np.savetxt("output_label.txt", distance, fmt = "%.3f")

#Exact local maxima(tree's peak)
binary_rect = np.array(final_rect)
local_maxi = peak_local_max(distance, indices=False, footprint=np.ones((55,55)),threshold_abs = 10, labels = binary_rect)
print(type(local_maxi))
print(local_maxi.shape)
show_image(local_maxi)

#enhance the peaks? gather nearby peaks together?
markers = morphology.label(local_maxi)
print(type(markers))
print(markers.shape)
show_image(markers)
Example #19
cleaned_dividing = measure.label(binary_smoother_dividing)
print(cleaned_dividing.max())

#####################################################################
# dividing nuclei in this sample.

#####################################################################
# Segment nuclei
# ==============
# To separate overlapping nuclei, we resort to
# :ref:`sphx_glr_auto_examples_segmentation_plot_watershed.py`.
# To visualize the segmentation conveniently, we colour-code the labelled
# regions using the `color.label2rgb` function, specifying the background
# label with argument `bg_label=0`.

distance = ndi.distance_transform_edt(cells)

local_maxi = feature.peak_local_max(distance, indices=False, min_distance=7)

markers = measure.label(local_maxi)

segmented_cells = segmentation.watershed(-distance, markers, mask=cells)

fig, ax = plt.subplots(ncols=2, figsize=(10, 5))
ax[0].imshow(cells, cmap='gray')
ax[0].set_title('Overlapping nuclei')
ax[0].axis('off')
ax[1].imshow(color.label2rgb(segmented_cells, bg_label=0))
ax[1].set_title('Segmented nuclei')
ax[1].axis('off')
plt.show()
Example #20
def medial_axis(image, mask=None, return_distance=False):
    """
    Compute the medial axis transform of a binary image

    Parameters
    ----------
    image : binary ndarray, shape (M, N)
        The image of the shape to be skeletonized.
    mask : binary ndarray, shape (M, N), optional
        If a mask is given, only those elements in `image` with a true
        value in `mask` are used for computing the medial axis.
    return_distance : bool, optional
        If true, the distance transform is returned as well as the skeleton.

    Returns
    -------
    out : ndarray of bools
        Medial axis transform of the image
    dist : ndarray of ints, optional
        Distance transform of the image (only returned if `return_distance`
        is True)

    See also
    --------
    skeletonize

    Notes
    -----
    This algorithm computes the medial axis transform of an image
    as the ridges of its distance transform.

    The different steps of the algorithm are as follows
     * A lookup table is used, that assigns 0 or 1 to each configuration of
       the 3x3 binary square, whether the central pixel should be removed
       or kept. We want a point to be removed if it has more than one neighbor
       and if removing it does not change the number of connected components.

     * The distance transform to the background is computed, as well as
       the cornerness of the pixel.

     * The foreground (value of 1) points are ordered by
       the distance transform, then the cornerness.

     * A cython function is called to reduce the image to its skeleton. It
       processes pixels in the order determined at the previous step, and
       removes or maintains a pixel according to the lookup table. Because
       of the ordering, it is possible to process all pixels in only one
       pass.

    Examples
    --------
    >>> square = np.zeros((7, 7), dtype=np.uint8)
    >>> square[1:-1, 2:-2] = 1
    >>> square
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
    >>> medial_axis(square).astype(np.uint8)
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 0, 1, 0, 0],
           [0, 0, 0, 1, 0, 0, 0],
           [0, 0, 0, 1, 0, 0, 0],
           [0, 0, 0, 1, 0, 0, 0],
           [0, 0, 1, 0, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]], dtype=uint8)

    """
    global _eight_connect
    if mask is None:
        masked_image = image.astype(bool)
    else:
        masked_image = image.astype(bool).copy()
        masked_image[~mask] = False
    #
    # Build lookup table - three conditions
    # 1. Keep only positive pixels (center_is_foreground array).
    # AND
    # 2. Keep if removing the pixel results in a different connectivity
    # (if the number of connected components is different with and
    # without the central pixel)
    # OR
    # 3. Keep if # pixels in neighbourhood is 2 or less
    # Note that table is independent of image
    center_is_foreground = (np.arange(512) & 2**4).astype(bool)
    table = (
        center_is_foreground  # condition 1.
        & (
            np.array([
                ndi.label(_pattern_of(index), _eight_connect)[1] != ndi.label(
                    _pattern_of(index & ~2**4), _eight_connect)[1]
                for index in range(512)
            ])  # condition 2
            | np.array(
                [np.sum(_pattern_of(index)) < 3 for index in range(512)]))
        # condition 3
    )

    # Build distance transform
    distance = ndi.distance_transform_edt(masked_image)
    if return_distance:
        store_distance = distance.copy()

    # Corners
    # The processing order along the edge is critical to the shape of the
    # resulting skeleton: if you process a corner first, that corner will
    # be eroded and the skeleton will miss the arm from that corner. Pixels
    # with fewer neighbors are more "cornery" and should be processed last.
    # We use a cornerness_table lookup table where the score of a
    # configuration is the number of background (0-value) pixels in the
    # 3x3 neighbourhood
    cornerness_table = np.array(
        [9 - np.sum(_pattern_of(index)) for index in range(512)])
    corner_score = _table_lookup(masked_image, cornerness_table)

    # Define arrays for inner loop
    i, j = np.mgrid[0:image.shape[0], 0:image.shape[1]]
    result = masked_image.copy()
    distance = distance[result]
    i = np.ascontiguousarray(i[result], dtype=np.intp)
    j = np.ascontiguousarray(j[result], dtype=np.intp)
    result = np.ascontiguousarray(result, np.uint8)

    # Determine the order in which pixels are processed.
    # We use a random # for tiebreaking. Assign each pixel in the image a
    # predictable, random # so that masking doesn't affect arbitrary choices
    # of skeletons
    #
    generator = np.random.RandomState(0)
    tiebreaker = generator.permutation(np.arange(masked_image.sum()))
    order = np.lexsort((tiebreaker, corner_score[masked_image], distance))
    order = np.ascontiguousarray(order, dtype=np.int32)

    table = np.ascontiguousarray(table, dtype=np.uint8)
    # Remove pixels not belonging to the medial axis
    _skeletonize_loop(result, i, j, order, table)

    result = result.astype(bool)
    if mask is not None:
        result[~mask] = image[~mask]
    if return_distance:
        return result, store_distance
    else:
        return result
Example #21
cv.imwrite('inner_a.png',inner_a)
cv.imwrite('inner_b.png',inner_b)

kernel = np.ones((5,5),np.uint8)

erosion_a = cv.erode(inner_a,kernel,iterations = 2)
erosion_b = cv.erode(inner_b,kernel,iterations = 2)

innerA = cv.dilate(erosion_a,kernel,iterations = 1)
innerB = cv.dilate(erosion_b,kernel,iterations = 1)

cv.imwrite('innerA.png',innerA)
cv.imwrite('innerB.png',innerB)
  
eucl_a = ndimage.distance_transform_edt(innerA)
eucl_b = ndimage.distance_transform_edt(innerB)

localMaxA = peak_local_max(eucl_a, indices=False, labels=innerA)
localMaxB = peak_local_max(eucl_b, indices=False, labels=innerB)

markers_a = ndimage.label(localMaxA, structure=np.ones((3, 3)))[0]
markers_b = ndimage.label(localMaxB, structure=np.ones((3, 3)))[0]

labels_a = watershed(-eucl_a, markers_a, mask=innerA)
labels_b = watershed(-eucl_b, markers_b, mask=innerB)

def get_area(labels, inner_mask, img):
    area = 0
    for label in np.unique(labels):
        if label == 0:
Example #22
 def run(self, ips, imgs, para=None):
     dismap = ndimg.distance_transform_edt(imgs > 0)
     imgs[:] = np.clip(dismap, ips.range[0], ips.range[1])
Example #23
def extract_radii(mask, skeleton):
    """ returns radii of mask based on nearest non-masked pixel along skeleton
    """
    distance = ndi.distance_transform_edt(mask)
    local_radii = distance * skeleton  # only keeps pixel in skeleton
    return local_radii
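A toy check of the radius trick: inside the mask, distance_transform_edt measures how far each pixel is from the nearest background pixel, so along the skeleton it equals the local half-width of the shape.

import numpy as np
from scipy import ndimage as ndi

mask = np.zeros((7, 15), dtype=bool)
mask[2:5, :] = True         # a 3-pixel-wide horizontal tube
skeleton = np.zeros_like(mask)
skeleton[3, :] = True       # its centre line
radii = ndi.distance_transform_edt(mask) * skeleton
print(radii[3, 7])          # 2.0: the centre row is 2 px from the wall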
Example #24
 def _fill_data_holes(data, hole_max_value=0):
     '''
     Fill holes in data using a nearest neighbour approach.
     '''
     ids = nd.distance_transform_edt(data <= hole_max_value, return_distances=False, return_indices=True)
     return data[tuple(ids)]
Example #25
def inter_mean(dye,
               skeleton,
               mask,
               relative_dist,
               interval_size=10,
               div=0.5,
               corr_for_missing_branches=False,
               return_only_conc=False):
    """ orth. projection on skeleton by finding the nearest point in skeleton
     plus average over nearest pixel in skeleton within interval <= 10 pixel
     allow enough pixels to be considered such that "inner" and "outer"
     values can be returned.
     corr_for_missing_branches -> removes pixels that belong to branches
     that were removed
     """

    if corr_for_missing_branches:
        dye = np.where(relative_dist > 1, 0, dye)

    intensity_inner = np.zeros_like(dye)
    intensity_outer = np.zeros_like(dye)
    no_inner = np.zeros_like(dye)
    no_outer = np.zeros_like(dye)

    # relative_dist, radii = relative_distance(skeleton, mask, local_radii)

    inds = ndi.distance_transform_edt(np.invert(skeleton.astype(bool)),
                                      return_distances=False,
                                      return_indices=True)

    dye_f = dye.flatten()
    inds0 = inds[0].flatten()
    inds1 = inds[1].flatten()
    relative_dist_f = relative_dist.flatten()

    for i in range(len(dye_f)):
        if dye_f[i] != 0:
            # Accumulate each pixel's value onto its nearest skeleton pixel.
            if relative_dist_f[i] < div:
                intensity_inner[inds0[i], inds1[i]] += dye_f[i]
                no_inner[inds0[i], inds1[i]] += 1
            else:
                intensity_outer[inds0[i], inds1[i]] += dye_f[i]
                no_outer[inds0[i], inds1[i]] += 1

    intensity = intensity_inner + intensity_outer
    no = no_inner + no_outer

    intensity = disk_filter(
        intensity, skeleton, interval_size, normalize_kernel=False) * skeleton
    no = disk_filter(no, skeleton, interval_size,
                     normalize_kernel=False) * skeleton

    concentration = np.true_divide(intensity,
                                   no,
                                   out=np.zeros_like(dye),
                                   where=intensity != 0)

    if return_only_conc:
        return concentration

    intensity_inner = disk_filter(
        intensity_inner, skeleton, interval_size,
        normalize_kernel=False) * skeleton
    intensity_outer = disk_filter(
        intensity_outer, skeleton, interval_size,
        normalize_kernel=False) * skeleton

    no_inner = disk_filter(
        no_inner, skeleton, interval_size, normalize_kernel=False) * skeleton
    no_outer = disk_filter(
        no_outer, skeleton, interval_size, normalize_kernel=False) * skeleton

    concentration_inner = np.true_divide(intensity_inner,
                                         no_inner,
                                         out=np.zeros_like(dye),
                                         where=intensity_inner != 0)

    concentration_outer = np.true_divide(intensity_outer,
                                         no_outer,
                                         out=np.zeros_like(dye),
                                         where=intensity_outer != 0)

    return concentration, concentration_inner, concentration_outer
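The core trick in inter_mean is that distance_transform_edt with return_indices=True maps every pixel to the coordinates of its nearest skeleton pixel, so off-skeleton intensity can be accumulated onto the skeleton. A self-contained sketch with illustrative names:

import numpy as np
from scipy import ndimage as ndi

skeleton = np.zeros((5, 5), dtype=bool)
skeleton[2, :] = True                       # a horizontal skeleton line
dye = np.random.rand(5, 5)                  # stand-in intensity image

inds = ndi.distance_transform_edt(~skeleton,
                                  return_distances=False,
                                  return_indices=True)
projected = np.zeros_like(dye)
np.add.at(projected, (inds[0], inds[1]), dye)   # accumulate onto skeleton
# projected is nonzero only on row 2, the skeleton pixels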
Example #26
0
def separate_watershed(
    vdf_temp,
    min_distance=1,
    min_size=1,
    max_size=np.inf,
    max_number_of_grains=np.inf,
    marker_radius=1,
    threshold=False,
    exclude_border=False,
    plot_on=False,
):
    """Separate segments from one VDF image using edge-detection by the
    sobel transform and the watershed segmentation implemented in
    scikit-image. See [1,2] for examples from scikit-image.

    Parameters
    ----------
    vdf_temp : np.array
        One VDF image.
    min_distance : int
        Minimum distance (in pixels) between markers for them to be
        considered separate markers for the watershed segmentation.
    min_size : float
        Grains with size (i.e. total number of pixels) below min_size
        are discarded.
    max_size : float
        Grains with size (i.e. total number of pixels) above max_size
        are discarded.
    max_number_of_grains : int
        Maximum number of grains included in the returned separated
        grains. If it is exceeded, those with highest peak intensities
        will be returned.
    marker_radius : float
        If 1 or larger, each marker for watershed is expanded to a disk
        of radius marker_radius. marker_radius should not exceed
        2*min_distance.
    threshold : bool
        If True, a mask is calculated by thresholding the VDF image by
        the Li threshold method in scikit-image. If False (default), the
        mask is the boolean VDF image.
    exclude_border : int or True, optional
        If a non-zero integer, peaks within a distance of exclude_border
        from the border will be discarded. If True, peaks at or closer
        than min_distance from the border will be discarded.
    plot_on : bool
        If True, the VDF, the mask, the distance transform
        and the separated grains will be plotted in one figure window.

    Returns
    -------
    sep : np.array
        Array containing segments from VDF images (i.e. separated
        grains). Shape: (image size x, image size y, number of grains)

    References
    ----------
    [1] http://scikit-image.org/docs/dev/auto_examples/segmentation/plot_watershed.html
    [2] http://scikit-image.org/docs/dev/auto_examples/xx_applications/plot_coins_segmentation.html#sphx-glr-auto-examples-xx-applications-plot-coins-segmentation-py
    """

    # Create a mask from the input VDF image.
    if threshold:
        th = threshold_li(vdf_temp)
        mask = np.zeros_like(vdf_temp)
        mask[vdf_temp > th] = True
    else:
        mask = vdf_temp.astype("bool")

    # Calculate the Euclidean distance from each point in the mask to the
    # nearest background point of value 0.
    distance = distance_transform_edt(mask)

    # If exclude_border is given, the edge of the distance map is removed
    # by erosion. The distance image is used to find markers, so the
    # erosion avoids placing markers at the edge of the mask.
    if exclude_border > 0:
        distance_mask = binary_erosion(distance,
                                       structure=disk(exclude_border))
        distance = distance * distance_mask.astype("bool")

    # Find the coordinates of the local maxima of the distance transform.
    local_maxi = peak_local_max(
        distance,
        indices=False,
        min_distance=1,
        num_peaks=max_number_of_grains,
        exclude_border=exclude_border,
        threshold_rel=None,
    )
    maxi_coord1 = np.where(local_maxi)

    # Discard maxima that lie in connected regions containing fewer pixels
    # than min_size. Used as markers, these would lead to segments smaller
    # than min_size and should therefore not be considered when deciding
    # which maxima to use as markers.
    if min_size > 1:
        labels_check = label(mask)[0]
        delete_indices = []
        for i in np.arange(np.shape(maxi_coord1)[1]):
            index = np.transpose(maxi_coord1)[i]
            label_value = labels_check[index[0], index[1]]
            if len(labels_check[labels_check == label_value]) < min_size:
                delete_indices.append(i)
                local_maxi[index[0], index[1]] = False
        maxi_coord1 = np.delete(maxi_coord1, delete_indices, axis=1)

    # Cluster the maxima by DBSCAN based on min_distance. For each
    # cluster, only the maximum closest to the average maxima position is
    # used as a marker.
    if min_distance > 1 and np.shape(maxi_coord1)[1] > 1:
        clusters = DBSCAN(
            eps=min_distance,
            metric="euclidean",
            min_samples=1,
        ).fit(np.transpose(maxi_coord1))
        local_maxi = np.zeros_like(local_maxi)
        for n in np.arange(clusters.labels_.max() + 1):
            maxi_coord1_n = np.transpose(maxi_coord1)[clusters.labels_ == n]
            com = np.average(maxi_coord1_n, axis=0).astype("int")
            index = distance_matrix([com], maxi_coord1_n).argmin()
            index = maxi_coord1_n[index]
            local_maxi[index[0], index[1]] = True

    # Use the resulting maxima as markers. Each marker should have a
    # unique label value. For each maximum, generate markers with the same
    # label value in a radius given by marker_radius centered at the
    # maximum position. This is done to make the segmentation more robust
    # to local changes in pixel values around the marker.
    markers = label(local_maxi)[0]
    if marker_radius >= 1:
        disk_mask = disk(marker_radius)
        for mm in np.arange(1, np.max(markers) + 1):
            im = np.zeros_like(markers)
            im[np.where(markers == mm)] = markers[np.where(markers == mm)]
            markers_temp = convolve2d(im,
                                      disk_mask,
                                      boundary="fill",
                                      mode="same",
                                      fillvalue=0)
            markers[np.where(markers_temp)] = mm
    markers = markers * mask

    # Find the edges of the VDF image using the Sobel transform.
    elevation = sobel(vdf_temp)

    # 'Flood' the elevation (i.e. edge) image from basins at the marker
    # positions. Find the locations where different basins meet, i.e.
    # the watershed lines (segment boundaries). Only search for segments
    # (labels) in the area defined by mask.
    labels = watershed(elevation, markers=markers, mask=mask)

    sep = np.zeros(
        (np.shape(vdf_temp)[0], np.shape(vdf_temp)[1], (np.max(labels))),
        dtype="int32")
    n, i = 1, 0
    while (np.max(labels)) > n - 1:
        sep_temp = labels * (labels == n) / n
        sep_temp = np.nan_to_num(sep_temp)
        # Discard a segment if it is too small or too large, or else add
        # it to the list of separated segments.
        if (np.sum(sep_temp, axis=(0, 1)) < min_size) or np.sum(
                sep_temp, axis=(0, 1)) > max_size:
            sep = np.delete(sep, ((n - i) - 1), axis=2)
            i = i + 1
        else:
            sep[:, :, (n - i) - 1] = sep_temp
        n = n + 1
    # Put the intensity from the input VDF image into each segmented area.
    vdf_sep = np.broadcast_to(vdf_temp.T, np.shape(sep.T)) * (sep.T == 1)

    if plot_on:  # pragma: no cover
        # If segments have been discarded, make new labels that do not
        # include the discarded segments.
        if np.max(labels) != (np.shape(sep)[2]) and (np.shape(sep)[2] != 0):
            labels = sep[:, :, 0]
            for i in range(1, np.shape(sep)[2]):
                labels = labels + sep[..., i] * (i + 1)
        # If no separated particles were found, set all elements in
        # labels to 0.
        elif np.shape(sep)[2] == 0:
            labels = np.zeros(np.shape(labels))

        seps_img_sum = np.zeros_like(vdf_temp).astype("float64")
        for l, vdf in zip(np.arange(1, np.max(labels) + 1), vdf_sep):
            mask_l = np.zeros_like(labels).astype("bool")
            mask_l[np.where(labels == l)] = 1
            seps_img_sum += vdf_temp * mask_l / np.max(
                vdf_temp[np.where(labels == l)])
            seps_img_sum[np.where(labels == l)] += l

        maxi_coord = np.where(local_maxi)

        fig, axes = plt.subplots(2, 3, sharex=True, sharey=True)
        ax = axes.ravel()

        ax[0].imshow(vdf_temp, cmap=plt.cm.magma_r)
        ax[0].axis("off")
        ax[0].set_title("VDF")

        ax[1].imshow(mask, cmap=plt.cm.gray_r)
        ax[1].axis("off")
        ax[1].set_title("Mask")

        ax[2].imshow(distance, cmap=plt.cm.gray_r)
        ax[2].axis("off")
        ax[2].set_title("Distance and markers")
        ax[2].imshow(masked_where(markers == 0, markers),
                     cmap=plt.cm.gist_rainbow)
        ax[2].plot(maxi_coord1[1], maxi_coord1[0], "k+")
        ax[2].plot(maxi_coord[1], maxi_coord[0], "gx")

        ax[3].imshow(elevation, cmap=plt.cm.magma_r)
        ax[3].axis("off")
        ax[3].set_title("Elevation")

        ax[4].imshow(labels, cmap=plt.cm.gnuplot2_r)
        ax[4].axis("off")
        ax[4].set_title("Labels")

        ax[5].imshow(seps_img_sum, cmap=plt.cm.magma_r)
        ax[5].axis("off")
        ax[5].set_title("Segments")

    return vdf_sep
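A hypothetical call on a synthetic VDF image, assuming the imports the function body relies on (numpy, scipy, an older scikit-image that accepts indices=False, and sklearn's DBSCAN) are in scope; the values below are illustrative.

import numpy as np

vdf = np.zeros((64, 64))
vdf[10:25, 10:25] = 1.0      # first grain
vdf[35:55, 30:50] = 2.0      # second grain
sep = separate_watershed(vdf, min_distance=5, min_size=10, marker_radius=2)
# the result stacks one masked copy of the VDF per separated grain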
Example #27
0
def get_distance(m_b):
    distance = ndi.distance_transform_edt(m_b)
    return distance
Example #28
0
    def run(self, workspace):
        objects = workspace.object_set.get_objects(self.objects_name.value)
        assert isinstance(objects, cpo.Objects)
        labels = objects.segmented
        if self.relabel_option == OPTION_SPLIT:
            output_labels, count = scind.label(labels > 0, np.ones((3, 3),
                                                                   bool))
        else:
            if self.unify_option == UNIFY_DISTANCE:
                mask = labels > 0
                if self.distance_threshold.value > 0:
                    #
                    # Take the distance transform of the reverse of the mask
                    # and figure out what points are less than 1/2 of the
                    # distance from an object.
                    #
                    d = scind.distance_transform_edt(~mask)
                    mask = d < self.distance_threshold.value / 2 + 1
                output_labels, count = scind.label(mask, np.ones((3, 3), bool))
                output_labels[labels == 0] = 0
                if self.wants_image:
                    output_labels = self.filter_using_image(workspace, mask)
            elif self.unify_option == UNIFY_PARENT:
                parent_objects = workspace.object_set.get_objects(
                    self.parent_object.value)
                output_labels = parent_objects.segmented.copy()
                output_labels[labels == 0] = 0

        output_objects = cpo.Objects()
        output_objects.segmented = output_labels
        if objects.has_small_removed_segmented:
            output_objects.small_removed_segmented = \
                copy_labels(objects.small_removed_segmented, output_labels)
        if objects.has_unedited_segmented:
            output_objects.unedited_segmented = \
                copy_labels(objects.unedited_segmented, output_labels)
        output_objects.parent_image = objects.parent_image
        workspace.object_set.add_objects(output_objects,
                                         self.output_objects_name.value)

        measurements = workspace.measurements
        add_object_count_measurements(measurements,
                                      self.output_objects_name.value,
                                      np.max(output_objects.segmented))
        add_object_location_measurements(measurements,
                                         self.output_objects_name.value,
                                         output_objects.segmented)

        #
        # Relate the output objects to the input ones and record
        # the relationship.
        #
        children_per_parent, parents_of_children = \
            objects.relate_children(output_objects)
        measurements.add_measurement(
            self.objects_name.value,
            FF_CHILDREN_COUNT % self.output_objects_name.value,
            children_per_parent)
        measurements.add_measurement(self.output_objects_name.value,
                                     FF_PARENT % self.objects_name.value,
                                     parents_of_children)
        if self.wants_outlines:
            outlines = cellprofiler.cpmath.outline.outline(output_labels)
            outline_image = cpi.Image(outlines.astype(bool))
            workspace.image_set.add(self.outlines_name.value, outline_image)

        if workspace.frame is not None:
            workspace.display_data.orig_labels = objects.segmented
            workspace.display_data.output_labels = output_objects.segmented
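A minimal sketch of the UNIFY_DISTANCE trick used above: the distance transform of the background grows each object by half the threshold, so objects closer than the threshold merge when relabeled. The arrays here are illustrative.

import numpy as np
from scipy import ndimage as scind

labels = np.zeros((1, 12), dtype=int)
labels[0, 2], labels[0, 9] = 1, 2      # two objects 7 pixels apart

d = scind.distance_transform_edt(labels == 0)
mask = d < 8 / 2 + 1                   # grow each object by threshold / 2
merged, count = scind.label(mask, np.ones((3, 3), bool))
merged[labels == 0] = 0                # keep labels only on object pixels
# count == 1: the two objects now share one label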
Example #29
0
import numpy as np
import matplotlib.pyplot as plt

from scipy import ndimage as ndi
from skimage.segmentation import watershed
from skimage.feature import peak_local_max

# Generate an initial image with two overlapping circles
x, y = np.indices((80, 80))
x1, y1, x2, y2 = 28, 28, 44, 52
r1, r2 = 16, 20
mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2
mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2
image = np.logical_or(mask_circle1, mask_circle2)

# Now we want to separate the two objects in image
# Generate the markers as local maxima of the distance to the background
distance = ndi.distance_transform_edt(image)
local_maxi = peak_local_max(distance,
                            indices=False,
                            footprint=np.ones((3, 3)),
                            labels=image)
markers = ndi.label(local_maxi)[0]
labels = watershed(-distance, markers, mask=image)

fig, axes = plt.subplots(ncols=3, figsize=(9, 3), sharex=True, sharey=True)
ax = axes.ravel()

ax[0].imshow(image, cmap=plt.cm.gray)
ax[0].set_title('Overlapping objects')
ax[1].imshow(-distance, cmap=plt.cm.gray)
ax[1].set_title('Distances')
ax[2].imshow(labels, cmap=plt.cm.nipy_spectral)
ax[2].set_title('Separated objects')

plt.show()
Example #30
0
def regionprops_3D(im):
    r"""
    Calculates various metrics for each labeled region in a 3D image.

    The ``regionsprops`` method in **skimage** is very thorough for 2D images,
    but is a bit limited when it comes to 3D images, so this function aims
    to fill this gap.

    Parameters
    ----------
    im : array_like
        An image containing at least one labeled region.  If a boolean image
        is received then the ``True`` voxels are treated as a single region
        labeled ``1``.  Regions labeled 0 are ignored in all cases.

    Returns
    -------
    An augmented version of the list returned by skimage's ``regionprops``.
    Information, such as ``volume``, can be found for region A using the
    following syntax: ``result[A-1].volume``.

    Notes
    -----
    This function may seem slow compared to the skimage version, but that is
    because skimage defers calculation of certain properties until they are
    accessed, while this one evaluates everything (including the deferred
    properties from skimage's ``regionprops``).

    Regions can be identified using a watershed algorithm, which can be a bit
    tricky to tune to obtain the desired results.  *PoreSpy* includes the
    SNOW algorithm, which may be helpful.

    """
    print('_'*60)
    print('Calculating regionprops')

    results = regionprops(im)
    for i in tqdm(range(len(results))):
        mask = results[i].image
        mask_padded = sp.pad(mask, pad_width=1, mode='constant')
        temp = spim.distance_transform_edt(mask_padded)
        dt = extract_subsection(temp, shape=mask.shape)
        # ---------------------------------------------------------------------
        # Slice indices
        results[i].slice = results[i]._slice
        # ---------------------------------------------------------------------
        # Volume of regions in voxels
        results[i].volume = results[i].area
        # ---------------------------------------------------------------------
        # Volume of bounding box, in voxels
        results[i].bbox_volume = sp.prod(mask.shape)
        # ---------------------------------------------------------------------
        # Create an image of the border
        results[i].border = dt == 1
        # ---------------------------------------------------------------------
        # Create an image of the maximal inscribed sphere
        r = dt.max()
        inv_dt = spim.distance_transform_edt(dt < r)
        results[i].inscribed_sphere = inv_dt < r
        # ---------------------------------------------------------------------
        # Find surface area using marching cubes and analyze the mesh
        tmp = sp.pad(sp.atleast_3d(mask), pad_width=1, mode='constant')
        verts, faces, norms, vals = marching_cubes_lewiner(volume=tmp, level=0)
        results[i].surface_mesh_vertices = verts
        results[i].surface_mesh_simplices = faces
        area = mesh_surface_area(verts, faces)
        results[i].surface_area = area
        # ---------------------------------------------------------------------
        # Find sphericity
        vol = results[i].volume
        r = (3/4/sp.pi*vol)**(1/3)
        a_equiv = 4*sp.pi*(r)**2
        a_region = results[i].surface_area
        results[i].sphericity = a_equiv/a_region
        # ---------------------------------------------------------------------
        # Find skeleton of region
        results[i].skeleton = skeletonize_3d(mask)
        # ---------------------------------------------------------------------
        # Volume of convex image, equal to area in 2D, so just translating
        results[i].convex_volume = results[i].convex_area
        # ---------------------------------------------------------------------
        # Convert region grid to a graph
        am = grid_to_graph(*mask.shape, mask=mask)
        results[i].graph = am

    return results
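A hypothetical call, assuming the helpers imported by the surrounding module (scipy as sp/spim, tqdm, porespy's extract_subsection, skimage's marching_cubes_lewiner and skeletonize_3d, sklearn's grid_to_graph) are available:

import numpy as np

im = np.zeros((20, 20, 20), dtype=int)
im[5:15, 5:15, 5:15] = 1              # one cubic region labeled 1
props = regionprops_3D(im)
print(props[0].volume, props[0].bbox_volume, props[0].sphericity)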