Example #1
    def watershed(self, Ta=0):
        """
        Identification of particles through inverted slope comparisons
        
        Parameters
        ----------
        Ta : int
            Threshold value above which particles are identified
        """
        self.Ta = Ta
        dist = mh.distance(self.image > 0.05 * self.Ta)
        dist1 = dist
        dist = dist.max() - dist
        dist -= dist.min()  # inverting color
        dist = dist / float(dist.ptp()) * 255
        dist = dist.astype(np.uint8)
        self.dist = mh.stretch(dist, 0, 255)
        self.labels, self.n_particles = mh.label(self.image > 0.7 * self.Ta)

        thr = np.median(dist)

        # not accurate to particles detected(?)
        # but matches dist graph well
        thresh = (dist < thr)
        areas = 0.9 * mh.cwatershed(dist, self.labels)
        self.areas = areas * thresh
        return
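For context, a minimal standalone sketch of the same inverted-distance watershed recipe; the function name and thresholds below are illustrative, not part of the original class, and a non-empty thresholded image is assumed:

import numpy as np
import mahotas as mh

def watershed_particles(image, Ta=100):
    # loose threshold -> distance transform, inverted so particle centres become basins
    dist = mh.distance(image > 0.05 * Ta)
    dist = dist.max() - dist
    dist -= dist.min()
    dist = (dist / float(dist.ptp()) * 255).astype(np.uint8)
    # strict threshold -> seed labels for the watershed
    labels, n_particles = mh.label(image > 0.7 * Ta)
    areas = mh.cwatershed(dist, labels) * (image > 0.7 * Ta)
    return areas, n_particles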
Example #2
def buildGraph(img, skel):
    print('Building tree...')
    G = nx.Graph()
    visited = np.zeros_like(
        skel)  # a new array of zeros, same dimensions as skel

    print('  distance transform...')
    dmap = mh.distance(img)

    print('  constructing tree...')
    buildTree(skel, visited, dmap, root, j, t, G)

    # measure node diameters
    measureDia(G, dmap)

    # automatically remove bad nodes and branches, e.g. too small ones
    removed = cleanup(G, root)
    # show (automatically) removed nodes
    for x, y in removed:
        plt.gca().add_patch(plt.Circle((x, y), radius=4, alpha=.4))

    updateMeasures(G, root)

    # measure node diameters 2
    measureLeafWidth(contour, G, root)

    initializeColorNode(G)
    print('Done.')
    return G
Example #3
    def extract(self):
        '''Extracts point pattern features.

        Returns
        -------
        pandas.DataFrame
            extracted feature values for each object in
            :attr:`label_image <jtlib.features.PointPattern.label_image>`
        '''

        logger.info('extract point pattern features')
        features = dict()
        for obj in self.parent_object_ids:
            parent_obj_img = self.get_parent_object_mask_image(obj)
            points_img = self.get_points_object_label_image(obj)
            point_ids = np.unique(points_img)[1:]
            mh.labeled.relabel(points_img, inplace=True)

            size = np.sum(parent_obj_img)
            abs_border_dist_img = mh.distance(parent_obj_img).astype(float)
            rel_border_dist_img = abs_border_dist_img / size
            centroids = mh.center_of_mass(points_img, labels=points_img)
            centroids = centroids[1:, :].astype(int)
            abs_distance_matrix = squareform(pdist(centroids))
            rel_distance_matrix = abs_distance_matrix / size

            indexer = np.arange(centroids.shape[0])
            if len(indexer) == 0:
                continue
            if len(indexer) == 1:
                y, x = centroids[0, :]
                values = [
                    abs_border_dist_img[y, x], rel_border_dist_img[y, x],
                    np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
                ]
                features[point_ids[0]] = values
                continue
            for i, (y, x) in enumerate(centroids):
                idx = indexer != i
                values = [
                    abs_border_dist_img[y, x],
                    rel_border_dist_img[y, x],
                    np.nanmin(abs_distance_matrix[i, idx]),
                    np.nanmin(rel_distance_matrix[i, idx]),
                    np.nanmean(abs_distance_matrix[i, idx]),
                    np.nanstd(abs_distance_matrix[i, idx]),
                    np.nanmean(rel_distance_matrix[i, idx]),
                    np.nanstd(rel_distance_matrix[i, idx]),
                ]
                features[point_ids[i]] = values

        ids = features.keys()
        values = list()
        nans = [np.nan for _ in range(len(self.names))]
        for i in self.object_ids:
            if i not in ids:
                logger.warn('values missing for object #%d', i)
                features[i] = nans
            values.append(features[i])
        return pd.DataFrame(values, columns=self.names, index=self.object_ids)
Example #4
File: watershed.py Project: afcarl/flywing
def get_seeds(boundary, method='grid', next_id=1):

    if method == 'grid':

        height = boundary.shape[0]
        width = boundary.shape[1]

        seed_positions = np.ogrid[0:height:seed_distance,
                                  0:width:seed_distance]
        num_seeds_y = seed_positions[0].size
        num_seeds_x = seed_positions[1].size
        num_seeds = num_seeds_x * num_seeds_y

        seeds = np.zeros_like(boundary).astype(np.int32)
        seeds[seed_positions] = np.arange(next_id,
                                          next_id + num_seeds).reshape(
                                              (num_seeds_y, num_seeds_x))

    if method == 'minima':

        minima = mahotas.regmin(boundary)
        seeds, num_seeds = mahotas.label(minima)
        seeds += next_id
        seeds[seeds == next_id] = 0

    if method == 'maxima_distance':

        distance = mahotas.distance(boundary < 0.5)
        maxima = mahotas.regmax(distance)
        seeds, num_seeds = mahotas.label(maxima)
        seeds += next_id
        seeds[seeds == next_id] = 0

    return seeds, num_seeds
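A hedged usage sketch for get_seeds; seed_distance is a module-level setting in the source project, so it is supplied here explicitly, and the boundary map is a toy array rather than real data:

import numpy as np
import mahotas

seed_distance = 10  # assumed grid spacing; a module-level constant in the original project

boundary = np.zeros((100, 100), dtype=float)
boundary[50, :] = 1.0  # toy boundary separating two regions

grid_seeds, n_grid = get_seeds(boundary, method='grid')
dist_seeds, n_dist = get_seeds(boundary, method='maxima_distance')
print(n_grid, 'grid seeds,', n_dist, 'distance-maxima seeds')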
Example #5
def chromatids_elements(TopHatedChromosome):
    '''Take a high-pass filtered (or top-hat) image of a chromosome and label the chromatid elements
    '''
    threshed = TopHatedChromosome > 0
    #threshed = mh.open(threshed)
    labthres, _ = mh.label(threshed)
    labsz = mh.labeled.labeled_size(labthres)
    mh.labeled.remove_regions_where(labthres, labsz < 2, inplace=True)
    threshed = labthres > 0

    skel2 = mh.thin(threshed)
    bp2 = branchedPoints(skel2, showSE=False) > 0
    rem = np.logical_and(skel2, np.logical_not(bp2))
    labskel, _ = mh.labeled.label(rem)
    #print labskel.dtype
    size_sk = mh.labeled.labeled_size(labskel)
    #print size_sk
    skelem = mh.labeled.remove_regions_where(labskel, size_sk < 4)

    distances = mh.stretch(mh.distance(threshed))
    surface = (distances.max() - distances)
    chr_label = mh.cwatershed(surface, skelem)
    #print chr_label.dtype, type(chr_label)
    chr_label *= threshed

    #This conversion is important !!
    chr_label = chr_label.astype(np.intc)
    #-------------------------------
    mh.labeled.relabel(chr_label, inplace=True)
    labsize2 = mh.labeled.labeled_size(chr_label)
    cleaned = mh.labeled.remove_regions_where(chr_label, labsize2 < 8)
    mh.labeled.relabel(cleaned, inplace=True)
    return cleaned
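chromatids_elements relies on a branchedPoints helper from the same project (not shown here), so the sketch below only illustrates the expected call, with a toy top-hat image standing in for real chromosome data:

import numpy as np

# toy stand-in for a top-hat filtered chromosome image
tophat = np.zeros((64, 64))
tophat[20:44, 28:36] = 1.0

# requires branchedPoints() from the original project to be importable
chromatid_labels = chromatids_elements(tophat)
print(chromatid_labels.max(), 'chromatid elements')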
Example #6
    def perform_watershed(threshed, maxima):

        distances = mh.stretch(mh.distance(threshed))
        spots, n_spots = mh.label(maxima, Bc=np.ones((3, 3)))
        surface = (distances.max() - distances)

        return sk.morphology.watershed(surface, spots, mask=threshed)
Example #7
def test_4d():
    np.random.seed(324)
    for _ in range(16):
        binim = np.random.random((4,8,4,6)) > .5
        dist = distance(binim)
        assert dist.shape == binim.shape
        assert np.all(dist[~binim] == 0)
        assert np.all(dist == _slow_dist4d(binim, 'euclidean2'))
Example #8
def test_4d():
    np.random.seed(324)
    for _ in range(16):
        binim = np.random.random((4, 8, 4, 6)) > .5
        dist = distance(binim)
        assert dist.shape == binim.shape
        assert np.all(dist[~binim] == 0)
        assert np.all(dist == _slow_dist4d(binim, 'euclidean2'))
Example #9
def segment(img, T):
    binimg = (img > T)
    binimg = ndimage.median_filter(binimg, size=5)
    dist = mahotas.distance(binimg)
    dist = dist.astype(np.int32)
    maxima = pymorph.regmax(dist)
    maxima,_ = ndimage.label(maxima)
    return mahotas.cwatershed(dist.max() - dist, maxima)
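A hedged usage sketch for segment; it assumes pymorph and scipy.ndimage are importable (as in the original snippet) and uses a synthetic image of two touching discs instead of real data:

import numpy as np

# two touching bright discs on a dark background (illustrative data only)
yy, xx = np.mgrid[:128, :128]
img = 200.0 * ((((yy - 64) ** 2 + (xx - 52) ** 2) < 400) |
               (((yy - 64) ** 2 + (xx - 78) ** 2) < 400))

labels = segment(img, T=100)
print(labels.max(), 'regions')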
Example #10
def _distance_transform_seeds(pmap, threshold, start_id):

    distance = mahotas.distance(pmap < threshold)
    maxima = mahotas.regmax(distance)
    seeds, num_seeds = mahotas.label(maxima)
    seeds += start_id
    #seeds[seeds==start_id] = 0 # TODO I don't get this
    return seeds, num_seeds
Example #11
 def segmentCones(self):
     """Function to take regional maxima and segment the cones from them"""
     dist = mh.distance(self.orgImage > self.params['threshold'])
     dist = dist.max() - dist
     dist = dist - dist.min()
     dist = dist/float(dist.ptp()) * 255
     dist = dist.astype(np.uint8)
     self.params['cones'] = mh.cwatershed(dist, self.ConeCounts.Seeds)
Example #12
File: Skeleton.py Project: avrajit/DIRT
 def skel(self, img):
     img[0,:]=0 # make 1st line in the image black to achieve consistent result between distance field and medial axis skeleton.
     img[len(img)-1,:]=0 # make last line in the image black to achieve consistent result between distance field and medial axis skeleton.
     img[:,len(img[0])-1]=0 # make right column in the image black to achieve consistent result between distance field and medial axis skeleton.
     img[:,0]=0 # make left column in the image black to achieve consistent result between distance field and medial axis skeleton.
     dmap = m.distance(img>0,metric='euclidean')
     dmap=np.sqrt(dmap)*2
     skelImg=m.thin(img>0)
     
     return skelImg, dmap
Example #13
def nuclei_regions(comp_map):
    """
    NUCLEI_REGIONS: extract "support regions" for nuclei. This function
    expects as input a "tissue components map" (as returned, for example,
    by segm.tissue_components) where values of 1 indicate pixels having
    a color corresponding to nuclei.
    It returns a set of compact support regions corresponding to the
    nuclei.


    :param comp_map: numpy.ndarray
       A mask identifying different tissue components, as obtained
       by classification in RGB space. The value 0

       See segm.tissue.tissue_components()

    :return:
    """
    # Deprecated:...
    # img_hem, _ = rgb2he(img0, normalize=True)

    # img_hem = denoise_tv_bregman(img_hem, HE_OPTS['bregm'])

    # Get a mask of nuclei regions by unsupervised clustering:
    # Vector Quantization: background, mid-intensity Hem and high intensity Hem
    # -train the quantizer for 3 levels
    # vq = KMeans(n_clusters=3)
    # vq.fit(img_hem.reshape((-1,1)))
    # -the level of interest is the brightest:
    # k = np.argsort(vq.cluster_centers_.squeeze())[2]
    # mask_hem = (vq.labels_ == k).reshape(img_hem.shape)
    # ...end deprecated

    # Final mask:
    mask = (comp_map == 1)   # use the components classified by color

    # mask = morph.closing(mask, selem=HE_OPTS['strel1'])
    # mask = morph.opening(mask, selem=HE_OPTS['strel1'])
    # morph.remove_small_objects(mask, in_place=True)
    # mask = (mask > 0)

    mask = mahotas.close_holes(mask)
    morph.remove_small_objects(mask, in_place=True)

    dst  = mahotas.stretch(mahotas.distance(mask))
    Bc=np.ones((9,9))
    lmax = mahotas.regmax(dst, Bc=Bc)
    spots, _ = mahotas.label(lmax, Bc=Bc)
    regions = mahotas.cwatershed(dst.max() - dst, spots) * mask

    return regions
# end NUCLEI_REGIONS
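A hedged usage sketch for nuclei_regions; morph is assumed to be skimage.morphology (as in the module this was taken from), and the component map is a toy array in which the value 1 marks nuclei-coloured pixels:

import numpy as np

# toy tissue-component map: 0 = other tissue/background, 1 = nuclei-coloured pixels
comp_map = np.zeros((200, 200), dtype=int)
comp_map[40:90, 40:90] = 1
comp_map[60:110, 120:170] = 1

regions = nuclei_regions(comp_map)
print(regions.max(), 'nuclei support regions')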
Example #14
def watershed_single_channel(np_array, threshold=10, blur_factor=9, max_bb_size=13000, min_bb_size=1000, footprint=10):
# INPUT:
#     np_array: numpy image of single channel
#     threshold: minimum value to be analysed
#     blur_factor: gaussian blur.  some blur is good.  too much is bad.  too little is bad too.
#     max_bb_size: set maximum size for a bounding box.
#     min_bb_size: set minimum size for a bounding box.
#     footprint: box of size footprint x footprint used to find regions of maximum intensity.
# OUTPUT:
#     return: array of bounding boxes for the channel image given
# NOTE: input values should be tuned.  this algorithm is of little use if the input arguments are not optimized... right now tuning is done by hand.  otherwise just leave the defaults

    # nuclear is a blurred version of the original image
    nuclear = mh.gaussian_filter(np_array, blur_factor)
    # calculate a minimum threshold using otsu method
    otsu_thresh = threshold_otsu(nuclear)
    
    # determine minimum threshold from otsu and the input argument.  if there is little/no signal in the image, the otsu value can be way too low
    set_thresh = None
    if threshold>otsu_thresh:
        set_thresh = threshold
    else:
        set_thresh = otsu_thresh
    
    # set values lower than set_thresh to zero
    index_otsu = nuclear < set_thresh
    nuclear[index_otsu] = 0
    
    # determine areas of maximum intensity and the distance between them
    thresh = (nuclear > nuclear.mean())
    dist = mh.stretch(mh.distance(thresh))
    Bc = np.ones((footprint, footprint))

    # generate region_props via the watershed algorithm
    maxima = mh.morph.regmax(dist, Bc=Bc)
    spots, n_spots = mh.label(maxima, Bc=Bc)
    sizes = mh.labeled.labeled_size(spots)
    too_big = np.where(sizes > max_bb_size)
    spots = mh.labeled.remove_regions(spots, too_big)
    spots = mh.labeled.remove_bordering(spots)
    spots, n_left = mh.labeled.relabel(spots)
    surface = (dist.max() - dist)
    areas = mh.cwatershed(surface, spots)
    areas *= thresh

    # get list of region properties from the watershed.  region_props holds a lot of information, much of it inaccurate.  NEVER TRUST REGIONPROPS!
    region_props=regionprops(areas,intensity_image=nuclear)

    # generate array of bounding boxes from measured region properties. call bbs_from_rprops()
    watershed_bb_array = bbs_from_rprops(region_props, max_bb_size, min_bb_size)

    return(watershed_bb_array)
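A hedged usage sketch for watershed_single_channel; threshold_otsu, regionprops and the project helper bbs_from_rprops must be importable for this to run, and the input is a synthetic two-spot image rather than a real channel:

import numpy as np

# illustrative single-channel image with two bright, blurred spots
yy, xx = np.mgrid[:256, :256]
np_img = 200.0 * np.exp(-((yy - 100) ** 2 + (xx - 100) ** 2) / 800.0)
np_img += 200.0 * np.exp(-((yy - 160) ** 2 + (xx - 170) ** 2) / 800.0)

boxes = watershed_single_channel(np_img, threshold=10, blur_factor=9,
                                 max_bb_size=13000, min_bb_size=1000, footprint=10)
print(len(boxes), 'candidate bounding boxes')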
Example #15
def watershed(Image_Current_T, CellDiam):

    #If there are pixels above the threshold proceed to watershed
    if Image_Current_T.max() == True:

        #Create distance transform from thresholded image to help identify cell seeds
        Image_Current_Tdist = mh.distance(Image_Current_T)
        Image_Current_Tdist[Image_Current_Tdist < CellDiam * 0.3] = 0

        #Define Sure Background for watershed
        #Background is dilated proportionally to the cell diameter.  This allows final cell sizes to be a bit larger at the end.
        #Will not affect cell number but can influence overlap
        #See https://docs.opencv.org/3.4/d3/db4/tutorial_py_watershed.html for tutorial that helps explain this
        Dilate_Iterations = int(CellDiam // 2)
        Dilate_bc = np.ones(
            (3, 3))  #Use square structuring element instead of cross
        Image_Current_SureBackground = Image_Current_T
        for j in range(Dilate_Iterations):
            Image_Current_SureBackground = mh.dilate(
                Image_Current_SureBackground, Bc=Dilate_bc)

        #Create seeds/foreground for watershed
        #See https://docs.opencv.org/3.4/d3/db4/tutorial_py_watershed.html for tutorial that helps explain this
        Regmax_bc = np.ones((CellDiam, CellDiam))
        Image_Current_Seeds = mh.locmax(Image_Current_Tdist, Bc=Regmax_bc)
        Image_Current_Seeds[Image_Current_Tdist == 0] = False
        Image_Current_Seeds = mh.dilate(Image_Current_Seeds, np.ones((3, 3)))
        seeds, nr_nuclei = mh.label(Image_Current_Seeds, Bc=np.ones((3, 3)))
        Image_Current_Unknown = Image_Current_SureBackground.astype(
            int) - Image_Current_Seeds.astype(int)
        seeds += 1
        seeds[Image_Current_Unknown == 1] = 0

        #Perform watershed
        Image_Current_Watershed = mh.cwatershed(
            surface=Image_Current_SureBackground, markers=seeds)
        Image_Current_Watershed -= 1  #Done so that background value is equal to 0.
        Image_Current_Cells = Image_Current_Watershed

    #If there are no pixels above the threshold watershed procedure has issues.  Set cell count to 0.
    elif Image_Current_T.max() == False:
        Image_Current_Cells = Image_Current_T.astype(int)
        nr_nuclei = 0

    #return Image_Current_Seeds, nr_nuclei
    return Image_Current_Cells, nr_nuclei
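A hedged usage sketch for this watershed helper, with a toy thresholded image of two touching round cells; CellDiam is in pixels:

import numpy as np

# toy thresholded image: two touching cells roughly 16 px across
yy, xx = np.mgrid[:100, :100]
mask = ((((yy - 50) ** 2 + (xx - 40) ** 2) < 64) |
        (((yy - 50) ** 2 + (xx - 54) ** 2) < 64))

cells, n_cells = watershed(mask, CellDiam=15)
print(n_cells, 'cells found')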
Example #16
def _segment(cell):
    # takes a numpy array of a microscopy
    # segments it based on filtering the image then applying a distance transform and
    # a watershed method to get the proper segmentation

    import mahotas as mh
    filt_cell = mh.gaussian_filter(cell, 2)
    T = mh.thresholding.otsu((np.rint(filt_cell).astype('uint8')))
    dist = mh.stretch(mh.distance(filt_cell > T))
    
    Bc = np.ones((3,3))
    rmax = mh.regmin((dist))
    rmax = np.invert(rmax)
    labels, num_cells = mh.label(rmax, Bc)
    surface = (dist.max() - dist)
    areas = mh.cwatershed(surface, labels)
    areas *= (filt_cell > T)
    return areas
Example #17
def _segment(cell):
    # takes a numpy array of a microscopy
    # segments it based on filtering the image then applying a distance transform and
    # a watershed method to get the proper segmentation

    import mahotas as mh
    filt_cell = mh.gaussian_filter(cell, 2)
    T = mh.thresholding.otsu((np.rint(filt_cell).astype('uint8')))
    dist = mh.stretch(mh.distance(filt_cell > T))

    Bc = np.ones((3, 3))
    rmax = mh.regmin((dist))
    rmax = np.invert(rmax)
    labels, num_cells = mh.label(rmax, Bc)
    surface = (dist.max() - dist)
    areas = mh.cwatershed(surface, labels)
    areas *= (filt_cell > T)
    return areas
Example #18
    def run(self):
        while not self.terminate:
            idx, x, y, value = self.queue.get()
            self.zero[int(y), int(x)] = False
            dmap = mh.distance(self.zero)
            bools = dmap < self.brush_size_sq
            #self.binary_array[idx][bools] = value
            np.putmask(self.binary_array[idx], bools, value)
            self.edited[idx] = True

            #self.iw.overlay_item.setImage(self.binary_array[idx])
            self.iw.olay_updated.emit(self.binary_array[idx])
            self.zero[int(y), int(x)] = True

            self.queue.task_done()

        self.terminate = False
        return 0
Example #19
def segment(fname):
    dna = mh.imread(fname)
    dna = dna[:,:,0]

    sigma = 12.
    dnaf = mh.gaussian_filter(dna, sigma)

    T_mean = dnaf.mean()
    bin_image = dnaf > T_mean
    labeled, nr_objects = mh.label(bin_image)

    maxima = mh.regmax(mh.stretch(dnaf))
    maxima = mh.dilate(maxima, np.ones((5,5)))
    maxima,_ = mh.label(maxima)
    dist = mh.distance(bin_image)
    dist = 255 - mh.stretch(dist)
    watershed = mh.cwatershed(dist, maxima)
    watershed *= bin_image
    return watershed
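A hedged usage sketch; the file name is only a placeholder for any RGB DNA/nuclei image readable by mh.imread:

import matplotlib.pyplot as plt

labeled = segment('dna_image.jpeg')   # placeholder path, not from the original project
plt.imshow(labeled)
plt.show()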
Example #20
File: __init__.py Project: emilroz/skopy
def identify_primary_objects(image, footprint=(16, 16), sigma=1):
    """
    Identifies primary components in an image.
    """
    response = skimage.filters.gaussian(image, sigma)

    mask = response > skimage.filters.threshold_li(response)

    image = mahotas.distance(mask)

    image = skimage.exposure.rescale_intensity(image)

    footprint = numpy.ones(footprint)

    markers = mahotas.regmax(image, footprint)

    markers, _ = mahotas.label(markers, footprint)

    image = numpy.max(image) - image

    return skimage.segmentation.watershed(image, markers, mask=mask)
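A hedged usage sketch for identify_primary_objects using a sample image that ships with scikit-image; any grayscale image with bright objects should behave similarly:

import skimage.data

image = skimage.data.coins()
labels = identify_primary_objects(image, footprint=(16, 16), sigma=1)
print(labels.max(), 'primary objects')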
Example #21
def segment_nuc(im):
  T = mh.thresholding.otsu(im) # calculate a threshold value

  # Apply a gaussian filter to smooth the image
  smoothed = mh.gaussian_filter(im, 8)
  thresholded= smoothed > T # do threshold

  # Watershed
  smoothed = mh.gaussian_filter(im, 10)
  regional_max = mh.regmax(smoothed)
  dist_im = mh.distance(thresholded)
  seeds,count = mh.label(regional_max) # nuclei count
  watershedded = mh.cwatershed(dist_im, seeds)

  # Remove areas that aren't nuclei
  watershedded[np.logical_not(thresholded)] = 0


  cell_id = ['abc', 'def', 'ghi']
  # cell_centroid = [(1,2),(3,4)]
  # return watershedded, cell_id
  return watershedded
Example #22
def buildGraph(img, skel):
    print('Building tree...')
    G = nx.Graph()
    visited = np.zeros_like(skel) # a new array of zeros, same dimensions as skel

    print('  distance transform...')
    dmap = mh.distance(img)

    print('  constructing tree...')
    buildTree(skel, visited, dmap, root, j, t, G)

    # measure node diameters
    measureDia(G, dmap)

    # automatically remove bad nodes and branches, e.g. too small ones
    removed = cleanup(G, root)
    # show (automatically) removed nodes
    for x,y in removed:
        plt.gca().add_patch(plt.Circle((x,y), radius=4, alpha=.4))

    updateMeasures(G, root)

    print('Done.')
    return G
Example #23
from __future__ import print_function

import pylab as p
import numpy as np
import mahotas

f = np.ones((256,256), bool)
f[200:,240:] = False
f[128:144,32:48] = False
# f is basically True with the exception of two islands: one in the lower-right
# corner, another, middle-left

dmap = mahotas.distance(f)
p.imshow(dmap)
p.show()
Example #24
#%%
desc = []
labels = []

for i in range(0, 3, 1):
    im = skimage.io.imread('labeled/{0}.bmp'.format(i))
    im = im[:(im.shape[0] / b) * b, :(im.shape[1] / b) * b]

    plt.imshow(im)

    lg = skimage.io.imread('labeled/{0}_map.png'.format(i))
    lg = lg[:(im.shape[0] / b) * b, :(im.shape[1] / b) * b]

    lg = lg[:, :, 3]

    lg = mahotas.distance(lg) > 100

    descriptors, (nhd, nwd), (dh, dw) = build_descriptors(im, b)

    fig = plt.gcf()
    for ii in range(nhd):
        for jj in range(nwd):
            desc.append(descriptors[ii * nwd + jj])
            labels.append(lg[ii * b + dh / 2 + b / 2, jj * b + dw / 2 + b / 2])
            if labels[-1]:
                circle = plt.Circle((jj * b + dw / 2 + b / 2, ii * b + dh / 2 + b / 2), 1.0, color='r')
                fig.gca().add_artist(circle)

    plt.show()

svm = sklearn.svm.LinearSVC(class_weight = 'balanced')
Example #25
def compare_slow(bw):
    for metric in ('euclidean', 'euclidean2'):
        f = distance(bw, metric)
        sd = _slow_dist(bw, metric)
        assert np.all(f == sd)
Example #26
filtered = mh.gaussian_filter(image, 10)  # We use a Gaussian filter to smooth the image.
result = filtered.astype(
    'uint8'
)  # The mh.gaussian_filter() function sets the variable's values to float64.
rmax = mh.regmax(result)  # Find the maximum values.

print(rmax)
pylab.imshow(mh.overlay(image, rmax))
pylab.show()

labeled, nr_objects = mh.label(rmax)
# The mh.overlay() function sets img as the background and overlays the values of rmax in the red channel.

print('Found {} objects.'.format(nr_objects))
pylab.imshow(labeled)
# pylab.gray()
pylab.show()

dist = mh.distance(result)
dist = dist.max() - dist
dist -= dist.min()
dist = dist / float(dist.ptp()) * 255
dist = dist.astype(np.uint8)
objects = mh.cwatershed(dist, labeled)
whole = mh.segmentation.gvoronoi(
    objects
)  # Voronoi segmentation: each pixel takes the value of the nearest maximum.
pylab.imshow(objects)
pylab.show()

# print (labeled).shape
# print (labeled).max()
# print (labeled).min()
Example #27
dnaf = mh.gaussian_filter(dnaf, 8)
rmax = mh.regmax(dnaf)
pylab.imshow(mh.overlay(
    dna, rmax))  # print dna and rmax (with second channel in red)
pylab.show()  # seeds only show when image is zoomed in

dnaf = mh.gaussian_filter(dnaf,
                          16)  # apply different filter to yield better result
rmax = mh.regmax(dnaf)
pylab.imshow(mh.overlay(dna, rmax))
pylab.show()

seeds, nr_nuclei = mh.label(rmax)  # nuclei count
print(nr_nuclei)  # unlike the example, the result is 36 compared to 22

dist = mh.distance(dnat)
dist = dist.max() - dist
dist -= dist.min()
dist = dist / float(dist.ptp()) * 255
dist = dist.astype(np.uint8)
pylab.imshow(dist)
pylab.show()

nuclei = mh.cwatershed(dist, seeds)
pylab.imshow(nuclei)
pylab.show()

pylab.show()
nuclei[nuclei1] = 0
pylab.imshow(nuclei1)
pylab.show()
Example #28
def process_image(im, d, test=False, remove_bordering=False):
    plt.figure(1, frameon=False)
    sigma = 75
    blurred = mh.gaussian_filter(im.astype(float), sigma)
    T_mean = blurred.mean()
    bin_image = im > T_mean

    maxima = mh.regmax(mh.stretch(blurred))
    maxima, _ = mh.label(maxima)

    dist = mh.distance(bin_image)

    dist = 255 - mh.stretch(dist)
    watershed = mh.cwatershed(dist, maxima)

    _, old_nr_objects = mh.labeled.relabel(watershed)

    sizes = mh.labeled.labeled_size(watershed)
    min_size = 100000
    filtered = mh.labeled.remove_regions_where(
        watershed * bin_image, sizes < min_size)

    _, nr_objects = mh.labeled.relabel(filtered)
    print('Removed', old_nr_objects - nr_objects, 'small regions')
    old_nr_objects = nr_objects

    if (remove_bordering):
        filtered = mh.labeled.remove_bordering(filtered)
    labeled, nr_objects = mh.labeled.relabel(filtered)

    print('Removed', old_nr_objects - nr_objects, 'bordering cells')

    print("Number of cells: {}".format(nr_objects))
    fin_weights = mh.labeled_sum(im.astype(np.uint32), labeled)
    fin_sizes = mh.labeled.labeled_size(labeled)
    fin_result = fin_weights / fin_sizes
    if (test):
        f, axarr = plt.subplots(2, 2)
        for i in range(2):
            for j in range(2):
                axarr[i][j].axis('off')
        axarr[0, 0].imshow(im)
        axarr[0, 0].set_title('Source')
        axarr[0, 1].imshow(labeled)
        axarr[0, 1].set_title('Labeled')
        axarr[1, 0].imshow(watershed)
        axarr[1, 0].set_title('Watershed')
        axarr[1, 1].imshow(blurred)
        axarr[1, 1].set_title('Blurred')
        for i in range(1, nr_objects + 1):
            print("Cell {} average luminescence is {}".format(
                i, fin_result[i]))
            bbox = mh.bbox((labeled == i))
            plt.text((bbox[2] + bbox[3]) / 2, (bbox[0] + bbox[1]
                                               ) / 2, str(i), fontsize=20, color='black')
        # plt.show()
        plt.savefig("test" + str(nr_objects) + ".svg",
                    format='svg', bbox_inches='tight', dpi=1200)
    else:
        for i in range(1, nr_objects + 1):
            bbox = mh.bbox((labeled == i))
            cell = (im * (labeled == i))[bbox[0]:bbox[1], bbox[2]:bbox[3]]
            hashed = hashlib.sha1(im).hexdigest()
            imsave(d + data_dir + hashed + '-' + str(i) +
                   '.png', imresize(cell, (img_rows, img_cols)))
Example #29
def separate_clumped_objects(clumps_image, min_cut_area, min_area, max_area,
        max_circularity, max_convexity):
    '''Separates objects in `clumps_image` based on morphological criteria.

    Parameters
    ----------
    clumps_image: numpy.ndarray[Union[numpy.int32, numpy.bool]]
        objects that should be separated
    min_cut_area: int
        minimal area an object must have (prevents cuts that would result
        in too small objects)
    min_area: int
        minimal area an object must have to be considered a clump
    max_area: int
        maximal area an object can have to be considered a clump
    max_circularity: float
        maximal circularity an object can have to be considered a clump
    max_convexity: float
        maximal convexity an object can have to be considered a clump

    Returns
    -------
    numpy.ndarray[numpy.uint32]
        separated objects

    See also
    --------
    :class:`jtlib.features.Morphology`
    '''

    logger.info('separate clumped objects')
    label_image, n_objects = mh.label(clumps_image)
    if n_objects == 0:
        logger.debug('no objects')
        return label_image

    pad = 1
    cutting_pass = 1
    separated_image = label_image.copy()
    while True:
        logger.info('cutting pass #%d', cutting_pass)
        cutting_pass += 1
        label_image = mh.label(label_image > 0)[0]

        f = Morphology(label_image)
        values = f.extract()
        index = (
            (min_area < values['Morphology_Area']) &
            (values['Morphology_Area'] <= max_area) &
            (values['Morphology_Convexity'] <= max_convexity) &
            (values['Morphology_Circularity'] <= max_circularity)
        )
        clumped_ids = values[index].index.values
        not_clumped_ids = values[~index].index.values

        if len(clumped_ids) == 0:
            logger.debug('no more clumped objects')
            break

        mh.labeled.remove_regions(label_image, not_clumped_ids, inplace=True)
        mh.labeled.relabel(label_image, inplace=True)
        bboxes = mh.labeled.bbox(label_image)
        for oid in np.unique(label_image[label_image > 0]):
            bbox = bboxes[oid]
            logger.debug('process clumped object #%d', oid)
            obj_image = extract_bbox(label_image, bboxes[oid], pad=pad)
            obj_image = obj_image == oid

            # Rescale distance intensities to make them independent of clump size
            dist = mh.stretch(mh.distance(obj_image))

            # Find peaks that can be used as seeds for the watershed transform
            thresh = mh.otsu(dist)
            peaks = dist > thresh
            n = mh.label(peaks)[1]
            if n == 1:
                logger.debug(
                    'only one peak detected - perform iterative erosion'
                )
                # Iteratively shrink the peaks until we have two peaks that we
                # can use to separate the clump.
                while True:
                    tmp = mh.morph.open(mh.morph.erode(peaks))
                    n = mh.label(tmp)[1]
                    if n == 2 or n == 0:
                        if n == 2:
                            peaks = tmp
                        break
                    peaks = tmp

            # Select the two biggest peaks, since we want only two objects.
            peaks = mh.label(peaks)[0]
            sizes = mh.labeled.labeled_size(peaks)
            index = np.argsort(sizes)[::-1][1:3]
            for label in np.unique(peaks):
                if label not in index:
                    peaks[peaks == label] = 0
            peaks = mh.labeled.relabel(peaks)[0]
            regions = mh.cwatershed(np.invert(dist), peaks)

            # Use the line separating watershed regions to make the cut
            se = np.ones((3,3), bool)
            line = mh.labeled.borders(regions, Bc=se)
            line[~obj_image] = 0
            line = mh.morph.dilate(line)

            # Ensure that cut is reasonable given user-defined criteria
            test_cut_image = obj_image.copy()
            test_cut_image[line] = False
            subobjects, n_subobjects = mh.label(test_cut_image)
            sizes = mh.labeled.labeled_size(subobjects)
            smaller_object_area = np.min(sizes)
            smaller_id = np.where(sizes == smaller_object_area)[0][0]
            smaller_object = subobjects == smaller_id

            do_cut = (
                (smaller_object_area > min_cut_area) &
                (np.sum(line) > 0)
            )
            if do_cut:
                logger.debug('cut object #%d', oid)
                y, x = np.where(line)
                y_offset, x_offset = bboxes[oid][[0, 2]] - pad - 1
                y += y_offset
                x += x_offset
                label_image[y, x] = 0
                separated_image[y, x] = 0
            else:
                logger.debug('don\'t cut object #%d', oid)
                mh.labeled.remove_regions(label_image, oid, inplace=True)

    return mh.label(separated_image)[0]
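A hedged usage sketch for separate_clumped_objects; Morphology, extract_bbox and logger come from jtlib and must be importable, and the clump here is just two overlapping discs, so the morphological thresholds below are guesses rather than validated settings:

import numpy as np

# two overlapping discs forming a single clump (illustrative input only)
yy, xx = np.mgrid[:120, :120]
clumps = ((((yy - 60) ** 2 + (xx - 45) ** 2) < 400) |
          (((yy - 60) ** 2 + (xx - 75) ** 2) < 400))

separated = separate_clumped_objects(
    clumps, min_cut_area=50, min_area=500, max_area=10000,
    max_circularity=0.8, max_convexity=0.92)
print(separated.max(), 'objects after separation')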
Example #30
def separate_clumped_objects(clumps_image, min_cut_area, min_area, max_area,
        max_circularity, max_convexity):
    '''Separates objects in `clumps_image` based on morphological criteria.

    Parameters
    ----------
    clumps_image: numpy.ndarray[Union[numpy.int32, numpy.bool]]
        objects that should be separated
    min_cut_area: int
        minimal area an object must have (prevents cuts that would result
        in too small objects)
    min_area: int
        minimal area an object must have to be considered a clump
    max_area: int
        maximal area an object can have to be considered a clump
    max_circularity: float
        maximal circularity an object can have to be considered a clump
    max_convexity: float
        maximal convexity an object can have to be considered a clump

    Returns
    -------
    numpy.ndarray[numpy.uint32]
        separated objects

    See also
    --------
    :class:`jtlib.features.Morphology`
    '''

    logger.info('separate clumped objects')
    label_image, n_objects = mh.label(clumps_image)
    if n_objects == 0:
        logger.debug('no objects')
        return label_image

    pad = 1
    cutting_pass = 1
    separated_image = label_image.copy()
    while True:
        logger.info('cutting pass #%d', cutting_pass)
        cutting_pass += 1
        label_image = mh.label(label_image > 0)[0]

        f = Morphology(label_image)
        values = f.extract()
        index = (
            (min_area < values['Morphology_Area']) &
            (values['Morphology_Area'] <= max_area) &
            (values['Morphology_Convexity'] <= max_convexity) &
            (values['Morphology_Circularity'] <= max_circularity)
        )
        clumped_ids = values[index].index.values
        not_clumped_ids = values[~index].index.values

        if len(clumped_ids) == 0:
            logger.debug('no more clumped objects')
            break

        mh.labeled.remove_regions(label_image, not_clumped_ids, inplace=True)
        mh.labeled.relabel(label_image, inplace=True)
        bboxes = mh.labeled.bbox(label_image)
        for oid in np.unique(label_image[label_image > 0]):
            bbox = bboxes[oid]
            logger.debug('process clumped object #%d', oid)
            obj_image = extract_bbox(label_image, bboxes[oid], pad=pad)
            obj_image = obj_image == oid

            # Rescale distance intensities to make them independent of clump size
            dist = mh.stretch(mh.distance(obj_image))

            # Find peaks that can be used as seeds for the watershed transform
            thresh = mh.otsu(dist)
            peaks = dist > thresh
            n = mh.label(peaks)[1]
            if n == 1:
                logger.debug(
                    'only one peak detected - perform iterative erosion'
                )
                # Iteratively shrink the peaks until we have two peaks that we
                # can use to separate the clump.
                while True:
                    tmp = mh.morph.open(mh.morph.erode(peaks))
                    n = mh.label(tmp)[1]
                    if n == 2 or n == 0:
                        if n == 2:
                            peaks = tmp
                        break
                    peaks = tmp

            # Select the two biggest peaks, since we want only two objects.
            peaks = mh.label(peaks)[0]
            sizes = mh.labeled.labeled_size(peaks)
            index = np.argsort(sizes)[::-1][1:3]
            for label in np.unique(peaks):
                if label not in index:
                    peaks[peaks == label] = 0
            peaks = mh.labeled.relabel(peaks)[0]
            regions = mh.cwatershed(np.invert(dist), peaks)

            # Use the line separating watershed regions to make the cut
            se = np.ones((3,3), bool)
            line = mh.labeled.borders(regions, Bc=se)
            line[~obj_image] = 0
            line = mh.morph.dilate(line)

            # Ensure that cut is reasonable given user-defined criteria
            test_cut_image = obj_image.copy()
            test_cut_image[line] = False
            subobjects, n_subobjects = mh.label(test_cut_image)
            sizes = mh.labeled.labeled_size(subobjects)
            smaller_object_area = np.min(sizes)
            smaller_id = np.where(sizes == smaller_object_area)[0][0]
            smaller_object = subobjects == smaller_id

            do_cut = (
                (smaller_object_area > min_cut_area) &
                (np.sum(line) > 0)
            )
            if do_cut:
                logger.debug('cut object #%d', oid)
                y, x = np.where(line)
                y_offset, x_offset = bboxes[oid][[0, 2]] - pad - 1
                y += y_offset
                x += x_offset
                label_image[y, x] = 0
                separated_image[y, x] = 0
            else:
                logger.debug('don\'t cut object #%d', oid)
                mh.labeled.remove_regions(label_image, oid, inplace=True)

    return mh.label(separated_image)[0]
Example #31
def test_uint8():
    # This did not work correctly in 0.9.5
    a8 = np.zeros((5, 5), dtype=np.uint8)
    ab = np.zeros((5, 5), dtype=bool)
    assert np.all(distance(a8) == distance(ab))
Example #32
    def passmsg(self,child_part,parent_part):
        '''function [score,Ix,Iy,Ik] = passmsg(child,parent)'''
    
        INF = 1e+10
        K ,  = child_part['filterid'].squeeze().shape 
        Nk,Ny,Nx = parent_part['score'].shape
        Ix0 = np.zeros([K,Ny,Nx])
        Iy0 = np.zeros([K,Ny,Nx])
        score0 = np.zeros([K,Ny,Nx],dtype='float64') - INF
        L = parent_part['filterid'].size
        for k in range(K):
            #assert child_part['w'][0,k]>0
            #assert child_part['w'][2,k]>0
            #print child_part['w'][0,k],child_part['w'][1,k],child_part['w'][2,k]
            #score_tmp,Ix_tmp,Iy_tmp = self.dt2d(child_part['score'][k,:,:].astype('float64'),child_part['w'][0,k],child_part['w'][1,k],child_part['w'][2,k],child_part['w'][3,k])
            #print child_part['score'][k,:,:].flags

            score_tmp,Ix_tmp,Iy_tmp=mahotas.distance(np.asfortranarray(child_part['score'][k,:,:]),child_part['w'][0,k],child_part['w'][1,k],child_part['w'][2,k],child_part['w'][3,k])
            startx = np.int_(child_part['startx'][k])-1
            starty = np.int_(child_part['starty'][k])-1
            step = child_part['step'].squeeze()
            #% ending points
            endy = starty+step*(Ny-1)+1
            endx = startx+step*(Nx-1)+1    
            endy = min(child_part['score'].shape[1],endy)
            endx = min(child_part['score'].shape[2],endx)
            #y sample points
            iy = np.arange(starty,endy,step).astype(int)
            ix = np.arange(startx,endx,step).astype(int)
            #print 'size iy,ix', iy.size,ix.size
            if iy.size == 0 or ix.size ==0:            
                score = np.zeros([L,Ny,Nx],dtype='float64')
                Ix = np.zeros([L,Ny,Nx]).astype(int)
                Iy = np.zeros([L,Ny,Nx]).astype(int)
                Ik = np.zeros([L,Ny,Nx]).astype(int)
                return score,Ix,Iy,Ik
            oy = sum(iy<0)
            #print 'iy',iy
            iy = iy[iy>=0]              
            ox = sum(ix<0)
            ix = ix[ix>=0]          
            #sample scores        
            sp = score_tmp[iy,:][:,ix]        
            sx = Ix_tmp[iy,:][:,ix]
            sy = Iy_tmp[iy,:][:,ix]
            sz = sp.shape    
            #define msgs
            iy  = np.arange(oy,oy+sz[0])
            ix  = np.arange(ox,ox+sz[1]) 
            iyp = np.tile(iy,[ix.shape[0],1]).T
            ixp = np.tile(ix,[iy.shape[0],1])
            score0[k,iyp,ixp] = sp
            Ix0[k,iyp,ixp] = sx;
            Iy0[k,iyp,ixp] = sy;  
        
        
        #% At each parent location, for each parent mixture 1:L, compute best child mixture 1:K    
        N  = Nx*Ny;
        i0 = np.arange(N).reshape(Ny,Nx) 
        i0row,i0col = np.unravel_index(i0, [Ny,Nx])
        score = np.zeros([L,Ny,Nx],dtype='float64')
        Ix = np.zeros([L,Ny,Nx]).astype(int)
        Iy = np.zeros([L,Ny,Nx]).astype(int)
        Ik = np.zeros([L,Ny,Nx]).astype(int)
        for l in range(L):
            b = child_part['b'][0,l,:]
            score0b = (score0.transpose([1,2,0])+b).transpose([2,0,1])
            score[l,:,:]= score0b.max(axis=0)
            I = score0b.argmax(axis=0)    
            #print  Ix.shape, Ix0.shape,  i0row.shape,i0col.shape,I.shape  
            Ix[l,:,:]  = Ix0[I,i0row,i0col]
            Iy[l,:,:]  = Iy0[I,i0row,i0col]
            Ik[l,:,:]    = I

        return score,Ix,Iy,Ik
Example #33
from scipy import ndimage 
import scipy
# import numpy for standard numerical calculations
import numpy as np 

# read the image with mahotas as a grey image
img=m.imread('./testimg4.jpg',as_grey=True)
# read the image with mahotas again to obtain a color image where we can draw the ReebGraph in red (vertices) and green (edges)
imgColor=m.imread('./testimg4.jpg')
# Threshold to remove artifacts from the jpg compression
img=(img>100)
#get the dimensions of the image
x,y = np.shape(img)

#use the distance transform to obtain the distances per pixel of the medial axis
dmap = m.distance(img,metric='manhatten')
#use mathematical morphology to obtain the medial axis (thinning function of mahotas)
skelImg=m.thin(img)

# draw the medial axis in the image
for idx,i in enumerate(skelImg):
    for jdx,j in enumerate(i):
        if skelImg[idx,jdx]==True:
            imgColor[idx,jdx]=(255,1,1)
            try:
                imgColor[idx+1,jdx]=(255,1,1)
            except:
                pass
            imgColor[idx-1,jdx]=(255,1,1)
            try:
                imgColor[idx,jdx+1]=(255,1,1)
            except:
                pass
Example #34
filtered = mh.gaussian_filter(image, 10) # We use a Gaussian filter to smooth the image.
result = filtered.astype('uint8') # The mh.gaussian_filter() function sets the variable's values to float64.
rmax = mh.regmax(result) # Find the maximum values.

print(rmax)
pylab.imshow(mh.overlay(image, rmax))
pylab.show()

labeled, nr_objects = mh.label(rmax)
# The mh.overlay() function sets img as the background and overlays the values of rmax in the red channel.

print('Found {} objects.'.format(nr_objects))
pylab.imshow(labeled)
# pylab.gray()
pylab.show()

dist = mh.distance(result)
dist = dist.max() - dist
dist -= dist.min()
dist = dist/float(dist.ptp()) * 255
dist = dist.astype(np.uint8)
objects = mh.cwatershed(dist, labeled)
whole = mh.segmentation.gvoronoi(objects) # Voronoi segmentation: each pixel takes the value of the nearest maximum.
pylab.imshow(objects)
pylab.show()

# print (labeled).shape
# print (labeled).max()
# print (labeled).min()

Example #35
from __future__ import print_function

import pylab as p
import numpy as np
import mahotas

f = np.ones((256, 256), bool)
f[200:, 240:] = False
f[128:144, 32:48] = False
# f is basically True with the exception of two islands: one in the lower-right
# corner, another, middle-left

dmap = mahotas.distance(f)
p.imshow(dmap)
p.show()
Example #36
sizes = mahotas.labeled.labeled_size(colonies)
# print(sizes)

too_small = np.where(sizes < 100)
colonies = mahotas.labeled.remove_regions(colonies, too_small)
#colonies = mahotas.labeled.remove_bordering(colonies)
colonies, n_colonies = mahotas.labeled.relabel(colonies)
print('Found {} colonies.'.format(n_colonies))
# plt.imshow(colonies)
# print(colonies)


# Investigate nuclei within cell clusters

# Now, we compute the distance transform:
distances = mahotas.stretch(mahotas.distance(local_threshed))

# We find and label the regional maxima:
Bc = np.ones((9,9))

maxima = mahotas.morph.regmax(distances, Bc=Bc)
spots,n_spots = mahotas.label(maxima, Bc=Bc)
print('Found {} maxima.'.format(n_spots))
# plt.imshow(spots)

# Finally, to obtain the image above, we invert the distance transform
# (because of the way that cwatershed is defined) and compute the watershed:
surface = (distances.max() - distances)
areas = mahotas.cwatershed(surface, spots)
areas *= local_threshed
Example #37
import numpy as np
import mahotas
from pylab import imshow, savefig
A = np.zeros((100,100), bool)
A[40:60] = 1
W = mahotas.thin(A)
D = mahotas.distance(~W)
imshow(D)
savefig('distance.png')

Example #38
        #load segmentation results
        labels = []

        for i, row in df.reset_index().iterrows():
            label = io.imread(analysis_dir + 'segm/' + row['Filename'])
            labels.append(label)
        labels = np.array(labels)
        segs = labels > 0

        #Forward and backward
        k = 0
        while (k < 2 * (repeat + 1)):
            for i in range(df.shape[0] - N):
                intersect = np.prod(segs[i:i + N], axis=0)
                intersect_l = measure.label(intersect)
                distances = mh.distance(segs[i])
                surface = (distances.max() - distances)

                areas = mh.cwatershed(surface, intersect_l)
                new_label = areas * segs[i]

                labels[i] = new_label
                segs[i] = new_label > 0

            labels = labels[::-1]
            segs = segs[::-1]
            k += 1

        #Save refined segmentations
        for i, row in df.reset_index().iterrows():
            io.imsave(analysis_dir + 'segm_refined/' + row['Filename'],
Example #39
    def detectMyotube(self,
                      segmentationMethod='seg1',
                      sizeThresh=0,
                      tophat=True,
                      tophatImgList=[]):
        if tophat == True and tophatImgList == []:
            tophatImgList = self.tophatAllPlanes()
        elif tophat == True and tophatImgList != []:
            #tophatImgList = tophatImgList[tophatImgList.keys()[0]]
            tophatImgList = tophatImgList[0]
        elif tophat == False:
            tophatImgList = self.images

        # median -> histeq -> otsu -> segmentation (histeq and otsu by region)
        if segmentationMethod == 'seg1':
            img_mip = self.maximumIntensityProjection(tophatImgList)
            img_median = self.smooth(img_mip)
            img_histeq, _ = imtools.histeq(img_median)
            T = otsu(np.uint(img_histeq), ignore_zeros=True)
            img_bin = (np.uint(img_histeq) > T)
            img_labeled, _ = label(img_bin)
            markers, counts = self.segmentTubes1(img_labeled, img_histeq)
            # segmentation by watershed
            img_labeled = img_bin * cwatershed(-distance(img_bin), markers)
            result = {
                'MIP': img_mip,
                'median': img_median,
                'histEq': img_histeq,
                'otsu': T,
                'bin': img_bin
            }

        # median -> otsu -> segmentation (histeq and otsu by region)
        elif segmentationMethod == 'seg2':
            img_mip = self.maximumIntensityProjection(tophatImgList)
            img_median = self.smooth(img_mip)
            T = otsu(np.uint(img_median), ignore_zeros=True)
            img_bin = (np.uint(img_median) > T)
            img_labeled, _ = label(img_bin)
            markers, counts = self.segmentTubes2(img_labeled, img_median)
            # segmentation by watershed
            img_labeled = img_bin * cwatershed(-distance(img_bin), markers)
            result = {
                'MIP': img_mip,
                'median': img_median,
                'otsu': T,
                'bin': img_bin
            }

        # median -> histeq -> otsu -> segmentation (histeq and cut regions less than mean-intensity by region)
        elif segmentationMethod == 'seg3':
            img_mip = self.maximumIntensityProjection(tophatImgList)
            img_median = self.smooth(img_mip)
            img_histeq, _ = imtools.histeq(img_median)
            T = otsu(np.uint(img_histeq), ignore_zeros=True)
            img_bin = (np.uint(img_histeq) > T)
            img_labeled, _ = label(img_bin)
            markers, counts = self.segmentTubes3(img_labeled, img_histeq)
            # segmentation by watershed
            img_labeled = img_bin * cwatershed(-distance(img_bin), markers)
            result = {
                'MIP': img_mip,
                'median': img_median,
                'histEq': img_histeq,
                'otsu': T,
                'bin': img_bin
            }

        # median -> histeq -> otsu -> segmentation (cut regions less than mean-intensity by region)
        elif segmentationMethod == 'seg4':
            img_mip = self.maximumIntensityProjection(tophatImgList)
            img_median = self.smooth(img_mip)
            img_histeq, _ = imtools.histeq(img_median)
            T = otsu(np.uint(img_histeq), ignore_zeros=True)
            img_bin = (np.uint(img_histeq) > T)
            img_labeled, _ = label(img_bin)
            markers, counts = self.segmentTubes4(img_labeled, img_histeq)
            # segmentation by watershed
            img_labeled = img_bin * cwatershed(-distance(img_bin), markers)
            result = {
                'MIP': img_mip,
                'median': img_median,
                'histEq': img_histeq,
                'otsu': T,
                'bin': img_bin
            }

        # median -> otsu -> segmentation (cut regions less than mean-intensity by region)
        elif segmentationMethod == 'seg5':
            img_mip = self.maximumIntensityProjection(tophatImgList)
            img_median = self.smooth(img_mip)
            T = otsu(np.uint(img_median), ignore_zeros=True)
            img_bin = (np.uint(img_median) > T)
            img_labeled, _ = label(img_bin)
            markers, counts = self.segmentTubes4(img_labeled, img_median)
            # segmentation by watershed
            img_labeled = img_bin * cwatershed(-distance(img_bin), markers)
            result = {
                'MIP': img_mip,
                'median': img_median,
                'otsu': T,
                'bin': img_bin
            }

        # non-segmentation
        else:
            img_mip = self.maximumIntensityProjection(tophatImgList)
            img_median = self.smooth(img_mip)
            img_histeq, _ = imtools.histeq(img_median)
            T = otsu(np.uint(img_histeq))
            img_bin = (np.uint(img_histeq) > T)
            img_labeled, counts = label(img_bin)
            result = {
                'MIP': img_mip,
                'median': img_median,
                'histEq': img_histeq,
                'otsu': T,
                'bin': img_bin
            }
            print('non-segmentation')
        print('Otsu\'s threshold:', T)
        print('Found {} objects.'.format(counts))
        sizes = labeled_size(img_labeled)
        img_labeled = remove_regions_where(
            img_labeled, sizes < sizeThresh)  #origin 10000, triangle 8585
        img_relabeled, counts = relabel(img_labeled)
        result['label'] = img_relabeled
        result['count'] = counts
        print('After filtering and relabeling, there are {} objects left.'.
              format(counts))
        result['labeledSkeleton'] = self.labeledSkeleton(img_relabeled)
        return ProcessImages(result)
Example #40
def compare_slow(bw):
    for metric in ('euclidean', 'euclidean2'):
        f = distance(bw, metric)
        sd = _slow_dist(bw, metric)
        assert np.all(f == sd)
Example #41
    def extract(self):
        '''Extracts point pattern features.

        Returns
        -------
        pandas.DataFrame
            extracted feature values for each object in
            :attr:`label_image <jtlib.features.PointPattern.label_image>`
        '''

        logger.info('extract point pattern features')
        features = dict()
        for obj in self.parent_object_ids:
            parent_obj_img = self.get_parent_object_mask_image(obj)
            points_img = self.get_points_object_label_image(obj)
            point_ids = np.unique(points_img)[1:]
            mh.labeled.relabel(points_img, inplace=True)

            size = np.sum(parent_obj_img)
            abs_border_dist_img = mh.distance(parent_obj_img).astype(float)
            rel_border_dist_img = abs_border_dist_img / size
            centroids = mh.center_of_mass(points_img, labels=points_img)
            centroids = centroids[1:, :].astype(int)

            indexer = np.arange(centroids.shape[0])
            if len(indexer) == 0:
                continue
            if len(indexer) == 1:
                y, x = centroids[0, :]
                values = [
                    abs_border_dist_img[y, x],
                    rel_border_dist_img[y, x],
                    np.nan,
                    np.nan,
                    np.nan,
                    np.nan,
                    np.nan,
                    np.nan
                ]
                features[point_ids[0]] = values
                continue
            for i, c in enumerate(centroids):
                abs_distances = cdist([c], centroids)[0, :]
                rel_distances = abs_distances / size
                idx = indexer != i
                y, x = c
                values = [
                    abs_border_dist_img[y, x],
                    rel_border_dist_img[y, x],
                    np.nanmin(abs_distances[idx]),
                    np.nanmin(rel_distances[idx]),
                    np.nanmean(abs_distances[idx]),
                    np.nanmean(rel_distances[idx]),
                    np.nanstd(abs_distances[idx]),
                    np.nanstd(rel_distances[idx]),
                ]
                features[point_ids[i]] = values

        ids = features.keys()
        values = list()
        nans = [np.nan for _ in range(len(self.names))]
        for i in self.object_ids:
            if i not in ids:
                logger.warn('values missing for object #%d', i)
                features[i] = nans
            values.append(features[i])
        return pd.DataFrame(values, columns=self.names, index=self.object_ids)
Example #42
def region_growing(labelImg):
    distances = mahotas.stretch(mahotas.distance(labelImg > 0))
    surface = numpy.int32(distances.max() - distances)
    areas = mahotas.cwatershed(surface, labelImg)
    return areas
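A hedged usage sketch for region_growing; the two single-pixel seed labels below are grown over the whole frame by the inverted-distance watershed:

import numpy
import mahotas

labelImg = numpy.zeros((64, 64), numpy.int32)
labelImg[30, 16] = 1   # seed for region 1
labelImg[30, 48] = 2   # seed for region 2

grown = region_growing(labelImg)
print(numpy.unique(grown))   # expected: [1 2]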
Example #43
print(nr_objects)
plt.imshow(labeled) 
plt.jet()
@interact(sigma=(1.,16.)) 
def check_sigma(sigma):
    dnaf = mh.gaussian_filter(dna.astype(float), sigma) 
    maxima = mh.regmax(mh.stretch(dnaf))
    maxima = mh.dilate(maxima, np.ones((5,5))) 
    plt.imshow(mh.as_rgb(np.maximum(255*maxima, dnaf), dnaf, dna > T_mean))
sigma = 12.0

dnaf = mh.gaussian_filter(dna.astype(float),sigma) 
maxima = mh.regmax(mh.stretch(dnaf)) 
maxima,_= mh.label(maxima) 
plt.imshow(maxima)
dist = mh.distance(bin_image) 
plt.imshow(dist)
dist = 255 - mh.stretch(dist)
watershed = mh.cwatershed(dist,maxima) 
plt.imshow(watershed)
watershed *= bin_image 
plt.imshow(watershed)
watershed = mh.labeled.remove_bordering(watershed) 
plt.imshow(watershed)
sizes = mh.labeled.labeled_size(watershed)
# The conversion below is not necessary in newer versions of mahotas: watershed = watershed.astype(np.intc)
@interact(min_size=(100,4000,20)) 
def do_plot(min_size):
    filtered = mh.labeled.remove_regions_where(watershed, sizes < min_size) 
    print("filtering {}...".format(min_size))
    plt.imshow(filtered) 
Example #44
import mahotas
import numpy as np
from matplotlib import pyplot as plt
import random
from matplotlib import colors as c

nuclear = mahotas.imread('data/flower.png')
nuclear = nuclear[:, :, 0]
nuclear = mahotas.gaussian_filter(nuclear, 1.)
threshed = (nuclear > nuclear.mean())
distances = mahotas.stretch(mahotas.distance(threshed))
Bc = np.ones((9, 9))

maxima = mahotas.morph.regmax(distances, Bc=Bc)
spots, n_spots = mahotas.label(maxima, Bc=Bc)
surface = (distances.max() - distances)
areas = mahotas.cwatershed(surface, spots)
areas *= threshed

plt.jet()

rmap = c.ListedColormap(np.random.rand(256, 3))

plt.imshow(areas, cmap=rmap)
plt.show()
Example #45
colonies, n_colonies = mahotas.label(threshed)
sizes = mahotas.labeled.labeled_size(colonies)
# print(sizes)

too_small = np.where(sizes < 100)
colonies = mahotas.labeled.remove_regions(colonies, too_small)
#colonies = mahotas.labeled.remove_bordering(colonies)
colonies, n_colonies = mahotas.labeled.relabel(colonies)
print('Found {} colonies.'.format(n_colonies))
# plt.imshow(colonies)
# print(colonies)

# Investigate nuclei within cell clusters

# Now, we compute the distance transform:
distances = mahotas.stretch(mahotas.distance(local_threshed))

# We find and label the regional maxima:
Bc = np.ones((9, 9))

maxima = mahotas.morph.regmax(distances, Bc=Bc)
spots, n_spots = mahotas.label(maxima, Bc=Bc)
print('Found {} maxima.'.format(n_spots))
# plt.imshow(spots)

# Finally, to obtain the image above, we invert the distance transform
# (because of the way that cwatershed is defined) and compute the watershed:
surface = (distances.max() - distances)
areas = mahotas.cwatershed(surface, spots)
areas *= local_threshed
Example #46
def keypress(event):
    global nodes, edges, node_labels, G, root, select
    print('press', event.key)
    sys.stdout.flush()

    if event.key == 'x':  # delete closest branch
        e = findClosestEdge(G, (event.xdata, event.ydata))
        undo_stack.append((G.copy(), root, Cercle))
        G.remove_edge(*e)

        updateMeasures(G, root)
        plot_graph(G)
        fig.canvas.draw()

    if event.key == 'u':  # undo
        if len(undo_stack) > 0:
            print('Undo')
            G, root, C = undo_stack.pop()
            setRoot(root)
            updateMeasures(G, root)
            updateCircles(G, C)
            plot_graph(G)
            fig.canvas.draw()
        else:
            print('No further undo')

    if event.key == 'r':  #re-build the graph from skeleton
        print('Rebuilding tree')
        undo_stack.append((G.copy(), root, Cercle))
        G = buildGraph(img, skel)
        plot_graph(G)
        updateCircles(G, Cercle)
        fig.canvas.draw()

    if event.key == 'n':  #add node
        p = findClosestSkel(skel2, (event.xdata, event.ydata))
        print('closest node is (%5.1f, %5.1f)' % p)
        undo_stack.append((G.copy(), root, Cercle))
        print('adding node')
        try:
            addNodeSkel(G, p, skel2, j, t, dmap)
        except:
            # dmap may not have been computed yet; build the distance transform and retry
            print('  distance transform...')
            dmap = mh.distance(img)
            addNodeSkel(G, p, skel2, j, t, dmap)

        Cercle[p] = plt.Circle(p,
                               radius=(G.node[p]['dia'] / 2),
                               alpha=terminal_disk_alpha,
                               color=terminal_disk_color)
        updateCircles(G, Cercle)
        updateMeasures(G, root)
        report(G)
        plot_graph(G)
        fig.canvas.draw()

    if select is None:  # the remaining commands require an active node selection
        print('No active selection, please select a node')
        return

    if event.key == 't':  # change the root
        undo_stack.append((G.copy(), root, Cercle))
        root = select
        print('New root: ' + str(root))
        setRoot(root)
        updateMeasures(G, root)
        report(G)

        plot_graph(G)
        fig.canvas.draw()

    if event.key == 'd':  # delete closest node
        p = select
        if p == root:
            print('Cannot remove the root.')
            return
        undo_stack.append((G.copy(), root, Cercle))
        deleteNode(G, p)
        Cercle[p].remove()
        #report(G)

        updateMeasures(G, root)
        plot_graph(G)
        fig.canvas.draw()

    if event.key == 'a':  #marking of fertile nodes
        p = select
        print('this node is now fertile')
        undo_stack.append((G.copy(), root, Cercle))
        G.node[p]['node_color'] = node_color_fertile
        G.node[p]['fertile'] = True
        G[p][G.node[p]['parent']]['fertile'] = True
        report(G)
        plot_graph(G)
        fig.canvas.draw()

    if event.key == 'alt+e':  # clear any highlighted nodes
        print('hide characteristics')
        undo_stack.append((G.copy(), root, Cercle))
        for p in G:
            if G.node[p]['node_color'] != node_color:
                G.node[p]['node_color'] = node_color
                plot_graph(G)
                fig.canvas.draw()
        report(G)

    if event.key == 'b':  # diameter modification
        p = select
        print('closest node is (%5.1f, %5.1f)' % p)
        undo_stack.append((G.copy(), root, Cercle))
        G.node[p]['dia_2'] = 2 * dist(p, (event.xdata, event.ydata))
        updateMeasures(G, root)
        Cercle[p].remove()
        Cercle[p] = plt.Circle(p,
                               radius=G.node[p]['dia_2'] / 2,
                               alpha=terminal_disk_alpha,
                               color=terminal_disk_color)
        plt.gca().add_patch(Cercle[p])
        plot_graph(G)
        fig.canvas.draw()
Example #47
0
def generate_image(cells, shape, max_dist=5):
    thetas = 360 * np.random.rand(len(cells))
    data_list = [cell.data.rotate(theta) for cell, theta in zip(cells, thetas)]
    assert all([data.names == data_list[0].names for data in data_list
                ]), 'All cells must have the same data elements'
    out_dict = {
        name: np.zeros(shape)
        for name, dclass in zip(data_list[0].names, data_list[0].dclasses)
        if dclass != 'storm'
    }
    for i, data in enumerate(data_list):
        valid_position = False
        while not valid_position:
            pos_x = int(np.round(shape[1] * np.random.rand()))
            pos_y = int(np.round(shape[0] * np.random.rand()))

            min1 = pos_y - int(np.floor(data.shape[0]))
            max1 = min1 + data.shape[0]

            min2 = pos_x - int(np.floor(data.shape[1]))
            max2 = min2 + data.shape[1]

            # Crop the data for when the cell is on the border of the image
            d_min1 = np.max([0 - min1, 0])
            d_max1 = np.min(
                [data.shape[0] + (shape[0] - pos_y), data.shape[0]])

            d_min2 = np.max([0 - min2, 0])
            d_max2 = np.min(
                [data.shape[1] + (shape[1] - pos_x), data.shape[1]])

            data_cropped = data[d_min1:d_max1, d_min2:d_max2]

            # Limit image position to the edges of the image
            min1 = np.max([min1, 0])
            max1 = np.min([max1, shape[0]])
            min2 = np.max([min2, 0])
            max2 = np.min([max2, shape[1]])

            temp_binary = np.zeros(shape)
            temp_binary[min1:max1, min2:max2] = data_cropped.binary_img
            out_binary = (out_dict['binary'] > 0).astype(int)
            # Distance from every empty pixel to the nearest already-placed cell;
            # reject this position if the new cell would come within max_dist pixels.
            distance_map = mh.distance(1 - out_binary, metric='euclidean')

            if np.any(distance_map[temp_binary.astype(bool)] < max_dist):
                continue

            valid_position = True

        for name in data.names:
            data_elem = data_cropped.data_dict[name]
            if data_elem.dclass == 'storm':
                data_elem['x'] += min2
                data_elem['y'] += min1

                xmax, ymax = shape[1], shape[0]
                bools = (data_elem['x'] < 0) + (data_elem['x'] > xmax) + (
                    data_elem['y'] < 0) + (data_elem['y'] > ymax)
                data_out = data_elem[~bools].copy()
                if name in out_dict:
                    out_dict[name] = np.append(out_dict[name], data_out)
                else:
                    out_dict[name] = data_out

                continue
            elif data_elem.dclass == 'binary':
                out_dict[name][min1:max1, min2:max2] += ((i + 1) * data_elem)
            else:
                out_dict[name][min1:max1, min2:max2] += data_elem

    return out_dict
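# Stripped of the Cell/Data machinery, the placement test in generate_image()
# boils down to a distance-map check. The lines below are a hypothetical,
# self-contained illustration of just that criterion; the array shape, the
# positions and max_dist are made-up values.
import mahotas as mh
import numpy as np

frame_shape = (64, 64)
occupied = np.zeros(frame_shape, int)
occupied[10:20, 10:20] = 1              # an already-placed cell

candidate = np.zeros(frame_shape, bool)
candidate[22:30, 22:30] = True          # footprint of the cell we want to add

max_dist = 5
distance_map = mh.distance(1 - (occupied > 0), metric='euclidean')
too_close = np.any(distance_map[candidate] < max_dist)
print('rejected' if too_close else 'accepted')  # rejected: the closest gap is ~4.2 px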
Example #48
0
from os import path

import mahotas
import numpy as np
from matplotlib import pyplot as plt

try:
    nuclear_path = path.join(
                    path.dirname(path.abspath(__file__)),
                    'data',
                    'nuclear.png')
except NameError:
    nuclear_path = path.join('data', 'nuclear.png')

nuclear = mahotas.imread(nuclear_path)
nuclear = nuclear[:,:,0]
nuclear = mahotas.gaussian_filter(nuclear, 1.)
threshed  = (nuclear > nuclear.mean())
distances = mahotas.stretch(mahotas.distance(threshed))
Bc = np.ones((9,9))

maxima = mahotas.morph.regmax(distances, Bc=Bc)
spots,n_spots = mahotas.label(maxima, Bc=Bc)
surface = (distances.max() - distances)
areas = mahotas.cwatershed(surface, spots)
areas *= threshed



import random
from matplotlib import colors as c
colors = list(map(plt.cm.jet, range(0, 256, 4)))  # list() so it can be shuffled and indexed under Python 3
random.shuffle(colors)
colors[0] = (0., 0., 0., 1.)  # keep the background (label 0) black
Example #49
0
File: vsk_utils.py  Project: thouis/icon
def region_growing(labelImg):
    distances = mahotas.stretch(mahotas.distance(labelImg>0))
    surface = numpy.int32(distances.max() - distances)
    areas = mahotas.cwatershed(surface, labelImg)
    return areas
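# A hypothetical usage sketch for region_growing(): label a thresholded image
# to obtain seeds, then grow each seed outward until the regions meet and the
# whole frame is covered. The input file and threshold are assumptions, not
# part of vsk_utils.py; the function itself relies on the module-level
# mahotas/numpy imports.
import mahotas

img = mahotas.imread('data/nuclear.png')[:, :, 0]  # assumed example input
seeds, n_seeds = mahotas.label(img > img.mean())   # labelled seed regions
grown = region_growing(seeds)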
Example #50
0
import numpy as np
from mahotas import distance

def test_uint8():
    # This did not work correctly in mahotas 0.9.5: uint8 input gave a
    # different result from an equivalent boolean input
    a8 = np.zeros((5,5), dtype=np.uint8)
    ab = np.zeros((5,5), dtype=bool)
    assert np.all(distance(a8) == distance(ab))
Example #51
0
if __name__ == '__main__':
    # cv2, pylab and mahotas, plus the nc_rutovitz/nc_yokoi neighbour-count
    # functions, are assumed to be imported/defined earlier in this script.
    pylab.axis('off')

    img = cv2.imread('img2.png')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img //= 255  # scale the black-and-white image down to {0, 1}

    # Rutovitz
    rut = nc_rutovitz(img)
    pylab.imshow(rut, interpolation='nearest')
    pylab.savefig('rutovitz.png', bbox_inches='tight')

    # Yokoi 4
    yokoi4 = nc_yokoi(img, 4)
    pylab.imshow(yokoi4, interpolation='nearest')
    pylab.savefig('yokoi4.png', bbox_inches='tight')

    # Yokoi 8
    yokoi8 = nc_yokoi(img, 8)
    pylab.imshow(yokoi8, interpolation='nearest')
    pylab.savefig('yokoi8.png', bbox_inches='tight')

    # Distance transform
    img = cv2.imread('img.png')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    dist = mahotas.distance(img, metric='euclidean')
    pylab.imshow(dist, interpolation='nearest')
    pylab.gray()
    pylab.savefig('dist.png', bbox_inches='tight')
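# Note on the metric argument used above: mahotas.distance defaults to
# 'euclidean2' (squared Euclidean distances), which is why metric='euclidean'
# is passed explicitly. A tiny sketch on a made-up 1x5 strip shows the difference:
import mahotas
import numpy as np

strip = np.array([[0, 1, 1, 1, 1]], bool)             # one background pixel on the left
print(mahotas.distance(strip))                        # squared distances: 0, 1, 4, 9, 16
print(mahotas.distance(strip, metric='euclidean'))    # distances: 0, 1, 2, 3, 4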