Example #1
def calculate_min_distance(hsu,nhru,cluster_ids,lats,lons,clats,clons):
  radius = 6367.0
  # Minimum distance between HRUs

  # Get lat lon from the borders
  idx = (cluster_ids == hsu)
  idx = clear_border(idx,bgval=False)
  idx = find_boundaries(idx, mode='inner')
  bd_lats = lats[idx].flatten()
  bd_lons = lons[idx].flatten()

  if len(bd_lats) < 1 :
   idx = (cluster_ids == hsu)
   idx = find_boundaries(idx, mode='inner')
   bd_lats = lats[idx].flatten()
   bd_lons = lons[idx].flatten()

  # Get unique lat,lon values and sample a subset of the boundary points
  points = set(zip(bd_lats,bd_lons))
  nsamp = 1  # 30
  if len(points) <= nsamp: nsamp = int(len(points)/2.)
  if len(points) <= 5: nsamp = len(points)

  points = random.sample(list(points), nsamp)  # random.sample needs a sequence, not a set
  bd_lats = np.array(list(zip(*points))[0])
  bd_lons = np.array(list(zip(*points))[1])

  distance = np.ones(nhru)*10000000.

  #Calculate the distance of a boundary to a centroid of each hru

  for lat, lon in zip(bd_lats,bd_lons):
    dlat = np.radians(lat-clats)
    dlon = np.radians(lon-clons)
    a = np.sin(dlat/2) * np.sin(dlat/2) + np.cos(np.radians(clats)) \
      * np.cos(np.radians(lat)) * np.sin(dlon/2) * np.sin(dlon/2)
    c = np.zeros((len(a)))
    for count in range(len(a)):
      c[count] = 2 * np.math.atan2(np.sqrt(a[count]), np.sqrt(1-a[count]))
    dist = radius * c
    distance[dist < distance] = dist[dist < distance]
#  for hrs in range(nhru):
#    if hrs == hsu:
#      distance[hrs] = 0.0
#    else:
#      clat = clats[hrs]
#      clon = clons[hrs]
#
#      for lat, lon in zip(bd_lats,bd_lons):
#        dlat = np.radians(lat-clat)
#        dlon = np.radians(lon-clon)
#        a = np.sin(dlat/2) * np.sin(dlat/2) + np.cos(np.radians(clat)) \
#          * np.cos(np.radians(lat)) * np.sin(dlon/2) * np.sin(dlon/2)
#        c = 2 * np.math.atan2(np.sqrt(a), np.sqrt(1-a))
#        dist = radius * c
#        if dist < distance[hrs]: distance[hrs] = dist
#      #print hsu, hrs, dist, distance[hrs] 

  #print hsu, distance
  return distance
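The per-element atan2 loop above can be replaced by a fully vectorized haversine. A minimal sketch of that variant (hypothetical standalone helper, same radius and formula as the code above):

import numpy as np

def haversine_km(lat, lon, clats, clons, radius=6367.0):
    # Great-circle distance (km) from one boundary point to every HRU centroid.
    dlat = np.radians(clats - lat)
    dlon = np.radians(clons - lon)
    a = np.sin(dlat / 2.0)**2 + np.cos(np.radians(lat)) * np.cos(np.radians(clats)) * np.sin(dlon / 2.0)**2
    return radius * 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1.0 - a))

# Inside the loop, dist = haversine_km(lat, lon, clats, clons) would replace the explicit atan2 loop.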
def findBoundaries(boneMap, cartMap):
  boneBoundary = find_boundaries(boneMap, mode = 'inner')#.astype(int)
  cartBoundary = find_boundaries(cartMap).astype(int)
  bci = numpy.multiply(boneBoundary, cartBoundary)
  
  boneBoundaryCart = find_boundaries(boneMap).astype(int)
  cartBoundaryCart = find_boundaries(cartMap, mode = 'inner').astype(int)
  bciCart = numpy.multiply(boneBoundaryCart, cartBoundaryCart)
  articularSurface = cartBoundaryCart - bciCart  # cartilage surface not adjacent to bone (mirrors Example #3)
  return(bci.astype('float32'), articularSurface.astype('float32'))
Example #3
def findBinaryMapBoundaries(boneMap, cartMap):
  # boneBoundary = find_boundaries(boneMap, mode = 'inner').astype(int) #this identifies points inside the bone (NOT cartilage)
#   cartBoundary = find_boundaries(cartMap).astype(int)# this creates a "double" boundary that is two pixels thick, one pixel inside and one outside of the cartilage.
#   adjacentBone = numpy.multiply(boneBoundary, cartBoundary) #by multiplying these we end up with only the ones that are "in" the bone but adjacent to cartilage.
#   bci = numpy.multiply

  boneBoundaryCart = find_boundaries(boneMap).astype('int') #here we identify the bone boundary (two pixels thick) - inside and outside.
  cartBoundaryInner = find_boundaries(cartMap, mode = 'inner').astype('int') #here we identify only the points that are "inside" the cartilage... it's definitely "cartilage"
  bciCart = numpy.multiply(boneBoundaryCart, cartBoundaryInner) # this gives us the "inside" of the bone/cartilage interface, i.e. points on the boundary that are inside the cartilage
  articularSurface = cartBoundaryInner - bciCart #take pixels labeled as the inside of the cartilage and subtract the points adjacent to the bone, leaving only the points that are inside the cartilage but NOT adjacent to the bone
  return(bciCart.astype('float32'), articularSurface.astype('float32'))
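A small synthetic check of this routine (toy masks made up for illustration; assumes the function above plus numpy and skimage.segmentation.find_boundaries are in scope):

import numpy
from skimage.segmentation import find_boundaries

boneMap = numpy.zeros((10, 10), dtype=numpy.uint8)
cartMap = numpy.zeros((10, 10), dtype=numpy.uint8)
boneMap[6:9, 2:8] = 1   # bone block
cartMap[4:6, 2:8] = 1   # cartilage layer sitting directly on the bone

bci, articular = findBinaryMapBoundaries(boneMap, cartMap)
# bci marks cartilage pixels adjacent to the bone, articular marks the free cartilage surface
print(bci.sum(), articular.sum())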
def process_image(image):
    tic = time.clock()
    # rescale intensity
    p2, p98 = np.percentile(image, (1, 99.9))
    image = rescale_intensity(1.0*image, in_range=(p2, p98))

    # do simple filter based on color value
    thresh = 0.5*threshold_func(image)
    filtered_image = np.zeros_like(image,dtype=np.uint8) # set up all-zero image
    filtered_image[image > thresh] = 1 # filtered values set to 1

    # perform watershed transform to split clusters
    distance = ndi.distance_transform_edt(filtered_image)
    local_maxi = peak_local_max(distance, indices=False, footprint=morphology.square(7),
                            labels=filtered_image, exclude_border=False)
    markers = ndi.label(local_maxi)[0]

    # segment and label particles
    labels = morphology.watershed(-distance, markers, mask=filtered_image)
    backup_labels = labels.copy()

    # remove boundaries and restore any small particles deleted in this process
    labels[find_boundaries(labels)] = 0
    for i in np.unique(backup_labels)[1:]:
        if np.count_nonzero(labels[backup_labels == i]) == 0:
            labels[backup_labels == i] = i
    toc = time.clock()
    procTime = toc - tic
    return image, labels, procTime
Example #5
File: get_base2.py  Project: wukm/cakepy
def get_bg_mask(img):
    
    #if img.ndim == 3:
    #    bg_mask = img.any(axis=-1)
    #    bg_mask = np.invert(bg_mask) # consistent with np.ma, True if masked

    #    # make multichannel (is it really this hard?)
    #    bg_mask = np.repeat(bg_mask[:,:,np.newaxis], 3, axis=2) 
    #
    #else:
    #    bg_mask = (img != 0)
    #    bg_mask = np.invert(bg_mask) # see above

    #bound = segmentation.find_boundaries(bg_mask, mode='inner', background=1)
    #bg_mask[bound] = 1
    #min_size = img.shape[0] * img.shape[1] // 4 
    #holes = morphology.remove_small_holes(bg_mask, min_size=min_size)
    #bg_mask[holes] = 1
    
    bg_mask = segmentation.find_boundaries(img)
    bg_mask = morphology.remove_small_objects(bg_mask)
    bg_mask = morphology.remove_small_holes(bg_mask)

    bg_mask = np.invert(bg_mask)
    return bg_mask
def shapesPlot(shapes,inds,fig,ax):
    
    from skimage.measure import label,regionprops
    from skimage import feature
    from skimage.morphology import binary_dilation
    from skimage.segmentation import find_boundaries
    import pylab as plt
    import numpy as np
    
    #fig = plt.figure()
    #ax = fig.add_subplot(111)
    sz = np.int32(shapes.shape)
    
    
    for i in inds:
        img = shapes[i,:,:]
        mx = img[:].max()
        test = img>0.4*mx
        test2 = binary_dilation(binary_dilation(test))
        lbls = label(test2)
        rgs = regionprops(lbls)
        if np.size(rgs)>0:
            szs = []
            for prop in rgs:
                szs.append(prop.area)
            ind = np.argmax(szs)
            if rgs[ind].area>100:
                pt = rgs[ind].centroid
                region = lbls==ind+1
                edges = find_boundaries(region)
                eln = edges.nonzero()
                ax.scatter(eln[1],eln[0],marker='.',color='r',linewidths=0.01)
                ax.text(pt[1]-4,pt[0]+4,'%i' % i,fontsize=14,color='k')
    
    return fig,ax
Example #7
def sample_points(img, n_points=100):
    """Sample points along edges in a binary image.

    Returns an array of shape ``(n_points, 2)`` in image coordinates.

    If there are several disconnected contours, they are sampled
    separately and appended in order of their minimum distance to the
    origin of ``img`` in NumPy array coordinates.

    """
    # FIXME: what if contour crosses itself? for example: an infinity
    # symbol?
    assert img.ndim == 2
    assert n_points > 0

    boundaries = skeletonize(find_boundaries(img))

    # reorder along curves; account for holes and disconnected lines
    # with connected components.
    labels, n_labels = ndimage.label(boundaries, structure=np.ones((3, 3)))
    n_labeled_pixels = labels.sum()
    all_labels = range(1, n_labels + 1)
    curve_n_pixels = list((labels == lab).sum() for lab in all_labels)
    curve_n_points = list(int(np.ceil((n / n_labeled_pixels) * n_points))
                          for n in curve_n_pixels)

    # sample a linear subset of each connected curve
    samples = list(_sample_single_contour(labels == lab, n_points)
                   for lab, n_points in zip(all_labels, curve_n_points))

    # append them together. They should be in order, because
    # ndimage.label() labels in order.
    points = list(itertools.chain(*samples))
    return np.vstack(points)
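A toy call (hypothetical disk image; assumes sample_points and its module-level imports, including the private helper _sample_single_contour, are available):

import numpy as np

yy, xx = np.ogrid[:64, :64]
disk = (yy - 32)**2 + (xx - 32)**2 < 20**2

pts = sample_points(disk, n_points=50)
print(pts.shape)   # roughly (50, 2), rows of (row, col) image coordinates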
Example #8
def plot_roi_bg(roi, bg, fg, pixel_per_um):
    bg_norm = _normalize8_img(bg)
    fg_norm = _normalize8_img(fg)

    borders = segmentation.find_boundaries(roi)

    frame_rgb = numpy.zeros((roi.shape[0], roi.shape[1], 3), dtype='uint8')
    frame_rgb[..., 0] = bg_norm
    frame_rgb[..., 1] = fg_norm
    frame_rgb[..., 0][borders] = 255
    frame_rgb[..., 1][borders] = 255
    frame_rgb = frame_rgb[::-1]

    figure = pyplot.figure(figsize=(5, 5))
    x_axis_end = roi.shape[0] * 1/pixel_per_um
    y_axis_end = roi.shape[1] * 1/pixel_per_um
    pyplot.imshow(frame_rgb, extent=[0, y_axis_end, 0, x_axis_end])
    for i in range(1, numpy.amax(roi)+1):
        coordinates_neuron = numpy.where(roi == i)
        pyplot.text((coordinates_neuron[1][0]+10) * 1/pixel_per_um,
                    (coordinates_neuron[0][0]+10) * 1/pixel_per_um,
                    i, fontsize=20, color='white')
    pyplot.xlabel("[um]")
    pyplot.ylabel("[um]")
    pyplot.tight_layout()
    return figure
Example #9
    def create_neighbour_matrix(self):
        bounds = find_boundaries(self.segments_slic)
        N = len(self.props)
        #adjacency matrix
        self.neighbourhood_matrix = np.zeros((N,N))
        #walk over the pixels to check whether they belong to a boundary
        for row in range(bounds.shape[0]):
            for column in range(bounds.shape[1]):
                #if the pixel belongs to a boundary, check the labels of its neighbours
                if bounds[row,column]==1:
                    if row != 0 and row != (bounds.shape[0]-1) and column != 0 and column != (bounds.shape[1]-1):
                        if self.segments_slic[row-1,column] != self.segments_slic[row+1,column]:
                            self.neighbourhood_matrix[self.segments_slic[row-1,column]-1,self.segments_slic[row+1,column]-1]=1
                            self.neighbourhood_matrix[self.segments_slic[row+1,column]-1,self.segments_slic[row-1,column]-1]=1
                        if self.segments_slic[row,column-1] != self.segments_slic[row,column+1]:
                            self.neighbourhood_matrix[self.segments_slic[row,column-1]-1,self.segments_slic[row,column+1]-1]=1
                            self.neighbourhood_matrix[self.segments_slic[row,column+1]-1,self.segments_slic[row,column-1]-1]=1
                #careful when we are on an image border
                    elif row ==0 or row == bounds.shape[0]-1:
                            if self.segments_slic[row,column-1] != self.segments_slic[row,column+1]:
                                self.neighbourhood_matrix[self.segments_slic[row,column-1]-1,self.segments_slic[row,column+1]-1]=1
                                self.neighbourhood_matrix[self.segments_slic[row,column+1]-1,self.segments_slic[row,column-1]-1]=1

                    elif column == 0 or column == bounds.shape[1]-1:
                            if self.segments_slic[row-1,column] != self.segments_slic[row+1,column]:
                                self.neighbourhood_matrix[self.segments_slic[row-1,column]-1,self.segments_slic[row+1,column]-1]=1
                                self.neighbourhood_matrix[self.segments_slic[row+1,column]-1,self.segments_slic[row-1,column]-1]=1
Example #10
File: webapi.py  Project: LouisK130/oii
def serve_blob(time_series,pid):
    """Serve blob zip or image"""
    pid_hit = pid_resolver.resolve(pid=pid)
    hit = blob_resolver.resolve(pid=pid,time_series=time_series)
    if hit is None:
        abort(404)
    zip_path = hit.value
    if hit.target is None: # bin, not target?
        if hit.extension != 'zip':
            abort(404)
        # the zip file is on disk, stream it directly to client
        return Response(file(zip_path), direct_passthrough=True, mimetype='application/zip', headers=max_age())
    else: # target, not bin
        blobzip = ZipFile(zip_path)
        png = blobzip.read(hit.lid+'.png')
        blobzip.close()
        # now determine PIL format and MIME type
        (pil_format, mimetype) = image_types(hit)
        if pid_hit.product == 'blob' and mimetype == 'image/png':
            return Response(png, mimetype='image/png', headers=max_age())
        else:
            # FIXME support more imaage types
            blob_image = Image.open(StringIO(png))
            if pid_hit.product == 'blob_outline':
                blob = np.asarray(blob_image.convert('L'))
                blob_outline = find_boundaries(blob)
                roi = np.asarray(get_stitched_roi(hit.bin_pid, int(hit.target)))
                blob = np.dstack([roi,roi,roi])
                blob[blob_outline] = [255,0,0]
                blob_image = Image.fromarray(blob,'RGB')
            return image_response(blob_image, pil_format, mimetype)
Example #11
def edge_curvature(mask, min_sep=5, average_over=3):
    '''
    Compute the menger curvature along the edges of the contours in the mask.
    '''

    labels = me.label(mask, neighbors=8, connectivity=2)

    edges = find_boundaries(labels, connectivity=2, mode='outer')

    pts = integer_boundaries(mask, edges, 0.5)

    curvature_mask = np.zeros_like(mask, dtype=float)

    for cont_pts in pts:
        # Last one is a duplicate
        cont_pts = cont_pts[:-1]

        num = cont_pts.shape[0]

        for i in xrange(num):

            curv = 0.0
            for j in xrange(min_sep, min_sep+average_over+1):
                curv += menger_curvature(cont_pts[i-j], cont_pts[i],
                                         cont_pts[(i+j) % num])

            y, x = cont_pts[i]

            if np.isnan(curv):
                curv = 0.0
            curvature_mask[y, x] = curv / average_over

    return curvature_mask
Example #12
def watershed_separation(image, s_elem):
    distance = ndi.distance_transform_edt(image)
    local_maxi = peak_local_max(distance, indices=False, footprint=s_elem, labels=image)
    markers = ndi.label(local_maxi)[0]

    seg = watershed(-distance, markers, mask=image)

    lines = find_boundaries(seg, mode='outer', background=True)
    lines = binary_dilation(lines, s_elem)
    return subtraction(image, lines)
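The same distance-transform/watershed splitting pattern can be written against the current scikit-image API, where peak_local_max returns coordinates instead of a boolean mask and watershed lives in skimage.segmentation. This is only a sketch: a plain boolean subtraction stands in for the project helper subtraction, and the two touching disks are invented test data.

import numpy as np
from scipy import ndimage as ndi
from skimage.feature import peak_local_max
from skimage.segmentation import watershed, find_boundaries

# two touching disks to be split
image = np.zeros((40, 80), dtype=bool)
yy, xx = np.ogrid[:40, :80]
image |= (yy - 20)**2 + (xx - 25)**2 < 15**2
image |= (yy - 20)**2 + (xx - 52)**2 < 15**2

distance = ndi.distance_transform_edt(image)
coords = peak_local_max(distance, footprint=np.ones((7, 7)), labels=image.astype(int))
peaks = np.zeros_like(image, dtype=bool)
peaks[tuple(coords.T)] = True
markers = ndi.label(peaks)[0]

seg = watershed(-distance, markers, mask=image)
lines = find_boundaries(seg, mode='outer')   # marks the split line between touching labels
split = image & ~lines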
Example #13
def labels_to_masks(labels,num_feature=None,include_boundary=False,feature_value=False,**kwargs):
    """
    convert labels array to list of masks

    Parameters
    ----------
    labels : array of labels to analyze

    num_feature : int, number of features to analyze (Default None)
        if None, get num_feature from labels

    include_boundary : bool (Default False)
        if True, include boundary regions in output mask


    feature_value : bool (Default False)
        value which indicates a feature.
        False is the MaskedArray convention
        True is the image convention

    **kwargs : arguments passed to find_boundaries if include_boundary is True
        defaults: mode='outer', connectivity=labels.ndim

    Returns
    -------
    output : list of masks of same shape as labels
        mask for each feature

    """


    if include_boundary:
        kwargs = dict(dict(mode='outer',connectivity=labels.ndim),**kwargs)
    
    if num_feature is None:
        num_feature = labels.max()


    output = []

    for i in range(1,num_feature+1):
        m = labels==i

        if include_boundary:
            b = find_boundaries(m.astype(int),**kwargs)
            m = m+b

        #right now the mask is in image convention
        #if feature_value is False, convert
        if not feature_value:
            m = ~m
        
        output.append(m)

    return output
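A short usage sketch for labels_to_masks (toy labels array invented for illustration; assumes numpy as np and skimage.segmentation.find_boundaries are imported as the snippet expects):

import numpy as np
from skimage.segmentation import find_boundaries

labels = np.zeros((6, 6), dtype=int)
labels[1:3, 1:3] = 1
labels[4:6, 4:6] = 2

masks = labels_to_masks(labels)                   # MaskedArray convention: False marks the feature
imasks = labels_to_masks(labels, feature_value=True, include_boundary=True)
print(len(masks), masks[0].dtype)                 # one boolean mask per label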
Example #14
def test_find_boundaries_bool():
    image = np.zeros((5, 5), dtype=np.bool)
    image[2:5, 2:5] = True

    ref = np.array([[False, False, False, False, False],
                    [False, False,  True,  True,  True],
                    [False,  True,  True,  True,  True],
                    [False,  True,  True, False, False],
                    [False,  True,  True, False, False]], dtype=np.bool)
    result = find_boundaries(image)
    assert_array_equal(result, ref)
Example #15
    def outline_segments(self, mask_background=False):
        """
        Outline the labeled segments.

        The "outlines" represent the pixels *just inside* the segments,
        leaving the background pixels unmodified.  This corresponds to
        the ``mode='inner'`` in `skimage.segmentation.find_boundaries`.

        Parameters
        ----------
        mask_background : bool, optional
            Set to `True` to mask the background pixels (labels = 0) in
            the returned image.  This is useful for overplotting the
            segment outlines on an image.  The default is `False`.

        Returns
        -------
        boundaries : 2D `~numpy.ndarray` or `~numpy.ma.MaskedArray`
            An image with the same shape as the segmentation image
            containing only the outlines of the labeled segments.  The
            pixel values in the outlines correspond to the labels in the
            segmentation image.  If ``mask_background`` is `True`, then
            a `~numpy.ma.MaskedArray` is returned.

        Examples
        --------
        >>> from photutils import SegmentationImage
        >>> segm = SegmentationImage([[0, 0, 0, 0, 0, 0],
        ...                           [0, 2, 2, 2, 2, 0],
        ...                           [0, 2, 2, 2, 2, 0],
        ...                           [0, 2, 2, 2, 2, 0],
        ...                           [0, 2, 2, 2, 2, 0],
        ...                           [0, 0, 0, 0, 0, 0]])
        >>> segm.outline_segments()
        array([[0, 0, 0, 0, 0, 0],
               [0, 2, 2, 2, 2, 0],
               [0, 2, 0, 0, 2, 0],
               [0, 2, 0, 0, 2, 0],
               [0, 2, 2, 2, 2, 0],
               [0, 0, 0, 0, 0, 0]])
        """

        import skimage
        if LooseVersion(skimage.__version__) < LooseVersion('0.11'):
            raise ImportError('The outline_segments() function requires '
                              'scikit-image >= 0.11')
        from skimage.segmentation import find_boundaries

        outlines = self.data * find_boundaries(self.data, mode='inner')
        if mask_background:
            outlines = np.ma.masked_where(outlines == 0, outlines)
        return outlines
Example #16
def watershed(imgInput, cap=-1, sigma=1.0, min_dist_maxima=4):
    normFac = imgInput.max()
    imgSmoothed = normFac * gaussian_filter(imgInput/normFac, sigma=sigma)
    if cap==-1:
        cap = imgInput.max()
    imgCapped = np.minimum(imgSmoothed, cap*np.ones_like(imgSmoothed))
    # imgCapped = np.minimum(imgInput, cap*np.ones_like(imgInput))
    # imgSmoothed = gaussian_filter(imgCapped/imgCapped.max(), sigma=sigma)
    local_maxi = peak_local_max(imgCapped, indices=False, exclude_border=False, min_distance=min_dist_maxima )
    markers = ndi.label(local_maxi)[0]
    labels = ski.morphology.watershed(-imgInput, markers)
    boundaries = find_boundaries(labels, mode='thick')
    return labels, boundaries, markers, local_maxi, imgCapped
Example #17
File: ipython.py  Project: LouisK130/oii
def as_masked(img,mask,outline=False):
    if len(img.shape)==2:
        rgb = gray2rgb(img)
    else:
        rgb = img
    copy = img_as_float(rgb,force_copy=True)
    if outline:
        (labels,_) = measurements.label(mask)
        boundaries = find_boundaries(labels)
        copy[boundaries] = [1,0,0]
    else:
        copy[mask] = [1,0,0]
    return copy
Example #18
 def _interp(img,mask,method):
     xi = np.where(mask)
     if len(xi[0])==0:
         return img
     edges = maximum_filter(find_boundaries(mask),3)
     edges[xi] = 0
     yp, xp = np.where(edges)
     values = seed[yp,xp]
     # add jitter to points to avoid https://beagle.whoi.edu/redmine/issues/2609
     yp = np.array(yp) + (random.standard_normal((yp.size)) * 0.0001)
     xp = np.array(xp) + (random.standard_normal((xp.size)) * 0.0001)
     fill = griddata((yp,xp),values,xi,method=method)
     img[xi] = fill
     return img
Example #19
def raster(cells):
    s_elem = Functions.fig(Functions.fig_size)
    image = cells.copy()

    image = np.invert(image)

    distance = ndi.distance_transform_edt(image)
    local_maxi = peak_local_max(distance, indices=False, footprint=s_elem, labels=image)
    markers = ndi.label(local_maxi)[0]
    seg2 = watershed(distance, markers)

    lines = find_boundaries(seg2, mode='outer', background=True)
    lines = binary_dilation(lines, s_elem)

    return lines
Example #20
def test_find_boundaries():
    image = np.zeros((10, 10), dtype=np.uint8)
    image[2:7, 2:7] = 1

    ref = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
                    [0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
                    [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
                    [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
                    [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
                    [0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
                    [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])

    result = find_boundaries(image)
    assert_array_equal(result, ref)
Example #21
File: app.py  Project: LouisK130/oii
def serve_blob_image(parsed, mimetype, outline=False, target_img=None):
    # first, read the blob from the zipfile
    blob_zip = get_product_file(parsed, 'blobs')
    png_name = parsed['lid'] + '.png' # name is target LID + png extension
    png_data = get_zip_entry_bytes(blob_zip, png_name)
    # if we want a blob png, then pass it through without conversion
    if not outline and mimetype == 'image/png':
        return Response(png_data, mimetype='image/png')
    else:
        # read the png-formatted blob image into a PIL image
        pil_img = PIL.Image.open(StringIO(png_data))
        blob = np.array(pil_img.convert('L')) # convert to 8-bit grayscale
        if not outline: # unless we are drawing the outline
            return Response(as_bytes(blob), mimetype=mimetype) # format and serve
        else:
            blob_outline = find_boundaries(blob)
            roi = target_img
            blob = np.dstack([roi,roi,roi])
            blob[blob_outline] = [255,0,0]
            return Response(as_bytes(blob, mimetype), mimetype=mimetype)
Example #22
File: get_base.py  Project: wukm/cakepy
def get_bg_mask(img):
    
    if img.ndim == 3:
        bg_mask = img.any(axis=-1)
        bg_mask = np.invert(bg_mask) # consistent with np.ma, True if masked

        # make multichannel (is it really this hard?)
        bg_mask = np.repeat(bg_mask[:,:,np.newaxis], 3, axis=2) 
    
    else:
        bg_mask = (img != 0)
        bg_mask = np.invert(bg_mask) # see above

    bound = segmentation.find_boundaries(bg_mask, mode='inner', background=1)
    bg_mask[bound] = 1
    
    holes = morphology.remove_small_holes(bg_mask)
    bg_mask[holes] = 1

    return bg_mask
Example #23
def test_make_outline_overlay():
    rgb_data = np.random.rand(2, 50, 50, 3)

    predictions = np.zeros((2, 50, 50, 1), dtype='int')
    predictions[0, :10, :10, 0] = 1
    predictions[0, 15:30, 30:45, 0] = 2
    predictions[1, 10:15, 25:35, 0] = 1
    predictions[1, 40:50, 0:10, 0] = 2

    overlay = plot_utils.make_outline_overlay(rgb_data=rgb_data, predictions=predictions)
    for img in range(predictions.shape[0]):
        outline = find_boundaries(predictions[img, ..., 0], connectivity=1, mode='inner')
        outline_mask = outline > 0
        assert np.all(overlay[img, outline_mask, 0] == 1)

    # invalid prediction shape
    with pytest.raises(ValueError):
        _ = plot_utils.make_outline_overlay(rgb_data=rgb_data, predictions=predictions[0])

    # more predictions than rgb images
    with pytest.raises(ValueError):
        _ = plot_utils.make_outline_overlay(rgb_data=rgb_data[:1], predictions=predictions)
def main(conf, logger=None):

    logger = logging.getLogger('plot_results_ksp')

    logger.info('--------')
    logger.info('Writing result frames to: ' + conf.dataOutDir)
    logger.info('--------')

    res = np.load(os.path.join(conf.dataOutDir, 'results.npz'))

    frame_dir = os.path.join(conf.dataOutDir, 'results')
    if (not os.path.exists(frame_dir)):
        logger.info('Creating output frame dir: {}'.format(frame_dir))
        os.makedirs(frame_dir)

    scores = (res['ksp_scores_mat'].astype('uint8')) * 255
    imgs = [io.imread(f) for f in conf.frameFileNames]
    truth_dir = os.path.join(conf.root_path, conf.ds_dir, conf.truth_dir)
    gts = [
        io.imread(f)
        for f in sorted(glob.glob(os.path.join(truth_dir, '*.png')))
    ]

    locs2d = csv.readCsv(
        os.path.join(conf.root_path, conf.ds_dir, conf.locs_dir,
                     conf.csvFileName_fg))

    for f in range(scores.shape[-1]):
        logger.info('{}/{}'.format(f + 1, scores.shape[-1]))
        cont_gt = segmentation.find_boundaries(gts[f], mode='thick')
        idx_cont_gt = np.where(cont_gt)

        im = csv.draw2DPoint(locs2d, f, imgs[f], radius=7)

        im[idx_cont_gt[0], idx_cont_gt[1], :] = (255, 0, 0)
        score_ = np.repeat(scores[..., f][..., np.newaxis], 3, axis=2)
        im_ = np.concatenate((im, score_), axis=1)

        io.imsave(os.path.join(frame_dir, 'im_{0:04d}.png'.format(f)), im_)
Example #25
    def get_region_edge_v2(self, boundary_region_index):
        """
        Args:
            region_id:
                region label in superpixels
        """

        region_initial = np.zeros((self.height, self.width))
        region_initial[boundary_region_index] = 1
        region_boundary_mask = find_boundaries(region_initial,
                                               mode='inner',
                                               background=0).astype(np.uint8)
        self.region_boundary_mask = region_boundary_mask
        # return 1
        # import pdb ; pdb.set_trace()

        # step-3: get all pixels index of a region for mapping
        # region_pixels_loc = np.argwhere(self.superpixel==region_id)
        num_pixels = boundary_region_index[0].shape[
            0]  # number of pixels in a region
        region_pixels_loc_tuple = [(boundary_region_index[0][i],
                                    boundary_region_index[1][i])
                                   for i in range(num_pixels)]

        # step-4: generate dict for mapping boundary mask point index and its coordinate
        mapping_dict = dict(zip(range(num_pixels), region_pixels_loc_tuple))

        # step-5: generate boundary point index from boundary mask
        region_boundary_point_index = np.where(region_boundary_mask == 1)

        # step-6: use region boundary_point_index to look up its coordinate
        # np.where return a tuple, fetch the np.ndarray with index [0]

        boundary_coordinate = np.array([
            mapping_dict[key]
            for key in region_boundary_point_index[0].tolist()
        ])

        return boundary_coordinate
Example #26
def seg2seeds_max(segmentation, beta=0.1, max_radius=10):
    boundary = find_boundaries(segmentation, connectivity=2)

    adjusted_seg = segmentation + 1
    adjusted_seg[boundary] = 0

    ids, counts = np.unique(adjusted_seg, return_counts=True)

    new_seg = np.zeros_like(adjusted_seg)
    seeds = np.zeros_like(adjusted_seg)

    for i, (_ids, _counts) in enumerate(zip(ids[1:], counts[1:])):
        mask = adjusted_seg == _ids

        dt_mask = distance_transform_edt(mask)
        mask_max = np.argmax(dt_mask)
        x, y = np.unravel_index(mask_max, mask.shape)
        seeds[x, y] = i + 1

        new_seg[(segmentation + 1) == _ids] = i

    return seeds, new_seg
Example #27
File: conncomp.py  Project: ymcmrs/MintPy
    def get_large_label(mask, min_area=2.5e3, erosion_size=5, get_boundary=False, print_msg=False):
        # initial label
        label_img, num_label = measure.label(mask, connectivity=1, return_num=True)

        # remove regions with small area
        if print_msg:
            print('remove regions with area < {}'.format(int(min_area)))
        min_area = min(min_area, label_img.size * 3e-3)
        flag_slabel = np.bincount(label_img.flatten()) < min_area
        flag_slabel[0] = False
        label_small = np.where(flag_slabel)[0]
        for i in label_small:
            label_img[label_img == i] = 0
        label_img, num_label = measure.label(label_img, connectivity=1, return_num=True) # re-label

        # remove regions that would disappear after erosion operation
        erosion_structure = np.ones((erosion_size, erosion_size))
        label_erosion_img = morph.erosion(label_img, erosion_structure).astype(np.uint8)
        erosion_regions = measure.regionprops(label_erosion_img)
        if len(erosion_regions) < num_label:
            if print_msg:
                print('Some regions are lost during morphological erosion operation')
            label_erosion = [reg.label for reg in erosion_regions]
            for orig_reg in measure.regionprops(label_img):
                if orig_reg.label not in label_erosion:
                    if print_msg:
                        print('label: {}, area: {}, bbox: {}'.format(orig_reg.label, 
                                                                     orig_reg.area,
                                                                     orig_reg.bbox))
                    label_img[label_img == orig_reg.label] = 0
        label_img, num_label = measure.label(label_img, connectivity=1, return_num=True) # re-label

        # get label boundaries to facilitate bridge finding
        if get_boundary:
            label_bound = seg.find_boundaries(label_erosion_img, mode='thick').astype(np.uint8)
            label_bound *= label_erosion_img
            return label_img, num_label, label_bound
        else:
            return label_img, num_label
Example #28
def drawBoundAndBackground(img,mask,bg=None,replace=False,lines=50,
                           size=0.2,bound=True,boundmode='thick'):
    '''
    Given a mask, fill the masked region with hatch lines in the background colour bg and add a black-and-white border.
    mask: the region to annotate
    bg : background fill; may be a colour or an image, defaults to red
    replace : whether to operate on the original image in place
    lines: spacing between the hatch lines
    size: line thickness, as a fraction of the spacing
    bound: whether to draw the boundary
    boundmode: 'thick' is wider and sits right on the boundary; 'inner' stays inside the foreground but is thinner
    '''
    assert mask.ndim == 2, 'mask must be a 2D boolean mask'
    if not mask.any():
        return img
    isint = isNumpyType(img,int)
    if not replace:
        img = img.copy()
    if bg is None:
        bg = [max(255,img.max()),128,0] if isint else [1.,.5,0]
    white = max(255,img.max()) if isint else 1.
    m,n=img.shape[:2]
    i,j = np.mgrid[:m,:n]
    
    step = (m+n)//2//lines
    a = int(step*(1-size))
    drawInd = ~np.where(((i%step<a)& (j%step<a)),True,False)
#    from tool import g
#    g.x = mask,drawInd, bg,img
    if isinstance(bg,np.ndarray) and bg.ndim >=2:
        img[mask*drawInd] = bg[mask*drawInd]
    else:
        img[mask*drawInd] = bg
    if bound:
        from skimage.segmentation import find_boundaries
        boundind = find_boundaries(mask, mode=boundmode,background=True)
        boundBg = np.where((i+j)%10<5,white,0)
        img[boundind] = boundBg[boundind][...,None]
    return (img)
Example #29
File: test.py  Project: sjtukng/sandstone
def paramenter_comparison():

    im1 = cv2.imread("0degree.bmp", cv2.IMREAD_COLOR)
    im2 = cv2.imread("15degree.bmp", cv2.IMREAD_COLOR)
    im3 = cv2.imread("30degree.bmp", cv2.IMREAD_COLOR)
    im4 = cv2.imread("45degree.bmp", cv2.IMREAD_COLOR)
    gt = cv2.imread("gt.jpg", 0)
    max_im = ImageSynthesis.maximum_image(4, [im1, im2, im3, im4])
    thres = 3  # threshold of distance between edge pixel and g.t. edge pixel

    # Multi-channel SLIC, our method
    print("begin multi-channel slic")
    img = [im1, im2, im3, im4]

    ##	K = [200, 250, 300, 350, 400, 450, 500, 550, 600]
    ##	m = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]

    K = [300]
    m = [5]

    ##	outfile = file('parameter analysis.csv', 'wb')
    ##	writer = csv.writer(outfile)

    for i in range(len(K)):
        for j in range(len(m)):
            L = mslic.mslic(img, K[i], m[j], 1, 5)
            L = regions.distinct_label(L)
            print("after multi-channel slic, there are " + str(np.max(L)) +
                  " superpixels")
            L = clusters.merge_tiny_regions(im1, L, 200)
            print("after merging tiny regions, there are " + str(np.max(L)) +
                  " superpixels left")
            bond_L = segmentation.find_boundaries(L)

            print("K=" + str(K[i]) + " m=" + str(m[j]))
            recall = performance.boundary_recall(gt, bond_L, thres)
            precision = performance.precision(gt, bond_L, thres)
            print(str(recall))
            print(str(precision))
Example #30
def extract_seg(seg):
    labels = label(seg)
    regions_original = regionprops(labels)
    regions = list()
    for region in regions_original:
        if region.area > 100:
            boundary = find_boundaries(region.image,
                                       mode='thick').astype(np.uint8)
            rgba = np.zeros((boundary.shape[0], boundary.shape[1], 4), 'uint8')
            rgba[..., 0] = 255
            rgba[..., 3] = boundary * 255
            img = Image.fromarray(rgba)
            output_buffer = BytesIO()
            img.save(output_buffer, format='png')
            byte_data = output_buffer.getvalue()
            base64_str = "data:image/png;base64," + base64.b64encode(
                byte_data).decode('utf-8')
            regions.append(list(region.bbox) + [base64_str])

    # bbox: (min_row, min_col, max_row, max_col)

    return {'regions': regions, 'size': MODEL_IMG_SIZE}
Example #31
def save_prediction_image(_, panoptic_pred, img_info, out_dir, colors,
                          num_stuff):
    msk, cat, obj, iscrowd = panoptic_pred

    img = Image.open(img_info["abs_path"])

    # Prepare folders and paths
    folder, img_name = path.split(img_info["rel_path"])
    img_name, _ = path.splitext(img_name)
    out_dir = path.join(out_dir, folder)
    ensure_dir(out_dir)
    out_path = path.join(out_dir, img_name + ".jpg")

    # Render semantic
    sem = cat[msk].numpy()
    crowd = iscrowd[msk].numpy()
    sem[crowd == 1] = 255

    sem_img = Image.fromarray(colors[sem])
    sem_img = sem_img.resize(img_info["original_size"][::-1])

    # Render contours
    is_background = (sem < num_stuff) | (sem == 255)
    msk = msk.numpy()
    msk[is_background] = 0

    contours = find_boundaries(msk, mode="outer", background=0).astype(
        np.uint8) * 255
    contours = dilation(contours)

    contours = np.expand_dims(contours, -1).repeat(4, -1)
    contours_img = Image.fromarray(contours, mode="RGBA")
    contours_img = contours_img.resize(img_info["original_size"][::-1])

    # Compose final image and save
    out = Image.blend(img.convert(mode="RGB"), sem_img,
                      0.5).convert(mode="RGBA")
    out = Image.alpha_composite(out, contours_img)
    out.convert(mode="RGB").save(out_path)
Example #32
def make_spline_contour(arr, rc, ds_contour_rate=0.1, n=1000, s=0, k=3):

    arr_ = np.pad(arr, ((1, ), (1, )), mode='constant', constant_values=False)
    contour = segmentation.find_boundaries(arr_, mode='thick')
    y, x = np.where(contour)

    x = x.tolist()
    y = y.tolist()

    pts = [(x, y) for x, y in zip(x, y)]

    sort_fn = lambda pt: clockwiseangle(rc, pt)

    pts = sorted(pts, key=sort_fn)

    pts = np.array(pts)
    x, y = pts[:, 0], pts[:, 1]
    nodes = np.zeros([n, 2])
    [tck, u] = interpolate.splprep([x, y], s=s, k=k)
    [nodes[:, 0], nodes[:, 1]] = interpolate.splev(np.linspace(0, 1, n), tck)

    return nodes
Example #33
    def __call__(self, m):
        boundary = (m == 0).astype('uint8')
        results = [boundary]

        if self.thick_boundary:
            t_boundary = find_boundaries(m, connectivity=1, mode='outer', background=0)
            results.append(t_boundary)

        if self.lta is not None:
            z_affs = self.lta(m)
            for z_aff in z_affs:
                results.append(z_aff)

        if self.ignore_index is not None:
            for b in results:
                b[m == self.ignore_index] = self.ignore_index

        if self.append_label:
            # append original input data
            results.append(m)

        return np.stack(results, axis=0)
Example #34
    def __init__(self, img, ROI, region_size=50):

        h, w = img.shape[0:2]
        img = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
        img = cv2.GaussianBlur(img, (3, 3), 0)
        n_segment = int((h * w) / (region_size**2))
        labels = slic(img, n_segment, compactness=10)
        contour_mask = find_boundaries(labels)
        contour_mask[ROI == 0] = 0

        labels[ROI == 0] = -1
        print(f'finished slic')
        self.labels_position = self.__get_labels_position(labels)
        print(f'finished get position')

        self.__remap_labels(labels)
        print(f'finished remap labels')

        self.labels = labels
        self.numOfPixel = np.max(labels) + 1
        self.contour_mask = contour_mask
        self.adjacent_pairs = self.__construct_adjacency(labels)
Example #35
def main(image_dir):

    # generate_image_for_labelling(
    #     'data_intermediate/ColFRI-semisq_PP2A_FLC_03/converted_S0_C0_Z20.png',
    #     'nuclei_to_mark_03.png'
    # )
    # convert_nuclei_centroids_to_voronoi('C2Results.csv', 'C2Voronoi.png')
    # generate_mask_image(
    #     'data_intermediate/ColFRI-semisq_PP2A_FLC_03/converted_S0_C1_Z20.png',
    #     'mask_03.png'
    # )

    # load_voronoi_and_experiment('C3Voronoi.png', 'mask_03.png')

    watershed = imread('scratch/shedded.png')
    borders = find_boundaries(watershed)
    imsave('borders.png', borders)
    comp = make_composite_image(imread('scratch/adapt.png'), imread('markmeq.png'))

    comp[np.where(borders)] = [255, 255, 0]

    imsave('comp.png', comp)
Example #36
def generate_cell(image_filepath,
                  mask_filepath,
                  channel_list,
                  tile_shape,
                  outline=False):
    mask = io.imread(mask_filepath)
    with tifffile.TiffFile(image_filepath) as infile:
        img_shape = infile.series[0].pages[0].shape

        for region in measure.regionprops(mask):
            # calculate tile coordinate
            c = region.centroid
            txl = int(np.round(c[0] - tile_shape[0] / 2))
            tyl = int(np.round(c[1] - tile_shape[1] / 2))
            txu, tyu = txl + tile_shape[0], tyl + tile_shape[1]

            # skip cells too close to image edge
            checklist = [
                txl >= 0, txu < img_shape[0], tyl >= 0, tyu < img_shape[1]
            ]
            if not all(checklist):
                continue

            # compose
            cell = np.zeros(tile_shape + (len(channel_list), ))
            for channel in channel_list:
                img = infile.series[0].pages[channel].asarray(memmap=True)
                cell_img = img[txl:txu, tyl:tyu, channel]
                cell[..., channel] = img_as_float(cell_img)

            # add outline
            if outline:
                cm = mask[txl:txu, tyl:tyu].copy()
                co = segmentation.find_boundaries(cm, mode='inner')\
                        .astype(float)
                for ch in range(cell.shape[2]):
                    cell[..., ch] = np.maximum(cell[..., ch], co)

            yield cell
def compute_sdf(img_gt, out_shape):
    """
    compute the signed distance map of binary mask
    input: segmentation, shape = (batch_size,c, x, y, z)
    output: the Signed Distance Map (SDM) 
    sdf(x) = 0; x in segmentation boundary
             -inf|x-y|; x in segmentation
             +inf|x-y|; x out of segmentation
    normalize sdf to [-1,1]

    """

    img_gt = img_gt.astype(np.uint8)
    normalized_sdf = np.zeros(out_shape)

    for b in range(out_shape[0]):  # batch size
        for c in range(out_shape[1]):
            posmask = img_gt[b].astype(bool)
            if posmask.any():
                negmask = ~posmask
                posdis = distance(posmask)
                negdis = distance(negmask)
                boundary = skimage_seg.find_boundaries(
                    posmask, mode='inner').astype(np.uint8)
                sdf = (negdis - np.min(negdis)) / (np.max(negdis) - np.min(
                    negdis)) - (posdis - np.min(posdis)) / (np.max(posdis) -
                                                            np.min(posdis))
                sdf[boundary == 1] = 0
                normalized_sdf[b][c] = sdf
                assert np.min(sdf) == -1.0, print(np.min(posdis),
                                                  np.max(posdis),
                                                  np.min(negdis),
                                                  np.max(negdis))
                assert np.max(sdf) == 1.0, print(np.min(posdis),
                                                 np.min(negdis),
                                                 np.max(posdis),
                                                 np.max(negdis))

    return normalized_sdf
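For reference, a minimal driver for compute_sdf on a toy mask. The names are assumptions based on the call sites: distance is taken to be scipy.ndimage.distance_transform_edt and skimage_seg to be skimage.segmentation, and the mask follows the shape the code (rather than the docstring) expects.

import numpy as np
from scipy.ndimage import distance_transform_edt as distance
import skimage.segmentation as skimage_seg

gt = np.zeros((1, 8, 8), dtype=np.uint8)   # one sample; single channel implied by out_shape
gt[0, 2:6, 2:6] = 1

sdm = compute_sdf(gt, out_shape=(1, 1, 8, 8))
print(sdm.shape, sdm.min(), sdm.max())     # (1, 1, 8, 8), -1.0 deep inside, +1.0 far outside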
def get_planar(image_path, depth_path):
    image = cv2.imread(image_path)
    depth = cv2.imread(depth_path, 0)
    clusters = slic(image, 1000, 3, 10, convert2lab=True)
    contours = find_boundaries(clusters)
    idx_clusters = get_clusters(clusters)
    seg_num = np.amax(clusters) + 1
    # initialize the parameters used in the BFS algorithm
    adj_matrix = get_adjMatrix(clusters, contours)
    visited = []
    root = []
    qNode = []
    sample_num = 50
    for i in range(seg_num):
        visited.append(False)
        root.append(i)
    #Begin BFS
    for i in range(seg_num):
        if (visited[i] == False):
            visited[i] = True
            qNode.append(i)
            while (len(qNode) != 0):
                current = qNode.pop(0)
                for j in range(seg_num):
                    if (adj_matrix[current][j] != 0 and visited[j] == False):
                        visited[j] = True
                        qNode.append(j)
                        sample1 = get_samples(idx_clusters, current,
                                              sample_num, depth)
                        sample2 = get_samples(idx_clusters, j, sample_num,
                                              depth)
                        if (is_coplanar(sample1, sample2)):
                            root[j] = root[current]
    #end BFS
    plane_clusters = get_plane(idx_clusters, root)
    display_avg_plane(
        image,
        plane_clusters)  #display the average color of each clusters(ICRA 2009)
    display_box(image, plane_clusters)  #display bounding boxes of planar area
Example #39
def propagate(img,one_cell,sli_ind):
    z,x,y = img.shape
    sli_start,sli_end = sli_ind,sli_ind
    boundaries = find_boundaries(one_cell[sli_ind])
    #print boundaries*1
    #plt.imshow(boundaries,cmap='gray')
    #plt.show()
    one_cell[sli_ind]=boundaries*1
    while (sli_start>0 or sli_end<z-1):
        print(sli_start)
        if sli_start>0:
            cur = img[sli_start]
            cur_2 = one_cell[sli_start]
            points = np.transpose(np.nonzero(cur_2))
            prev_sli = np.zeros((x,y))
            for point in points:
                #print len(point)
                print(point)
                prev = img[sli_start-1]
                #print cur[point].shape
                diff = abs(prev[max(point[0]-2,0):min(point[0]+3,x),max(point[1]-2,0):min(point[1]+3,y)]-cur[point[0],point[1]])
                #print diff.shape
                ind = np.unravel_index(np.argmin(diff, axis=None), diff.shape)
                prev_sli[ind[0]+point[0]-2,ind[1]+point[1]-2]=1
                one_cell[sli_start-1]=prev_sli
            sli_start = sli_start-1
        if sli_end<z-1:
            cur = img[sli_end]
            cur_2 = one_cell[sli_start]
            points = np.transpose(np.nonzero(cur_2))
            prev_sli = np.zeros((x,y))
            for point in points:
                prev = img[sli_start-1]
                diff = abs(prev[max(point[0]-2,0):min(point[0]+3,x),max(point[1]-2,0):min(point[1]+3,y)]-cur[point[0],point[1]])
                ind = np.unravel_index(np.argmin(diff, axis=None), diff.shape)
                prev_sli[ind[0]+point[0]-4,ind[1]+point[1]-4]=1
                one_cell[sli_end+1]=prev_sli
            sli_end = sli_end+1
    return one_cell
Example #40
def fitting_curve(mask, margin=(60, 60)):
    """Compute thickness by fitting the curve
    Argument:
        margin: indicate valid mask region in case overfit

    Return:
        thickness: between upper and lower limbus
        curve_mask: the same shape as mask while labeled by 1
    """
    # 1. Find boundary
    bound = find_boundaries(mask, mode='outer')
    # 2. Crop marginal parts (may be noise)
    lhs, rhs = margin
    bound[:, :lhs] = 0  # left hand side
    bound[:, -rhs:] = 0  # right hand side
    # 3. Process upper and lower boundary respectively
    labeled_bound = label(bound, connectivity=bound.ndim)
    upper, lower = labeled_bound == 1, labeled_bound == 2
    # 1) fit poly
    f_up, f_lw = [
        np.poly1d(np.polyfit(np.where(limit)[1],
                             np.where(limit)[0], 6))
        for limit in [upper, lower]
    ]
    # 2) interpolation
    width = mask.shape[1]
    x_cord = range(width)
    y_up_fit, y_lw_fit = [f(x_cord) for f in [f_up, f_lw]]
    rw = 30  # roi width
    thickness = (y_up_fit - y_lw_fit)[width // 2 - rw:width // 2 + rw]

    curve_mask = np.zeros_like(mask)
    y_up_fit, y_lw_fit = [
        np.array(y, dtype=int) for y in [y_up_fit, y_lw_fit]
    ]  # int for slice
    curve_mask[y_up_fit[lhs:-rhs], x_cord[lhs:-rhs]] = 255
    curve_mask[y_lw_fit[lhs:-rhs], x_cord[lhs:-rhs]] = 255

    return abs(thickness.mean()), curve_mask
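A quick sanity check of fitting_curve on a synthetic mask (a flat horizontal band, so both fitted polynomials are essentially constant; assumes the function and its imports, find_boundaries and label, are in scope):

import numpy as np

mask = np.zeros((100, 300), dtype=np.uint8)
mask[40:60, :] = 1                      # 20-pixel-thick band across the full width

thickness, curve_mask = fitting_curve(mask, margin=(60, 60))
print(round(thickness, 1))              # about 21: band height plus the one-pixel outer boundaries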
Example #41
def label_boundary(label_img, num_label, erosion_size=5, print_msg=False):
    """Label the boundary of the labeled array

    Parameters: label_img    - 2d np.ndarray of int, labeled array where all connected regions are assigned the same value
                num_label    - int, number of labeled regions
    Returns:    label_img    - 2d np.ndarray of int, labeled array where all connected regions are assigned the same value
                num_label    - int, number of labeled regions
                label_bound  - 2d np.ndarray of bool, where True represents a boundary pixel.
    """

    if erosion_size > 0:
        # remove regions that would disappear after erosion
        # to ensure the consistency between label_img and label_bound
        erosion_structure = np.ones((erosion_size, erosion_size))
        label_erosion_img = morph.erosion(label_img, erosion_structure).astype(np.uint8)

        erosion_regions = measure.regionprops(label_erosion_img)
        if len(erosion_regions) < num_label:
            if print_msg:
                print('regions lost during morphological erosion operation:')

            label_erosion = [reg.label for reg in erosion_regions]
            for orig_reg in measure.regionprops(label_img):
                if orig_reg.label not in label_erosion:
                    label_img[label_img == orig_reg.label] = 0
                    if print_msg:
                        print('label: {}, area: {}, bbox: {}'.format(orig_reg.label,
                                                                     orig_reg.area,
                                                                     orig_reg.bbox))

        # update label
        label_img, num_label = measure.label(label_img, connectivity=1, return_num=True) # re-label

    # get label boundaries to facilitate bridge finding
    label_bound = seg.find_boundaries(label_erosion_img, mode='thick').astype(np.uint8)
    label_bound *= label_erosion_img

    return label_img, num_label, label_bound
def shapesPlot(shapes, inds, fig, ax):

    from skimage.measure import label, regionprops
    from skimage import feature
    from skimage.morphology import binary_dilation
    from skimage.segmentation import find_boundaries
    import pylab as plt
    import numpy as np

    #fig = plt.figure()
    #ax = fig.add_subplot(111)
    sz = np.int32(shapes.shape)

    for i in inds:
        img = shapes[i, :, :]
        mx = img[:].max()
        test = img > 0.4 * mx
        test2 = binary_dilation(binary_dilation(test))
        lbls = label(test2)
        rgs = regionprops(lbls)
        if np.size(rgs) > 0:
            szs = []
            for prop in rgs:
                szs.append(prop.area)
            ind = np.argmax(szs)
            if rgs[ind].area > 100:
                pt = rgs[ind].centroid
                region = lbls == ind + 1
                edges = find_boundaries(region)
                eln = edges.nonzero()
                ax.scatter(eln[1],
                           eln[0],
                           marker='.',
                           color='r',
                           linewidths=0.01)
                ax.text(pt[1] - 4, pt[0] + 4, '%i' % i, fontsize=14, color='k')

    return fig, ax
Example #43
def process_truth_dar(sample, L, init_rho_rel):
    """
    This is called on segmentation mask to extract the ground truth snake, etc...
    """
    truth = sample['label/segmentation'][..., 0]
    shape = truth.shape
    rc = np.array([shape[1] // 2, shape[0] // 2])

    contour_nodes = make_spline_contour(truth, rc, s=0, k=3, n=1000)

    interp_coords, interp_radii, angles, delta_angles = interpolate_ground_truth_polygon(
        L, rc, contour_nodes)
    # interp_radii, r = get_contour(truth, rc, L)

    sample['interp_radii'] = interp_radii[..., np.newaxis, np.newaxis]
    sample['init_contour_origin'] = rc[..., np.newaxis, np.newaxis]
    sample['interp_xy'] = interp_coords[..., np.newaxis]
    sample['interp_angles'] = angles[..., np.newaxis, np.newaxis]
    sample['delta_angles'] = delta_angles[..., np.newaxis, np.newaxis,
                                          np.newaxis]

    truth_contour = (segmentation.find_boundaries(truth > 0))
    data = distance_transform_edt(np.logical_not(truth_contour))
    beta = data.copy()
    beta[truth > 0] = 0
    kappa = data.copy()
    kappa[truth == 0] = 0

    sample['label/edt_D'] = data[..., np.newaxis]
    sample['label/edt_beta'] = beta[..., np.newaxis]
    sample['label/edt_kappa'] = kappa[..., np.newaxis]

    init_rho = init_rho_rel * np.max(truth.shape)
    sample['init_contour_radii'] = np.array([init_rho] * int(L))[...,
                                                                 np.newaxis,
                                                                 np.newaxis]

    return sample
Example #44
def split_nucl_and_memb_data(labeldata, nuclearmask=None):

    labeldata_memb = np.copy(labeldata)
    labeldata_nucl = np.copy(labeldata)

    memb_mask = find_boundaries(labeldata)
    for i, slc in enumerate(memb_mask):
        memb_mask[i, :, :] = binary_dilation(slc)
    labeldata_memb[~memb_mask] = 0

    if nuclearmask is None:
        nuclearmask = ~memb_mask

    labeldata_nucl[~nuclearmask] = 0

    #     print('mask_nucl0_sum', np.sum(~memb_mask))
    #     print('mask_nucl1_sum', np.sum(nuclearmask))
    #     print('mask_memb_sum', np.sum(memb_mask))
    #     print('label_full_sum', np.sum(labeldata.astype('bool')))
    #     print('label_memb_sum', np.sum(labeldata_memb.astype('bool')))
    #     print('label_nucl_sum', np.sum(labeldata_nucl.astype('bool')))

    return labeldata_memb, labeldata_nucl
Example #45
    def get_seg_map_boundaries(self, img=None, mode="inner"):

        if not isinstance(img, np.ndarray):
            img = self._img

        # img = imgtools.stack_image_dim(img)
        seg_map_bin = segmentation.find_boundaries(self._seg_map, mode=mode)
        if mode == "inner":
            img = cv2.resize(img, (seg_map_bin.shape[1], seg_map_bin.shape[0]))

        # colored boundaries
        # return img*seg_map_bin[:,:,np.newaxis]

        img_colorize = np.stack([
            np.zeros(seg_map_bin.shape, dtype=np.uint8),
            np.full(seg_map_bin.shape, 255, dtype=np.uint8),
            np.zeros(seg_map_bin.shape, dtype=np.uint8)
        ],
                                axis=2)

        return img * np.invert(
            seg_map_bin
        )[:, :, np.newaxis] + img_colorize * seg_map_bin[:, :, np.newaxis]
Example #46
    def find_grain_boundaries(self, segmented_image):
        """Find the grain boundaries.

        Parameters
        ----------
        segmented_image : ndarray
            Label image, output of a segmentation.

        Returns
        -------
        boundary : bool ndarray
            A bool ndarray, where True represents a boundary pixel.
        """

        boundary = segmentation.find_boundaries(segmented_image)
        if self.__interactive_mode:
            # Superimpose the boundaries of the segmented image on the original image
            superimposed = segmentation.mark_boundaries(self.original_image,
                                                        segmented_image, mode='thick')
            io.imshow(superimposed)
            io.show()
            print('Grain boundaries found.')
        return boundary
Example #47
def test_rays_volume_area(n_rays = 187):
    from skimage.measure import regionprops
    from skimage.segmentation import find_boundaries
    
    rays =  Rays_GoldenSpiral(n_rays)
    shape = (55,56,58)
    center = np.array(shape)//2

    dist = .4*np.random.uniform(.3*np.min(shape),.5*np.min(shape), n_rays)
    
    lbl = polyhedron_to_label([dist], [center], rays = rays, shape = shape)

    volume1 = rays.volume(dist)
    volume2 = np.mean(rays.volume(np.broadcast_to(dist,(13,17)+dist.shape)))
    volume3 = regionprops(lbl)[0].area

    surface1 = rays.surface(dist)
    surface2 = np.mean(rays.surface(np.broadcast_to(dist,(13,17)+dist.shape)))
    surface3 = np.sum(find_boundaries(lbl, mode="outer"))

    print(volume1, volume2, volume3)
    print(surface1, surface2, surface3)
    return lbl
Example #48
File: plot.py  Project: guiwitz/cellpose
def outline_view(img0, maski, color=[1, 0, 0], mode='inner'):
    """
    Generates a red outline overlay onto image. 
    """
    #     img0 = utils.rescale(img0)
    if len(img0.shape) < 3:
        #         img0 = image_to_rgb(img0) broken, transposing some images...
        img0 = np.stack([img0] * 3, axis=-1)

    if SKIMAGE_ENABLED:
        outlines = find_boundaries(
            maski, mode=mode
        )  #not using masks_to_outlines as that gives border 'outlines'
    else:
        outlines = utils.masks_to_outlines(
            maski, mode=mode
        )  #not using masks_to_outlines as that gives border 'outlines'
    outY, outX = np.nonzero(outlines)
    imgout = img0.copy()
    #     imgout[outY, outX] = np.array([255,0,0]) #pure red
    imgout[outY, outX] = np.array(color)

    return imgout
def compute_sdf_np(arr, truncate_value=20):
    """
    compute the signed distance map of binary mask
    input: segmentation, shape = (x, y, z)
    output: the Signed Distance Map (SDM)
    sdf(t) = 0; t in segmentation boundary
             +inf|t-b|; x in segmentation
             -inf|t-b|; x out of segmentation
    normalize sdf to [-1,1]
    """
    posmask = arr.astype(bool)
    tsdf = np.zeros_like(arr, dtype=np.float32)  # returned as all zeros if the mask is empty
    if posmask.any():
        negmask = ~posmask
        posdis = distance(posmask)
        negdis = distance(negmask)
        posdis[posdis > truncate_value] = truncate_value
        negdis[negdis > truncate_value] = truncate_value
        boundary = skimage_seg.find_boundaries(posmask, mode='inner').astype(np.uint8)
        tsdf = (posdis - np.min(posdis)) / (np.max(posdis) - np.min(posdis)) - \
              (negdis - np.min(negdis)) / (np.max(negdis) - np.min(negdis))
        tsdf[boundary == 1] = 0

    return tsdf
Example #50
def save_sdf(gt_path=None):
    '''
    Generate the SDM for a ground-truth segmentation and save it as NIfTI.
    '''
    import nibabel as nib
    import numpy as np
    from scipy.ndimage import distance_transform_edt as distance
    from skimage import segmentation as skimage_seg

    dir_path = 'C:/Seolen/PycharmProjects/semi_seg/semantic-semi-supervised-master/model/gan_sdfloss3D_0229_04/test'
    if gt_path is None:
        gt_path = dir_path + '/00_gt.nii.gz'
    gt_img = nib.load(gt_path)
    gt = gt_img.get_fdata().astype(np.uint8)   # get_data() is deprecated in nibabel
    posmask = gt.astype(bool)
    negmask = ~posmask
    posdis = distance(posmask)
    negdis = distance(negmask)
    boundary = skimage_seg.find_boundaries(posmask,
                                           mode='inner').astype(np.uint8)
    # sdf = (negdis - np.min(negdis)) / (np.max(negdis) - np.min(negdis)) - (posdis - np.min(posdis)) / ( np.max(posdis) - np.min(posdis))
    sdf = (posdis - np.min(posdis)) / (np.max(posdis) - np.min(posdis))
    sdf[boundary == 1] = 0
    sdf = sdf.astype(np.float32)

    sdf = nib.Nifti1Image(sdf, gt_img.affine)
    save_path = dir_path + '/00_sdm_pos.nii.gz'
    nib.save(sdf, save_path)
def mask_SuPix(overseg_img, SuPix_codes, show_bound=True):
    """Visualize selected super-pixels in an over-segmentation
    image: the boundaries of all super-pixels are drawn and the
    selected super-pixels are highlighted.
    """
    import numpy as np
    from skimage.segmentation import find_boundaries
    from skimage.measure import regionprops

    s = overseg_img.shape

    masked_SuPix = np.zeros(s, dtype=bool)

    # get the boundaries if necessary
    if show_bound:
        for i in range(s[2]):
            masked_SuPix[:, :, i] = find_boundaries(overseg_img[:, :, i])

    # selected superpixels slices
    slices = np.unique(SuPix_codes[0, :])
    for j in slices:
        props = regionprops(overseg_img[:, :, j])
        SuPix_labels = SuPix_codes[1, SuPix_codes[0, :] == j]
        n_overseg = len(props)
        prop_labels = [props[i]['label'] for i in range(n_overseg)]
        # mask the super-pixels
        for label in SuPix_labels:
            # cast to an array so the element-wise comparison also works for plain-int labels
            label_loc = np.where(np.asarray(prop_labels) == label)[0][0]
            # indices of the pixels in
            # this super-pixel
            multinds_2D = props[label_loc]['coords']
            vol = len(multinds_2D[:, 0])
            multinds_3D = (multinds_2D[:, 0], multinds_2D[:, 1],
                           np.ones(vol, dtype=int) * j)

            masked_SuPix[multinds_3D] = True

    return masked_SuPix
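
A minimal usage sketch for mask_SuPix (toy over-segmentation; SuPix_codes holds slice indices in row 0 and super-pixel labels in row 1, as the function body assumes):

import numpy as np

# a 64 x 64 x 3 over-segmentation with two super-pixels per slice
overseg = np.ones((64, 64, 3), dtype=int)
overseg[32:, :, :] = 2
SuPix_codes = np.array([[0, 2],    # row 0: slice indices
                        [1, 2]])   # row 1: super-pixel labels to highlight
highlighted = mask_SuPix(overseg, SuPix_codes, show_bound=True)
# highlighted is a boolean volume: True on every super-pixel boundary and inside the selected super-pixels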
Example #52
    def __call__(self, img):

        img_arr = np.array(img)
        img_arr[img_arr == self.ignore_id] = self.num_classes

        if cfg.STRICTBORDERCLASS is not None:
            one_hot_orig = self.new_one_hot_converter(img_arr)
            mask = np.zeros((img_arr.shape[0], img_arr.shape[1]))
            for cls in cfg.STRICTBORDERCLASS:
                mask = np.logical_or(mask, (img_arr == cls))
        one_hot = 0

        border = cfg.BORDER_WINDOW
        if (cfg.REDUCE_BORDER_EPOCH != -1
                and cfg.EPOCH > cfg.REDUCE_BORDER_EPOCH):
            border = border // 2
            border_prediction = find_boundaries(img_arr,
                                                mode='thick').astype(np.uint8)

        for i in range(-border, border + 1):
            for j in range(-border, border + 1):
                shifted = shift(img_arr, (i, j), cval=self.num_classes)
                one_hot += self.new_one_hot_converter(shifted)

        one_hot[one_hot > 1] = 1

        if cfg.STRICTBORDERCLASS is not None:
            one_hot = np.where(np.expand_dims(mask, 2), one_hot_orig, one_hot)

        one_hot = np.moveaxis(one_hot, -1, 0)

        if (cfg.REDUCE_BORDER_EPOCH != -1
                and cfg.EPOCH > cfg.REDUCE_BORDER_EPOCH):
            one_hot = np.where(border_prediction, 2 * one_hot, 1 * one_hot)
            # print(one_hot.shape)
        return torch.from_numpy(one_hot).byte()
Example #53
#
# closed__ = binary_closing(cleared__, disk(int(sys.argv[2])))

filled = ndi.binary_fill_holes(cleared)

filled_ = ndi.binary_fill_holes(cleared_)

filled__ = ndi.binary_fill_holes(cleared__)

bopened = binary_opening(filled, disk(int(sys.argv[2])))

bopened_ = binary_opening(filled_, disk(int(sys.argv[3])))

bopened__ = binary_opening(filled__, disk(int(sys.argv[4])))

boundaries = find_boundaries(bopened, mode="inner")

boundaries_ = find_boundaries(bopened_, mode="inner")

boundaries__ = find_boundaries(bopened__, mode="inner")

# plt.subplot(131)
#
# plt.imshow(hematoxylin_)
#
# plt.subplot(132)
#
# plt.imshow(hematoxylin__)
#
# plt.subplot(133)
#
Example #54
    def make_svg(self,views):
        '''Generate path-based svg of atlas (paths we can manipulate in d3)'''
        import cairo
         # We will save complete svg (for file), partial (for embedding), and paths
        svg_data = dict(); svg_data_partial = dict(); svg_data_file = dict();
        if isinstance(views,str):
            views = [views]
        views = [v.lower() for v in views]
        self.views = views
        mr = self.mr.get_data()
        # use integer midpoints so they can be used as slice indices below
        middles = [int(numpy.round(x / 2)) for x in mr.shape]

        # Create a color lookup table
        colors_html = get_colors(len(self.labels),"hex")
        self.color_lookup = self.make_color_lookup(colors_html)

        with make_tmp_folder() as temp_dir:

            # Get all unique regions (may not be present in every slice)
            regions = [ x for x in numpy.unique(mr) if x != 0]

            # Get colors - will later be changed
            colors = get_colors(len(self.labels),"decimal")

            # Generate an axial, sagittal, coronal view
            slices = dict()
            for v in views:
                # Keep a list of region names that correspond to paths
                region_names = []

                # Generate each of the views
                if v == "axial": slices[v] = numpy.rot90(mr[:,:,middles[0]],2)
                elif v == "sagittal" : slices[v] = numpy.rot90(mr[middles[1],:,:],2)
                elif v == "coronal" : slices[v] = numpy.rot90(mr[:,middles[2],:],2)

                # For each region in the view, but not 0
                regions = [ x for x in numpy.unique(slices[v]) if x != 0]

                # Write svg to temporary file
                output_file = '%s/%s_atlas.svg' %(temp_dir,v)
                fo = open(output_file, 'wb')

                # Set up the "context" - what cairo calls a canvas
                width, height  = numpy.shape(slices[v])
                surface = cairo.SVGSurface (fo, width*3, height*3)
                ctx = cairo.Context (surface)
                ctx.scale(3.,3.)

                # 90 degree rotation matrix
                rotation_matrix = cairo.Matrix.init_rotate(numpy.pi/2)

                for rr in range(0,len(regions)):
                    index_value = regions[rr]
                    #region_name = self.labels[str(index_value)].label
                    filtered = numpy.zeros(numpy.shape(slices[v]))
                    filtered[slices[v] == regions[rr]] = 1
                    region = img_as_float(find_boundaries(filtered)) # We aren't using Canny anymore...

                    ctx.set_source_rgb (float(colors[index_value-1][0]), float(colors[index_value-1][1]), float(colors[index_value-1][2])) # Solid color

                    # Segment!
                    segments_fz = felzenszwalb(region, scale=100, sigma=0.1, min_size=10)

                    # For each cluster in the region, skipping value of 0
                    for c in range(1,len(numpy.unique(segments_fz))):
                        cluster = numpy.zeros(numpy.shape(region))
                        cluster[segments_fz==c] = 1
                        # Create distance matrix for points
                        x,y = numpy.where(cluster==1)
                        points = [[x[i],y[i]] for i in range(0,len(x))]
                        disty = squareform(pdist(points, 'euclidean'))
                        # This keeps track of which we have already visited
                        visited = []; row = 0; current = points[row]
                        visited.append(row)

                        # We need to remember the first point, for the last one
                        fp = current

                        while len(visited) != len(points):
                            thisx = current[0]
                            thisy = current[1]
                            ctx.move_to(thisx, thisy)
                            # Find closest point, only include columns we have not visited
                            distances = disty[row,:]
                            distance_lookup = dict()
                            # We need to preserve indices but still eliminate visited
                            for j in range(0,len(distances)):
                                if j not in visited: distance_lookup[j] = distances[j]
                            # Get key minimum distance
                            row = min(distance_lookup, key=distance_lookup.get)
                            next = points[row]
                            nextx = next[0]
                            nexty = next[1]
                            # If the distance is more than N pixels, close the path
                            # This resolves some of the rough edges too
                            # compare the minimum distance (value), not the point index (key)
                            if distance_lookup[row] > 70:
                                ctx.line_to(fp[0],fp[1])
                                #cp = [(current[0]+fp[0])/2,(current[1]+fp[1])/2]
                                #ctx.curve_to(fp[0],fp[1],cp[0],cp[1],cp[0],cp[1])
                                ctx.set_line_width(1)
                                ctx.close_path()
                                fp = next
                            else:
                                #cp = [(current[0]+nextx)/2,(current[1]+nexty)/2]
                                #ctx.curve_to(nextx,nexty,cp[0],cp[1],cp[0],cp[1])
                                ctx.line_to(nextx, nexty)
                                # Set next point to be current
                            visited.append(row)
                            current = next

                        # Go back to the first point
                        ctx.move_to(current[0],current[1])
                        #cp = [(current[0]+fp[0])/2,(current[1]+fp[1])/2]
                        #ctx.curve_to(fp[0],fp[1],cp[0],cp[1],cp[0],cp[1])
                        ctx.line_to(fp[0],fp[1])
                        # Close the path
                        ctx.set_line_width (1)
                        ctx.stroke()

                # Finish the surface
                surface.finish()
                fo.close()

                # Now grab the file, set attributes
                # Give group name based on atlas, region id based on matching color
                dom = minidom.parse(output_file)
                for group in dom.getElementsByTagName("g"):
                    group.setAttribute("id",os.path.split(self.file)[-1])
                    group.setAttribute("class",v)
                expression = re.compile("stroke:rgb")
                # Add class to svg - important so can manipulate in d3
                dom.getElementsByTagName("svg")[0].setAttribute("class",v)
                for path in dom.getElementsByTagName("path"):
                    style = path.getAttribute("style")
                    # This is lame - but we have to use the color to look up the region
                    color = [x for x in style.split(";") if expression.search(x)][0]
                    color = [percent_to_float(x) for x in color.replace("stroke:rgb(","").replace(")","").split(",")]
                    region_index = [x for x in range(0,len(colors)) if numpy.equal(colors[x],color).all()][0]+1
                    region_label = self.labels[str(region_index)].label
                    # We don't want to rely on cairo to style the paths
                    self.remove_attributes(path,"style")
                    self.set_attributes(path,["id","stroke"],[region_label,self.color_lookup[region_label]])
                svg_data_file[v] = dom.toxml()
                svg_data[v] = dom.toxml().replace("<?xml version=\"1.0\" ?>","") # get rid of just xml tag
                svg_data_partial[v] = "\n".join(dom.toxml().split("\n")[1:-1])

        return svg_data, svg_data_partial, svg_data_file
def FeatureExtraction(Label, In, Ic, W, K=128, Fs=6, Delta=8):
    """
    Calculates features from a label image.

    Parameters
    ----------
    Label : array_like
        A T x T label image.
    In : array_like
        A T x T intensity image for Nuclei.
    Ic : array_like
        A T x T intensity image for Cytoplasms.
    W : array_like
        A 3x3 matrix containing the stain colors in its columns.
        In the case of two stains, the third column is zero and will be
        complemented using cross-product. The matrix should contain a
        minimum of two nonzero columns.
    K : Number of points for boundary resampling to calculate fourier
        descriptors. Default value = 128.
    Fs : Number of frequency bins for calculating FSDs. Default value = 6.
    Delta : scalar, used to dilate nuclei and define cytoplasm region.
            Default value = 8.
    Returns
    -------
    df : 2-dimensional labeled data structure, float64
        Pandas data frame.
    Notes
    -----
    The following features are computed:

    - `Centroids`:
        - X,Y

    - `Morphometry features`:
        - Area,
        - Perimeter,
        - MajorAxisLength,
        - MinorAxisLength,
        - Eccentricity,
        - Circularity,
        - Extent,
        - Solidity

    - `Fourier shape descriptors`:
        - FSD1-FSD6

    - Intensity features for hematoxylin and cytoplasm channels:
        - MinIntensity, MaxIntensity,
        - MeanIntensity, StdIntensity,
        - MeanMedianDifferenceIntensity,
        - Entropy, Energy, Skewness and Kurtosis

    - Gradient/edge features for hematoxylin and cytoplasm channels:
        - MeanGradMag, StdGradMag, SkewnessGradMag, KurtosisGradMag,
        - EntropyGradMag, EnergyGradMag,
        - SumCanny, MeanCanny
    References
    ----------
    .. [1] D. Zhang et al. "A comparative study on shape retrieval using
       Fourier descriptors with different shape signatures," In Proc.
       ICIMADE01, 2001.
    .. [2] Daniel Zwillinger and Stephen Kokoska. "CRC standard probability
       and statistics tables and formulae," Crc Press, 1999.
    """

    # get total regions
    NumofLabels = Label.max()

    # get Label size x
    size_x = Label.shape[0]

    # initialize centroids
    CentroidX = []
    CentroidY = []

    # initialize morphometry features
    Area = []
    Perimeter = []
    Eccentricity = []
    Circularity = []
    MajorAxisLength = []
    MinorAxisLength = []
    Extent = []
    Solidity = []

    # initialize FSD feature group
    FSDGroup = np.zeros((NumofLabels, Fs))

    # initialize Nuclei, Cytoplasms
    Nuclei = [[] for i in range(NumofLabels)]
    Cytoplasms = [[] for i in range(NumofLabels)]

    # create round structuring element
    Disk = disk(Delta)

    # initialize pandas dataframe
    df = pd.DataFrame()

    # fourier descriptors, spaced evenly over the interval 1:K/2
    Interval = np.round(
        np.power(
            2, np.linspace(0, math.log(K, 2)-1, Fs+1, endpoint=True)
        )
    ).astype(np.uint8)
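    # with the defaults K=128 and Fs=6, Interval evaluates to [1, 2, 4, 8, 16, 32, 64]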

    # extract feature information
    for region in regionprops(Label):
        # add centroids
        CentroidX = np.append(CentroidX, region.centroid[0])
        CentroidY = np.append(CentroidY, region.centroid[1])
        # add morphometry features
        Area = np.append(Area, region.area)
        Perimeter = np.append(Perimeter, region.perimeter)
        Eccentricity = np.append(Eccentricity, region.eccentricity)
        if region.perimeter == 0:
            Circularity = np.append(Circularity, 0)
        else:
            Circularity = np.append(
                Circularity,
                4 * math.pi * region.area / math.pow(region.perimeter, 2)
            )
        MajorAxisLength = np.append(MajorAxisLength, region.major_axis_length)
        MinorAxisLength = np.append(MinorAxisLength, region.minor_axis_length)
        Extent = np.append(Extent, region.extent)
        Solidity = np.append(Solidity, region.solidity)
        # get bounds of dilated nucleus
        bounds = GetBounds(region.bbox, Delta, size_x)
        # grab nucleus mask
        Nucleus = (
            Label[bounds[0]:bounds[1], bounds[2]:bounds[3]] == region.label
        ).astype(np.uint8)
        # find nucleus boundaries
        Bounds = np.argwhere(
            find_boundaries(Nucleus, mode="inner").astype(np.uint8) == 1
        )
        # calculate and add FSDs
        FSDGroup[region.label-1, :] = FSDs(
            Bounds[:, 0], Bounds[:, 1],
            K, Interval
        )
        # generate object coords for nuclei and cytoplasmic regions
        Nuclei[region.label-1] = region.coords
        # get mask for all nuclei in neighborhood
        Mask = (
            Label[bounds[0]:bounds[1], bounds[2]:bounds[3]] > 0
        ).astype(np.uint8)
        # remove nucleus region from cytoplasm+nucleus mask
        cytoplasm = (
            np.logical_xor(Mask, dilation(Nucleus, Disk))
        ).astype(np.uint8)
        # get list of cytoplasm pixels
        Cytoplasms[region.label-1] = GetPixCoords(cytoplasm, bounds)

    # calculate hematoxylin features, capture feature names
    HematoxylinIntensityGroup = IntensityFeatureGroup(In, Nuclei)
    HematoxylinTextureGroup = TextureFeatureGroup(In, Nuclei)
    HematoxylinGradientGroup = GradientFeatureGroup(In, Nuclei)
    # calculate eosin features
    EosinIntensityGroup = IntensityFeatureGroup(Ic, Cytoplasms)
    EosinTextureGroup = TextureFeatureGroup(Ic, Cytoplasms)
    EosinGradientGroup = GradientFeatureGroup(Ic, Cytoplasms)

    # add columns to dataframe
    df['X'] = CentroidX
    df['Y'] = CentroidY

    df['Area'] = Area
    df['Perimeter'] = Perimeter
    df['Eccentricity'] = Eccentricity
    df['Circularity'] = Circularity
    df['MajorAxisLength'] = MajorAxisLength
    df['MinorAxisLength'] = MinorAxisLength
    df['Extent'] = Extent
    df['Solidity'] = Solidity

    for i in range(0, Fs):
        df['FSD' + str(i+1)] = FSDGroup[:, i]

    for f in HematoxylinIntensityGroup._fields:
        df['Hematoxylin' + f] = getattr(HematoxylinIntensityGroup, f)

    for f in HematoxylinTextureGroup._fields:
        df['Hematoxylin' + f] = getattr(HematoxylinTextureGroup, f)

    for f in HematoxylinGradientGroup._fields:
        df['Hematoxylin' + f] = getattr(HematoxylinGradientGroup, f)

    for f in EosinIntensityGroup._fields:
        df['Cytoplasm' + f] = getattr(EosinIntensityGroup, f)

    for f in EosinTextureGroup._fields:
        df['Cytoplasm' + f] = getattr(EosinTextureGroup, f)

    for f in EosinGradientGroup._fields:
        df['Cytoplasm' + f] = getattr(EosinGradientGroup, f)

    return df
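
A minimal call sketch (hypothetical toy inputs; the call itself is commented out because the helpers used above, GetBounds, FSDs and the *FeatureGroup functions, live elsewhere in the original module, and the H&E stain vectors in W are only illustrative):

import numpy as np
from skimage.measure import label as sk_label

rng = np.random.default_rng(0)
Label = sk_label(rng.random((256, 256)) > 0.999)   # a few toy "nuclei"
In = rng.random((256, 256))                        # hematoxylin intensity image
Ic = rng.random((256, 256))                        # cytoplasm intensity image
W = np.array([[0.65, 0.07, 0.0],
              [0.70, 0.99, 0.0],
              [0.29, 0.11, 0.0]])                  # two stains; third column left zero
# df = FeatureExtraction(Label, In, Ic, W, K=128, Fs=6, Delta=8)
# df would be a pandas DataFrame with one row per labeled nucleus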
Example #56
import sys

import matplotlib.pyplot as plt
from skimage import io
from skimage.segmentation import find_boundaries, mark_boundaries

from src.image_preprocess.PeakDetector import generate_green_map

__author__ = 'Kern'

if len(sys.argv) < 2:
    raise ValueError("Usage: %s <input image path>" % sys.argv[0])
path = sys.argv[1]

image = io.imread(path)
# image = uniform_filter(image_input)
image_green = generate_green_map(image)


# image_grey = color.rgb2gray(image)

image_out = find_boundaries(image)
image_mark = mark_boundaries(image, image_green)

fig, (ax0, ax1, ax2) = plt.subplots(1, 3)
ax0.imshow(image)
ax0.set_title('Input image')
ax1.imshow(image_out)
ax1.set_title('boundaries image')
ax1.axis('image')
ax2.imshow(image_mark)
ax2.set_title('marked boundaries')

plt.show()
Example #57
def watershedBlobAnalysis(img, thr, bright=True, showPlot=True, sig=3, pkSz=3, minPx=10, sf=1.79):
    """
    Binarize an input image using the supplied threshold, perform a
    watershed analysis on a smoothed Euclidean Distance Map, optionally
    display boundaries on the image, and return a list of lists of the
    properties of the blobs.

    Parameters
    ----------
    img: ndarray (2D)
        Input image, assumed to be grayscale
    thr: number
        The value to use for the gray level threshold
    bright: boolean (True)
        A flag indicating if the background is bright, and the blobs
        have gray levels less than the threshold value or the reverse.
    showPlot: boolean (True)
        A flag indicating whether to display a plot of the boundaries
        displayed in red on the grayscale input image
    sig: number (3)
        The sigma parameter for a gaussian smooth of the Euclidean
        Distance Map
    pkSz: number (3)
        The peak size of the footprint for the call to find the peak 
        local maxima.
    minPx: integer (10)
        The minimum number of pixels to consider a "blob"
    sf: float (1.79)
        The scale factor (units/px) for the image. The default is for a
        test image of AgX grains where the scale factor is 1.79 nm/px.

    Returns
    -------
    out: a list of lists
        [equivalent circular diameter, centroid, aspect ratio, solidity,
        circularity, squareness]
        Lists of feature vectors useful for particle size and shape
        analysis. Only the ECD is in dimensions implied by the scale
        factor.
        
    """
    from math import sqrt
    import numpy as np
    import matplotlib.pyplot as plt
    from scipy import ndimage
    from skimage import img_as_ubyte
    from skimage.morphology import disk, watershed, remove_small_objects  # in recent scikit-image, watershed lives in skimage.segmentation
    from skimage.filters.rank import median
    from skimage.feature import peak_local_max
    from skimage.segmentation import find_boundaries, mark_boundaries
    from skimage.measure import regionprops
    from skimage.color import gray2rgb
    
    if bright:
        bin_img = img < thr
    else:
        bin_img = img > thr
    bin_img = remove_small_objects(bin_img, min_size=minPx, connectivity=1,
                                   in_place=False)
    dist = ndimage.distance_transform_edt(bin_img)
    smooth = ndimage.gaussian_filter(dist, sig)
    # peak_local_max
    # possible below: #,labels=c.binarybackground)
    
    # indices=False returns a boolean peak mask (an older scikit-image API)
    local_maxi = peak_local_max(smooth, indices=False, footprint=np.ones((pkSz, pkSz)))
    markers = ndimage.label(local_maxi)[0]  
    labels = watershed(-smooth, markers, mask=bin_img)
    props = regionprops(labels)
    ecd = []
    cent = []
    ar = []
    solid =[]
    circ = []
    square = []
    for prop in props:
        cent.append(prop.centroid)
        ecd.append(round(sf*prop.equivalent_diameter, 3))
        if prop.minor_axis_length == 0:
            ar.append(float('nan'))
        else:
            ar.append(round(prop.major_axis_length/prop.minor_axis_length, 3))
        solid.append(prop.solidity)
        fArea = float(prop.area)
        perim = prop.perimeter
        cir = 4.0 * np.pi * (fArea / (perim)**2)
        circ.append(cir)
        square.append(0.25*perim/sqrt(fArea))
    #find outline of objects for plotting
    
    if showPlot:
        boundaries = find_boundaries(labels)
        max_g = np.max(img)
        if max_g > 255:
            img = img.astype(np.float32)   # astype returns a copy; it must be reassigned
            img = img / float(max_g)
        img_rgb = gray2rgb(img)  # replicate the grayscale image into three RGB channels
        overlay = np.flipud(mark_boundaries(img_rgb, boundaries, color=(1, 0, 0)))
        # plt.imshow(overlay);
    
        fig = plt.figure(figsize=(7,7))
        ax = fig.add_subplot(1, 1, 1)
        ax.imshow(overlay);  # overlay is RGB, so no colormap is applied (the old 'spectral' map is gone from matplotlib)
        ax.xaxis.set_visible(False);
        ax.yaxis.set_visible(False)
        fig.set_tight_layout(True);
    
    return ([ecd, cent, ar, solid, circ, square])
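
A minimal usage sketch on a synthetic image of dark disks on a bright background (hypothetical data; note the function as written targets an older scikit-image where watershed sits in skimage.morphology and peak_local_max still accepts indices=False):

import numpy as np

yy, xx = np.mgrid[0:128, 0:128]
img = np.full((128, 128), 200.0)                       # bright background
img[(yy - 40) ** 2 + (xx - 40) ** 2 < 18 ** 2] = 50.0  # dark disk, radius 18
img[(yy - 88) ** 2 + (xx - 88) ** 2 < 12 ** 2] = 50.0  # dark disk, radius 12

ecd, cent, ar, solid, circ, square = watershedBlobAnalysis(
    img, thr=128, bright=True, showPlot=False, sig=3, pkSz=3, minPx=10, sf=1.0)
# ecd should hold two equivalent circular diameters close to the disk diameters (36 and 24 pixels)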
Example #58
    co_mom0 = co_cube.spectral_slab(start, end).moment0()
    co_mom0_reproj = reproject_interp(co_mom0.hdu, hi_mom0.header)[0]
    co_mom0 = Projection(co_mom0_reproj, wcs=hi_mom0.wcs)

    # Need a mask from the HI
    # Adjust the sigma in a single channel to the moment0 in the slab
    # sigma = 0.00152659 * hi_slab.shape[0] * \
    #     np.abs((hi_slab.spectral_axis[1] - hi_slab.spectral_axis[0]).value)

    bub = BubbleFinder2D(hi_mom0, auto_cut=False, sigma=sigma)
    bub.create_mask(bkg_nsig=30, region_min_nsig=60, mask_clear_border=False)

    # skeleton = medial_axis(~bub.mask)
    # skeletons.append(skeleton)

    edge_mask = find_boundaries(bub.mask, connectivity=2, mode='outer')
    hole_mask = bub.mask.copy()
    # Now apply a radial boundary to the edge mask where the CO data is valid
    # This is the same cut-off used to define the valid clouds
    radial_cut = radii <= max_radius
    edge_mask *= radial_cut
    edge_masks.append(edge_mask)

    dist_trans = nd.distance_transform_edt(~edge_mask)
    # Assign negative values to regions within holes.
    dist_trans[hole_mask] = -dist_trans[hole_mask]

    # hist = p.hist(co_mom0.value[np.isfinite(co_mom0.value)], bins=100)
    # p.draw()
    # raw_input("?")
    # p.clf()
Example #59
    # make it square (apparently avconv doesn't work correctly with arbitrary frame size...)
    win_size = (max(win_size), max(win_size))


    ### centroids
    cell_centers = np.array([regionprops(mask)[0].centroid for mask in masks])
    smooth_cell_centers = smooth_2D_trajectory(cell_centers)


    ### extract windows and put into big array

    windows = np.empty((len(frames), win_size[0], win_size[1]))
    for frame_num, (frame, center, mask) in enumerate(zip(frames, smooth_cell_centers, masks)):

        # make boundary bright
        boundary = find_boundaries(mask)
        frame += 0.5*frame.max()*boundary

        # extract window centered on mask centroid
        i, j = int(round(center[0])), int(round(center[1]))
        ista, jsta = i - win_size[0] // 2, j - win_size[1] // 2  # integer division so the slice indices stay ints
        iend, jend = ista+win_size[0], jsta+win_size[1]
        win = frame[ista:iend, jsta:jend]
        windows[frame_num,:,:] = correct_orientation(win)


    # save to file
    print('Saving movie to %s.' % out_fn)
    write_video(windows, out_fn)