Code example #1
File: grey.py Project: AceHao/scikit-image
def erosion(image, selem=None, out=None, shift_x=False, shift_y=False):
    """Return greyscale morphological erosion of an image.

    Morphological erosion sets a pixel at (i,j) to the minimum over all pixels
    in the neighborhood centered at (i,j). Erosion shrinks bright regions and
    enlarges dark regions.

    Parameters
    ----------
    image : ndarray
        Image array.
    selem : ndarray, optional
        The neighborhood expressed as an array of 1's and 0's.
        If None, use cross-shaped structuring element (connectivity=1).
    out : ndarray, optional
        The array to store the result of the morphology. If None is
        passed, a new array will be allocated.
    shift_x, shift_y : bool, optional
        shift structuring element about center point. This only affects
        eccentric structuring elements (i.e. selem with even numbered sides).

    Returns
    -------
    eroded : array, same shape as `image`
        The result of the morphological erosion.

    Notes
    -----
    For ``uint8`` (and ``uint16`` up to a certain bit-depth) data, the
    lower algorithm complexity makes the `skimage.filter.rank.minimum`
    function more efficient for larger images and structuring elements.

    Examples
    --------
    >>> # Erosion shrinks bright regions
    >>> import numpy as np
    >>> from skimage.morphology import square
    >>> bright_square = np.array([[0, 0, 0, 0, 0],
    ...                           [0, 1, 1, 1, 0],
    ...                           [0, 1, 1, 1, 0],
    ...                           [0, 1, 1, 1, 0],
    ...                           [0, 0, 0, 0, 0]], dtype=np.uint8)
    >>> erosion(bright_square, square(3))
    array([[0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0],
           [0, 0, 1, 0, 0],
           [0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0]], dtype=uint8)

    """
    selem = np.array(selem)
    selem = _shift_selem(selem, shift_x, shift_y)
    if out is None:
        out = np.empty_like(image)
    ndi.grey_erosion(image, footprint=selem, output=out)
    return out
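
As the Notes above mention, a rank-filter minimum computes the same local minimum and can be faster for large uint8 images and footprints. A minimal sketch of that alternative, assuming a scikit-image release where the module is skimage.filters.rank (older releases used skimage.filter.rank):

import numpy as np
from skimage.filters import rank
from skimage.morphology import square

image = (np.random.rand(512, 512) * 255).astype(np.uint8)
footprint = square(15)
# local minimum via a sliding-histogram rank filter; equivalent to grayscale
# erosion with the same flat footprint, but cheaper for large footprints
eroded_rank = rank.minimum(image, footprint)
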
Code example #2
File: fan_finder.py Project: michaelaye/pymars
def fan_structure(median=11):
    """function to analyse fan sub structure by setting non-fan pixels to 0 and histo-equalizing the remaining img with only fans. Also
    median-filtering is done to reduce noise."""
    xcoords=[388, 449,497]
    ycoords =[142, 254, 118]
    x2 = [403,590]
    y2 = [286,254]
    x3 = [403,459]
    y3 = [286,375]
    x4 = [1372,1420]
    y4 = [610,680]
    x5 = [1372,1467]
    y5 = [610,590]
    x6 = [1321,1422]
    y6 = [612,750]
    x7 = [1321,1439]
    y7 = [612,585]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    data = get_data('ESP_011931_0945')
    data = nd.grey_erosion(data,footprint=np.ones((3,3)))
    data = nd.grey_erosion(data,footprint=np.ones((3,3)))
    data = nd.grey_dilation(data,footprint=np.ones((3,3)))
    data = nd.grey_dilation(data,footprint=np.ones((3,3)))
    threshold=0.045
    fans = data < threshold
    data = data*255/data.max()
    intfans = np.zeros(data.shape, dtype=np.uint16)
    intfans[fans] = np.round(data[fans])
    filtered = nd.median_filter(intfans,median)
    equ = hist_equal(filtered)
    im = ax.imshow(equ,cmap = cm.spectral,aspect='equal')
    ax.set_title('Fans within fans in Ithaca, filtered, opened and hist-equalized')
    ax.set_xlabel('0.5 m/pixel')
#    fig.savefig('Fans_within_fans.png')
#    cb =plt.colorbar(im,shrink=0.75)
#    cb.set_label('I/F')
    plt.plot(xcoords[:-1],ycoords[:-1],[xcoords[0],xcoords[2]],[ycoords[0],ycoords[2]],
            color='white',
            hold=True,
            scalex=False,scaley=False)
    plt.plot(x2,y2,color='white',hold=True,scalex=False,scaley=False)
    plt.plot(x3,y3,color='white',hold=True,scalex=False,scaley=False)
    plt.plot(x4,y4,color='white',hold=True,scalex=False,scaley=False)
    plt.plot(x5,y5,color='white',hold=True,scalex=False,scaley=False)

    plt.plot(x6,y6,color='white',hold=True,scalex=False,scaley=False)
    plt.plot(x7,y7,color='white',hold=True,scalex=False,scaley=False)

#    plt.close(fig)
    plt.show()
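
The erosion-erosion-dilation-dilation chain above amounts to a grayscale opening: with flat 3x3 footprints, two successive erosions equal one erosion with a 5x5 square, and likewise for the dilations. A sketch of that equivalence on a hypothetical array:

import numpy as np
from scipy import ndimage as nd

data = np.random.rand(64, 64)
# two passes with a flat 3x3 footprint...
chained = nd.grey_erosion(data, footprint=np.ones((3, 3)))
chained = nd.grey_erosion(chained, footprint=np.ones((3, 3)))
chained = nd.grey_dilation(chained, footprint=np.ones((3, 3)))
chained = nd.grey_dilation(chained, footprint=np.ones((3, 3)))
# ...match a single grayscale opening with a flat 5x5 footprint
opened = nd.grey_opening(data, footprint=np.ones((5, 5)))
assert np.allclose(chained, opened)
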
Code example #3
def rolling_ball_filter(data, ball_radius, spacing=None, top=False, **kwargs):
    """Rolling ball filter implemented with morphology operations

    This implementation is very similar to that in ImageJ and uses a top hat transform
    with a ball shaped structuring element
    https://en.wikipedia.org/wiki/Top-hat_transform

    Parameters
    ----------
    data : ndarray type uint8
        image data (assumed to be on a regular grid)
    ball_radius : float
        the radius of the ball to roll
    spacing : int or sequence
        the spacing of the image data
    top : bool
        whether to roll the ball on the top or bottom of the data
    kwargs : key word arguments
        these are passed to the ndimage morphological operations

    Returns
    -------
    data_nb : ndarray
        data with background subtracted as uint8
    bg : ndarray
        background that was subtracted from the data
    """
    data = data.astype(np.int16)
    ndim = data.ndim
    if spacing is None:
        spacing = _normalize_sequence(1, ndim)
    else:
        spacing = _normalize_sequence(spacing, ndim)

    radius = np.asarray(_normalize_sequence(ball_radius, ndim))
    mesh = np.array(np.meshgrid(*[np.arange(-r, r + s, s) for r, s in zip(radius, spacing)], indexing="ij"))
    structure = 2 * np.sqrt(2 - ((mesh / radius.reshape(-1, *((1,) * ndim)))**2).sum(0))
    structure[~np.isfinite(structure)] = 0
    if not top:
        # ndi.white_tophat(y, structure=structure, output=background)
        background = ndi.grey_erosion(data, structure=structure, **kwargs)
        background = ndi.grey_dilation(background, structure=structure, **kwargs)
    else:
        # ndi.black_tophat(y, structure=structure, output=background)
        background = ndi.grey_dilation(data, structure=structure, **kwargs)
        background = ndi.grey_erosion(background, structure=structure, **kwargs)

    data_corr = data - background
    data_corr[data_corr<0] = 0

    return data_corr.astype(np.uint8), background.astype(np.uint8)
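
A usage sketch on synthetic data; it assumes the _normalize_sequence helper referenced above is available in the same module (e.g. scipy.ndimage's internal _ni_support._normalize_sequence):

import numpy as np

# hypothetical test image: bright spots sitting on a sloped background
background = np.tile(np.linspace(0, 60, 128), (128, 1))
spots = np.zeros_like(background)
spots[30:34, 30:34] = 150
spots[90:95, 100:105] = 120
img = (background + spots).astype(np.uint8)

corrected, bg = rolling_ball_filter(img, ball_radius=10, spacing=1)
# the spots should survive the subtraction while the slope ends up in bg
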
Code example #4
File: anatomical.py Project: falfaroalmagro/mriqc
def artifact_mask(imdata, airdata):
    """Computes a mask of artifacts found in the air region"""

    if not np.issubdtype(airdata.dtype, np.integer):
        airdata[airdata < .95] = 0
        airdata[airdata > 0.] = 1

    bg_img = imdata * airdata
    # Find the background threshold (the most frequently occurring value
    # excluding 0)
    hist, bin_edges = np.histogram(bg_img[bg_img > 0], bins=128)
    bg_threshold = np.mean(bin_edges[np.argmax(hist)])


    # Apply this threshold to the background voxels to identify voxels
    # contributing artifacts.
    qi1_img = np.zeros_like(bg_img)
    qi1_img[bg_img > bg_threshold] = bg_img[bg_img > bg_threshold]

    # Create a structural element to be used in an opening operation.
    struc = nd.generate_binary_structure(3, 2)

    # Perform a grayscale erosion operation.
    qi1_img = nd.grey_erosion(qi1_img, structure=struc).astype(np.float32)
    # Binarize and binary dilation
    qi1_img[qi1_img > 0.] = 1
    qi1_img[qi1_img < 1.] = 0
    qi1_img = nd.binary_dilation(qi1_img, structure=struc).astype(np.uint8)
    return qi1_img
Code example #5
File: agglo2.py Project: DaniUPC/gala
def edge_matrix(labels, connectivity=1):
    """Generate a COO matrix containing the coordinates of edge pixels.

    Parameters
    ----------
    labels : array of int
        An array of labeled pixels (or voxels).
    connectivity : int in {1, ..., labels.ndim}
        The square connectivity for considering neighborhood.

    Returns
    -------
    edges : sparse.coo_matrix
        A COO matrix where (i, j) indicate neighboring labels and the
        corresponding data element is the linear index of the edge pixel
        in the labels array.
    """
    conn = ndi.generate_binary_structure(labels.ndim, connectivity)
    eroded = ndi.grey_erosion(labels, footprint=conn).ravel()
    dilated = ndi.grey_dilation(labels, footprint=conn).ravel()
    labels = labels.ravel()
    boundaries0 = np.flatnonzero(eroded != labels)
    boundaries1 = np.flatnonzero(dilated != labels)
    labels_small = np.concatenate((eroded[boundaries0], labels[boundaries1]))
    labels_large = np.concatenate((labels[boundaries0], dilated[boundaries1]))
    n = np.max(labels_large) + 1
    data = np.concatenate((boundaries0, boundaries1))
    sparse_graph = sparse.coo_matrix((data, (labels_small, labels_large)),
                                     dtype=np.int_, shape=(n, n))
    return sparse_graph
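
A small usage sketch on a hypothetical 3x3 label image; the stored data values are linear pixel indices, so they can be mapped back to coordinates with np.unravel_index:

import numpy as np

labels = np.array([[1, 1, 2],
                   [1, 1, 2],
                   [3, 3, 2]])
edges = edge_matrix(labels)
# edges.row / edges.col hold the neighboring label pairs and edges.data holds
# the linear index of each edge pixel in `labels`
rows, cols = np.unravel_index(edges.data, labels.shape)
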
Code example #6
File: imtool.py Project: BloodNg/FreeROI
def multi_label_edge_detection(data):
    f = nd.generate_binary_structure(len(data.shape), 1)
    bound = (nd.grey_erosion(data,footprint=f) != nd.grey_dilation(data,footprint=f)) ^ \
            (nd.binary_dilation(data.astype(bool)) ^ data.astype(bool))     # XOR removes the unwanted thick bounds (boolean '-' is no longer supported by NumPy)
    data=bound.astype(data.dtype)
    
    return data
Code example #7
    def adjustImage(self, img_size, pixel_arr, black, white, brightness, outline, invert, binary):

        mask = pixel_arr > white
        pixel_arr[mask] = 255
        mask = pixel_arr < black
        pixel_arr[mask] = 0

        if invert:
            pixel_arr = 255 - pixel_arr

        if outline:
            pixel_arr = pixel_arr / ((2**8-1.0)/(2**16-1.0))
            pixel_arr = pixel_arr.reshape(img_size)
            eroded_arr = ndimage.grey_erosion(pixel_arr, mode='constant', size=(3,3))
            outline_arr = pixel_arr - eroded_arr
            outline_arr = outline_arr.reshape((reduce(lambda x, y: x * y, outline_arr.shape), ))
            pixel_arr = outline_arr

        if binary:
            mask = pixel_arr > 0
            pixel_arr[mask] = 255
            #pixel_arr[numpy.invert(mask)] = 0
    
        pixel_arr = pixel_arr * brightness

        if brightness > 1.0:
            mask = pixel_arr > 255
            pixel_arr[mask] = 255

        return pixel_arr
Code example #8
File: bacteria.py Project: hnphan/cs365project3
 def generateData(self):
     input = self.getInput(0).getData()/255
     fill = ndimage.grey_erosion(input, size = (3,3))
     
     output = input - fill
     self.getOutput(0).setData(output*255)
     self.getOutput(1).setData(input*255)        
Code example #9
def findendsjunctions(img, disp=None):
    if disp is None:
        disp = 0

    # Create a look up table to find junctions.
    # lut = ndimage.grey_erosion(junction(img), size=(3,3))
    junctions = ndimage.grey_erosion(img, size=(3, 3))
    # Row and column coordinates of junction points in the image.
    rjcj = np.where(junctions)

    ends = ndimage.grey_erosion(img, size=(3, 3))
    # Row and column coordinates of end points in the image.
    rece = np.where(ends)

    # Display image with the junctions and endings marked.
    if disp:
        plt.imshow(img)
Code example #10
File: imtool.py Project: BNUCNL/FreeROI
def multi_label_edge_detection(data):
    """Detect the edge in the image with multi-labels."""
    f = nd.generate_binary_structure(len(data.shape), 1)
    # the unwanted thick bounds are removed with XOR (boolean '-' is no longer supported by NumPy)
    bound = (nd.grey_erosion(data, footprint=f) != nd.grey_dilation(data, footprint=f)) ^ (
        nd.binary_dilation(data.astype(bool)) ^ data.astype(bool)
    )
    data = bound.astype(data.dtype)
    return data
Code example #11
File: filter.py Project: julien-diener/ndarray
def apply(array, **kwargs):
    """
    Apply a set of standard filters to array data:
    
    Call: apply(array-data, <list of key=value arguments>)

    The list of key-value pairs defines the filtering to be done and should be given in
    the order to be processed. Possible key-value pairs are:
    
      * smooth:  gaussian filtering, value is the sigma parameter (scalar or tuple)
      * uniform: uniform  filtering (2)
      * max:     maximum  filtering (1)
      * min:     minimum  filtering (1)
      * median:  median   filtering (1)
      
      * dilate: grey dilation   (1)
      * erode:  grey erosion    (1)
      * close:  grey closing    (1)
      * open:   grey opening    (1)
      
      * linear_map: call linear_map(), value is the tuple (min,max)   (3)
      * normalize:  call normalize(),  value is the method            (3)
      * adaptive:   call adaptive(),   value is the sigma             (3)
      * adaptive_:  call adaptive(),   with uniform kernel            (3)
          
    The filtering is done using standard scipy.ndimage functions.
    
    (1) The value given (to the key) is the width of the filter:
        the distance from the center pixel (the size of the filter is thus 2*value+1)
        The neighborhood is an (approximated) boolean circle (up to discretization)
    (2) Same as (1) but the neighborhood is a complete square
    (3) See doc of respective function
    """
    for key in kwargs:
        value = kwargs[key]
        if key not in ('smooth','uniform'):
            fp = _kernel.distance(array.ndim*(2*value+1,))<=value  # circular filter
            
        if   key=='smooth' : array = _nd.gaussian_filter(array, sigma=value)
        elif key=='uniform': array = _nd.uniform_filter( array, size=2*value+1)
        elif key=='max'    : array = _nd.maximum_filter( array, footprint=fp)
        elif key=='min'    : array = _nd.minimum_filter( array, footprint=fp)
        elif key=='median' : array = _nd.median_filter(  array, footprint=fp)

        elif key=='dilate' : array = _nd.grey_dilation(  array, footprint=fp)
        elif key=='erode'  : array = _nd.grey_erosion(   array, footprint=fp)
        elif key=='open'   : array = _nd.grey_opening(   array, footprint=fp)
        elif key=='close'  : array = _nd.grey_closing(   array, footprint=fp)
        
        elif key=='linear_map': array = linear_map(array, min=value[0], max=value[1])
        elif key=='normalize' : array = normalize( array, method = value)
        elif key=='adaptive'  : array = adaptive(  array, sigma  = value, kernel='gaussian')
        elif key=='adaptive_' : array = adaptive(  array, sigma  = value, kernel='uniform')
        else: 
            print('\033[031mUnrecognized filter :', key)
            
    return array
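
A usage sketch, assuming the module runs under Python 3.7+ (so that **kwargs preserves the call order the docstring relies on) and that its _kernel.distance helper is importable:

import numpy as np

noisy = np.random.rand(128, 128)
# gaussian smoothing with sigma 1.5, then erosion and dilation with a radius-2 circular footprint
cleaned = apply(noisy, smooth=1.5, erode=2, dilate=2)
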
Code example #12
File: core.py Project: astropy/photutils
    def outline_segments(self, mask_background=False):
        """
        Outline the labeled segments.

        The "outlines" represent the pixels *just inside* the segments,
        leaving the background pixels unmodified.

        Parameters
        ----------
        mask_background : bool, optional
            Set to `True` to mask the background pixels (labels = 0) in
            the returned image.  This is useful for overplotting the
            segment outlines on an image.  The default is `False`.

        Returns
        -------
        boundaries : 2D `~numpy.ndarray` or `~numpy.ma.MaskedArray`
            An image with the same shape of the segmentation image
            containing only the outlines of the labeled segments.  The
            pixel values in the outlines correspond to the labels in the
            segmentation image.  If ``mask_background`` is `True`, then
            a `~numpy.ma.MaskedArray` is returned.

        Examples
        --------
        >>> from photutils import SegmentationImage
        >>> segm = SegmentationImage([[0, 0, 0, 0, 0, 0],
        ...                           [0, 2, 2, 2, 2, 0],
        ...                           [0, 2, 2, 2, 2, 0],
        ...                           [0, 2, 2, 2, 2, 0],
        ...                           [0, 2, 2, 2, 2, 0],
        ...                           [0, 0, 0, 0, 0, 0]])
        >>> segm.outline_segments()
        array([[0, 0, 0, 0, 0, 0],
               [0, 2, 2, 2, 2, 0],
               [0, 2, 0, 0, 2, 0],
               [0, 2, 0, 0, 2, 0],
               [0, 2, 2, 2, 2, 0],
               [0, 0, 0, 0, 0, 0]])
        """

        from scipy.ndimage import grey_erosion, grey_dilation

        # mode='constant' ensures outline is included on the image borders
        selem = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
        eroded = grey_erosion(self.data, footprint=selem, mode='constant',
                              cval=0.)
        dilated = grey_dilation(self.data, footprint=selem, mode='constant',
                                cval=0.)

        outlines = ((dilated != eroded) & (self.data != 0)).astype(int)
        outlines *= self.data

        if mask_background:
            outlines = np.ma.masked_where(outlines == 0, outlines)

        return outlines
Code example #13
File: Project.py Project: siyhust/python_gui
 def gradient(self):
     Type=self.getEdgeType()
     Size=self.image_temp.shape
     EdgeImage=np.zeros(Size)
     if (Type=='S'):
         EdgeImage=(ndimage.grey_dilation(self.image_temp,footprint=np.ones([3,3]))-ndimage.grey_erosion(self.image_temp,footprint=np.ones([3,3])))/2
     if (Type=='I'):
         EdgeImage=(self.image_temp-ndimage.grey_erosion(self.image_temp,footprint=np.ones([3,3])))/2
     if (Type=='E'):
         EdgeImage=(ndimage.grey_dilation(self.image_temp,footprint=np.ones([3,3]))-self.image_temp)/2
     self.showView4(EdgeImage)
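
The three edge types above are the symmetric, internal, and external morphological half-gradients. A standalone sketch of the same computations with scipy.ndimage on a hypothetical image:

import numpy as np
from scipy import ndimage

img = np.random.rand(32, 32)
fp = np.ones((3, 3))
dil = ndimage.grey_dilation(img, footprint=fp)
ero = ndimage.grey_erosion(img, footprint=fp)
symmetric = (dil - ero) / 2   # type 'S'
internal = (img - ero) / 2    # type 'I'
external = (dil - img) / 2    # type 'E'
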
Code example #14
File: predict_jan_13.py Project: skconsulting/ild
def calcSupNp(preprob, posp, lungs, imscan, pat, midx, psp, dictSubP, dimtabx):
    '''calculate the number of reticulation and HC in subpleural'''
    print('number of subpleural for : ', pat, psp)
    imgngray = np.copy(lungs)
    np.putmask(imgngray, imgngray == 1, 0)
    np.putmask(imgngray, imgngray > 0, 1)

# subErosion=  in mm
#avgPixelSpacing=0.734 in mm/ pixel
    subErosionPixel = int(round(2 * subErosion / avgPixelSpacing))
    erosion = ndimage.grey_erosion(
        imgngray, size=(
            subErosionPixel, subErosionPixel))

    np.putmask(erosion, erosion > 0, 1)
    mask_inv = np.bitwise_not(erosion)
    subpleurmask = np.bitwise_and(imgngray, mask_inv)

    ill = 0
    for ll in posp:

        xpat = ll[0]
        ypat = ll[1]

        proba = preprob[ill]
        prec, mprobai = maxproba(proba)
        classlabel = fidclass(prec, classif)

        if xpat >= midx:
            pospr = 1
            pospl = 0
        else:
            pospr = 0
            pospl = 1
        if classlabel == pat and mprobai > thrprobaUIP:
            tabpatch = np.zeros((dimtabx, dimtabx), np.uint8)
            tabpatch[ypat:ypat + dimpavy, xpat:xpat + dimpavx] = 1
            tabsubpl = np.bitwise_and(subpleurmask, tabpatch)

            area = tabsubpl.sum()
#                    check if area above threshold
            targ = float(area) / pxy

            if targ > thrpatch:
                dictSubP[pat]['all'] = (
                    dictSubP[pat]['all'][0] + pospl,
                    dictSubP[pat]['all'][1] + pospr)
                dictSubP[pat][psp] = (
                    dictSubP[pat][psp][0] + pospl,
                    dictSubP[pat][psp][1] + pospr)

        ill += 1
    return dictSubP
Code example #15
File: FitBackground.py Project: juliameier/pyphant1
 def fit_background(self, image, subscriber=0):
     poldegree = int(self.paramPoldegree.value)
     swidth = int(self.paramSwidth.value)
     sheight = int(self.paramSheight.value)
     threshold = int(self.paramThreshold.value)
     mediansize = int(self.paramMediansize.value)
     medianruns = int(self.paramMedianruns.value)
     darksize = int(self.paramDarksize.value)
     darkruns = int(self.paramDarkruns.value)
     brightsize = int(self.paramBrightsize.value)
     brightruns = int(self.paramBrightruns.value)
     dopreview = self.paramDopreview.value
     data = image.data
     # Median:
     for run in range(medianruns):
         data = ndimage.median_filter(data, size=mediansize)
     # Suspend dark spots:
     for run in range(darkruns):
         data = 255 - ndimage.grey_erosion(255 - data, size=darksize)
     # Suspend features:
     for run in range(brightruns):
         data = ndimage.grey_erosion(data, size=brightsize)
     # Fit background:
     if not dopreview:
         data = self.fit(data, poldegree, swidth, sheight, threshold)
     longname = "FitBackground"
     result = DataContainer.FieldContainer(
         data,
         copy.deepcopy(image.unit),
         copy.deepcopy(image.error),
         copy.deepcopy(image.mask),
         copy.deepcopy(image.dimensions),
         longname,
         image.shortname,
         copy.deepcopy(image.attributes),
         False,
     )
     result.seal()
     return result
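
The dark-spot step above uses grayscale duality: eroding the inverted image and inverting the result is the same as dilating the original, provided the footprint is flat and symmetric (which the square implied by size= is). A sketch of the equivalence:

import numpy as np
from scipy import ndimage

data = np.random.randint(0, 256, size=(64, 64)).astype(np.int32)
via_complement = 255 - ndimage.grey_erosion(255 - data, size=5)
direct = ndimage.grey_dilation(data, size=5)
assert np.array_equal(via_complement, direct)
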
Code example #16
File: initial.py Project: amit-iiitm/ANPR
def PreProcess(im):
   im=NMode(im)
   im1 = ndimage.grey_erosion(im, size=(10,10))
   scipy.misc.imsave("eroded.jpg",im1)
   im1= Image.open("eroded.jpg")
 
   im=ImageOps.equalize(im,0)
   im=ImageChops.difference(im1, im)
   #print ("image height %d and width %d\n"%(imh,imw))
 
 
   im=GBinarization(im)#binarize the image
   return im
Code example #17
File: scan_anal.py Project: limu007/Charlay
def calib(extra=False):
    global bins,refe,mall,dall,nomal
    #calibration plate
    sele=(array(mall)<20)*(array(dall)>700)
    from scipy import ndimage
    sele2=ndimage.grey_erosion(sele,2)
    ia,ib=where(sele2>0.5)
    nomal=mean([zzall[ia[i]][ib[i]] for i in range(100)],0)
    if extra:
        #testing individual lines:
        ps=array([sum(ia<i) for i in range(30,41)])
        koral=[mean([zzall[i+30][b] for b in ib[ps[i]:ps[i+1]]],0) for i in range(10)]
        return nomal,koral
    return nomal
Code example #18
File: skeleton.py Project: jni/skeletons
def erode(im):
    """`scipy.ndimage.grey_erosion` with size set to 3 on each axis.

    Parameters
    ----------
    im : np.ndarray, arbitrary type and shape.
        The input image.

    Returns
    -------
    out : np.ndarray, same type and shape as `im`
        The eroded image.
    """
    return nd.grey_erosion(im, size=[3] * im.ndim)
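
A brief usage sketch; because the size is set per axis, the same helper works unchanged for 2-D images and 3-D volumes:

import numpy as np

img = np.random.rand(32, 32)
vol = np.random.rand(8, 16, 16)
eroded_img = erode(img)   # 3x3 minimum filter
eroded_vol = erode(vol)   # 3x3x3 minimum filter
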
Code example #19
File: agglo2.py Project: cmriddle/gala
def fast_rag(labels, connectivity=1):
    """Build a data-free region adjacency graph quickly.

    Parameters
    ----------
    labels : array of int
        Image pre-segmentation or segmentation
    connectivity : int in {1, ..., labels.ndim}, optional
        Use square connectivity equal to `connectivity`. See
        `scipy.ndimage.generate_binary_structure` for more.

    Returns
    -------
    g : networkx Graph
        A graph where nodes represent regions in `labels` and edges
        indicate adjacency.

    Examples
    --------
    >>> labels = np.array([1, 1, 5, 5], dtype=np.int_)
    >>> fast_rag(labels).edges()
    [(1, 5)]
    >>> labels = np.array([[1, 1, 1, 2, 2],
    ...                    [1, 1, 1, 2, 2],
    ...                    [3, 3, 4, 4, 4],
    ...                    [3, 3, 4, 4, 4]], dtype=np.int_)
    >>> sorted(fast_rag(labels).edges())
    [(1, 2), (1, 3), (1, 4), (2, 4), (3, 4)]
    """
    conn = ndi.generate_binary_structure(labels.ndim, connectivity)
    eroded = ndi.grey_erosion(labels, footprint=conn)
    dilated = ndi.grey_dilation(labels, footprint=conn)
    boundaries0 = (eroded != labels)
    boundaries1 = (dilated != labels)
    labels_small = np.concatenate((eroded[boundaries0], labels[boundaries1]))
    labels_large = np.concatenate((labels[boundaries0], dilated[boundaries1]))
    n = np.max(labels_large) + 1
    # use a dummy broadcast array as data for RAG
    data = np.broadcast_to(np.ones((1,), dtype=np.int_),
                           labels_small.shape)
    sparse_graph = sparse.coo_matrix((data, (labels_small, labels_large)),
                                     dtype=np.int_, shape=(n, n)).tocsr()
    rag = nx.from_scipy_sparse_matrix(sparse_graph, edge_attribute='count')
    return rag
Code example #20
File: preprocess.py Project: gitter-badger/husc
def morphop(im, operation='open', radius='5'):
    """Perform a morphological operation with spherical structuring element.

    Parameters
    ----------
    im : array, shape (M, N[, P])
        2D or 3D grayscale image.
    operation : string, optional
        The operation to perform. Choices are 'opening', 'closing',
        'erosion', and 'dilation'. Imperative verbs also work, e.g.
        'dilate'.
    radius : int, optional
        The radius of the structuring element (disk or ball) used.

    Returns
    -------
    imout : array, shape (M, N[, P])
        The transformed image.

    Raises
    ------
    ValueError : if the image is not 2D or 3D.
    """
    if im.ndim == 2:
        selem = skmorph.disk(radius)
    elif im.ndim == 3:
        selem = skmorph.ball(radius)
    else:
        raise ValueError("Image input to 'morphop' should be 2D or 3D"
                         ", got %iD" % im.ndim)
    if operation.startswith('open'):
        imout = nd.grey_opening(im, footprint=selem)
    elif operation.startswith('clos'):
        imout = nd.grey_closing(im, footprint=selem)
    elif operation.startswith('dila'):
        imout = nd.grey_dilation(im, footprint=selem)
    elif operation.startswith('ero'):
        imout = nd.grey_erosion(im, footprint=selem)
    return imout
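
A usage sketch. Note that the default radius in the signature above is the string '5'; an integer radius is presumably intended, so one is passed explicitly here:

import numpy as np

img = np.random.rand(64, 64)
opened = morphop(img, operation='open', radius=3)
eroded = morphop(img, operation='erode', radius=3)
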
Code example #21
File: morpho.py Project: ricounet67/gala
def remove_merged_boundaries(labels, connectivity=1):
    """Remove boundaries in a label field when they separate the same region.

    By convention, the boundary label is 0, and labels are positive.

    Parameters
    ----------
    labels : array of int
        The label field to be processed.
    connectivity : int in {1, ..., labels.ndim}, optional
        The morphological connectivity for considering neighboring voxels.

    Returns
    -------
    labels_out : array of int
        The same label field, with unnecessary boundaries removed.

    Examples
    --------
    >>> labels = np.array([[1, 0, 1], [0, 1, 0], [2, 0, 3]], np.int)
    >>> remove_merged_boundaries(labels)
    array([[1, 1, 1],
           [0, 1, 0],
           [2, 0, 3]])
    """
    boundary = 0
    labels_out = labels.copy()
    is_boundary = (labels == boundary)
    labels_complement = labels.copy()
    labels_complement[is_boundary] = labels.max() + 1
    se = nd.generate_binary_structure(labels.ndim, connectivity)
    smaller_labels = nd.grey_erosion(labels_complement, footprint=se)
    bigger_labels = nd.grey_dilation(labels, footprint=se)
    merged = is_boundary & (smaller_labels == bigger_labels)
    labels_out[merged] = smaller_labels[merged]
    return labels_out
Code example #22
def process8mm(filenames, outputpath):
    files = filenames.split(',')
    # Select the midrange image for figuring out the cropping
    filename = files[1]
    if 3 != len(files):
        logger.error("Need three filenames")
        sys.exit(1)

    ofiles = [os.path.isfile("%s/%s" % (outputpath, os.path.basename(xx))) for xx in files]
    if [True, True, True] == ofiles:
        logger.debug("Output files aready exist for %s" % filenames)
        return

    if options.whitecount:
        for file in files:
            whiteCount(file)

    imp = PILImage.open(filename).convert('L')
    im = scipy.misc.fromimage(imp, flatten = True).astype(numpy.uint8)
    #(fcWidth, fcHeight) = im.shape
    (fcHeight, fcWidth) = im.shape
    #im = im[:,:400]
    im = im[:,:300]
    im1 = ndimage.grey_erosion(im, size=(25, 25))

    im1[im1 < 100] = 0
    im1[im1 >= 100] = 255
    if options.debug and eroded_dir is not None:
        scipy.misc.imsave('eroded/%s' % os.path.basename(filename), im1)

    im1Image = scipy.misc.toimage(im1)
    (spLeftX, spCenterY) = find8mmSprocket(im1Image, filename)
    spLeftX = 152
    logger.debug( "%s leftX %u centerY %u" % (filename, spLeftX, spCenterY))
    if (0, 0) == (spLeftX, spCenterY):
        logger.error("Cannot process %s" % filename)
        return

    pxPerMm = 393
    frameOriginX = int(spLeftX + (1.53 * pxPerMm))
    frameOriginY = int(spCenterY - (1.69 * pxPerMm))

    # FUDGE FACTOR for misaligned images
    #frameOriginY -= (122  + (.455 * pxPerMm))
#    frameOriginY -= ((.465 * pxPerMm))

    frameWidth = int(4.57 * pxPerMm)
    frameHeight = int(3.39 * pxPerMm)

    if frameWidth % 2 == 1:
        frameWidth += 1
    # crop and save
#    if ((frameOriginX + frameWidth) > fcWidth) or ((frameOriginY + frameHeight) > fcHeight):
#        logger.error("Crop tile out of bounds %u x %u > %u x %u" % (frameOriginX + frameWidth, fcWidth, frameOriginY + frameHeight, fcHeight))
#        return

    if options.debug and bw_dir is not None:
        bwd = ImageDraw.Draw(imp)
        bwd.line((spLeftX, spCenterY, fcWidth, spCenterY), fill=0)
        bwd.line((spLeftX, spCenterY - (1.64 * pxPerMm),
            spLeftX, spCenterY + (1.64 * pxPerMm)), fill = 0)
        bwd.rectangle((frameOriginX, frameOriginY,
            frameOriginX + frameWidth,
            frameOriginY + frameHeight), fill=192)
        imp.save('%s/%s' % (bw_dir, os.path.basename(filename)))

    for iFile in files:
        try:
            fullColor = PILImage.open(iFile)
            fullColor = fullColor.crop((int(frameOriginX), int(frameOriginY),
                 int(frameOriginX + frameWidth),
                 int(frameOriginY + frameHeight)))
            fullColor.save('%s/%s' % (outputpath, os.path.basename(iFile)))
        except:
            logger.error("Did not save %s/%s" % (outputpath, os.path.basename(iFile)))
Code example #23
File: gray.py Project: rfezzani/scikit-image
def erosion(image, footprint=None, out=None, shift_x=False, shift_y=False):
    """Return grayscale morphological erosion of an image.

    Morphological erosion sets a pixel at (i,j) to the minimum over all pixels
    in the neighborhood centered at (i,j). Erosion shrinks bright regions and
    enlarges dark regions.

    Parameters
    ----------
    image : ndarray
        Image array.
    footprint : ndarray, optional
        The neighborhood expressed as an array of 1's and 0's.
        If None, use cross-shaped footprint (connectivity=1). This can also
        be a sequence of 2-tuples where the first element of each 2-tuple is a
        footprint and the second element is an integer describing the number of
        times it should be iterated.
    out : ndarray, optional
        The array to store the result of the morphology. If None is
        passed, a new array will be allocated.
    shift_x, shift_y : bool, optional
        shift footprint about center point. This only affects
        eccentric footprints (i.e. footprint with even numbered
        sides).

    Returns
    -------
    eroded : array, same shape as `image`
        The result of the morphological erosion.

    Notes
    -----
    For ``uint8`` (and ``uint16`` up to a certain bit-depth) data, the
    lower algorithm complexity makes the `skimage.filters.rank.minimum`
    function more efficient for larger images and footprints.

    Examples
    --------
    >>> # Erosion shrinks bright regions
    >>> import numpy as np
    >>> from skimage.morphology import square
    >>> bright_square = np.array([[0, 0, 0, 0, 0],
    ...                           [0, 1, 1, 1, 0],
    ...                           [0, 1, 1, 1, 0],
    ...                           [0, 1, 1, 1, 0],
    ...                           [0, 0, 0, 0, 0]], dtype=np.uint8)
    >>> erosion(bright_square, square(3))
    array([[0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0],
           [0, 0, 1, 0, 0],
           [0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0]], dtype=uint8)

    """
    if out is None:
        out = np.empty_like(image)

    if _footprint_is_sequence(footprint):
        footprints = tuple(
            (_shift_footprint(fp, shift_x, shift_y), n) for fp, n in footprint)
        return _iterate_gray_func(ndi.grey_erosion, image, footprints, out)

    footprint = np.array(footprint)
    footprint = _shift_footprint(footprint, shift_x, shift_y)
    ndi.grey_erosion(image, footprint=footprint, output=out)
    return out
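
The docstring above also allows footprint to be a sequence of (footprint, iterations) 2-tuples. A sketch using a decomposed 3x3 square (one 3x1 pass and one 1x3 pass), which for a flat erosion should match the plain 3x3 footprint, assuming a scikit-image version that supports footprint sequences:

import numpy as np

bright_square = np.array([[0, 0, 0, 0, 0],
                          [0, 1, 1, 1, 0],
                          [0, 1, 1, 1, 0],
                          [0, 1, 1, 1, 0],
                          [0, 0, 0, 0, 0]], dtype=np.uint8)
fp_sequence = ((np.ones((3, 1), dtype=np.uint8), 1),
               (np.ones((1, 3), dtype=np.uint8), 1))
assert np.array_equal(erosion(bright_square, fp_sequence),
                      erosion(bright_square, np.ones((3, 3), dtype=np.uint8)))
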
Code example #24
# Example from https://ilovesymposia.com/2017/03/12/scipys-new-lowlevelcallable-is-a-game-changer/

import contextlib
import time

from scipy import ndimage as ndi
import numpy as np

image = np.random.random((2048, 2048))
footprint = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=bool)


@contextlib.contextmanager
def timing():
    start = time.time()
    yield
    delta = time.time() - start
    print(f"Execution time: {delta:.03}s")


with timing():
    ndi.grey_erosion(image, footprint=footprint)
with timing():
    ndi.generic_filter(image, np.min, footprint=footprint)
Code example #25
def rag_boundary(labels, edge_map, connectivity=2):
    """ Comouter RAG based on region boundaries

    Given an image's initial segmentation and its edge map this method
    constructs the corresponding Region Adjacency Graph (RAG). Each node in the
    RAG represents a set of pixels within the image with the same label in
    `labels`. The weight between two adjacent regions is the average value
    in `edge_map` along their boundary.

    Parameters
    ----------
    labels : ndarray
        The labelled image.
    edge_map : ndarray
        This should have the same shape as that of `labels`. For all pixels
        along the boundary between 2 adjacent regions, the average value of the
        corresponding pixels in `edge_map` is the edge weight between them.
    connectivity : int, optional
        Pixels with a squared distance less than `connectivity` from each other
        are considered adjacent. It can range from 1 to `labels.ndim`. Its
        behavior is the same as `connectivity` parameter in
        `scipy.ndimage.filters.generate_binary_structure`.

    Examples
    --------
    >>> from skimage import data, segmentation, filters, color
    >>> from skimage.future import graph
    >>> img = data.chelsea()
    >>> labels = segmentation.slic(img)
    >>> edge_map = filters.sobel(color.rgb2gray(img))
    >>> rag = graph.rag_boundary(labels, edge_map)

    """

    conn = ndi.generate_binary_structure(labels.ndim, connectivity)
    eroded = ndi.grey_erosion(labels, footprint=conn)
    dilated = ndi.grey_dilation(labels, footprint=conn)
    boundaries0 = (eroded != labels)
    boundaries1 = (dilated != labels)
    labels_small = np.concatenate((eroded[boundaries0], labels[boundaries1]))
    labels_large = np.concatenate((labels[boundaries0], dilated[boundaries1]))
    n = np.max(labels_large) + 1

    # use a dummy broadcast array as data for RAG
    ones = as_strided(np.ones((1, ), dtype=np.float),
                      shape=labels_small.shape,
                      strides=(0, ))
    count_matrix = sparse.coo_matrix((ones, (labels_small, labels_large)),
                                     dtype=np.int_,
                                     shape=(n, n)).tocsr()
    data = np.concatenate((edge_map[boundaries0], edge_map[boundaries1]))

    data_coo = sparse.coo_matrix((data, (labels_small, labels_large)))
    graph_matrix = data_coo.tocsr()
    graph_matrix.data /= count_matrix.data

    rag = RAG()
    rag.add_weighted_edges_from(_edge_generator_from_csr(graph_matrix),
                                weight='weight')
    rag.add_weighted_edges_from(_edge_generator_from_csr(count_matrix),
                                weight='count')

    for n in rag.nodes():
        rag.node[n].update({'labels': [n]})

    return rag
Code example #26
File: env.py Project: zachabarnes/slither-rl-agent
 def connected_components(self, frame):
     sing_frame = ndimage.grey_erosion(frame[:, :, 1], size=(2, 2))
     blur_radius = .35
     sing_frame = ndimage.gaussian_filter(sing_frame, blur_radius)
     labeled, self.nr_objects = ndimage.label(sing_frame)
     return labeled[:, :, np.newaxis]
Code example #27
import matplotlib.pyplot as plt
import scipy.ndimage as ndi
import numpy as np

img = np.zeros((16, 16))
img[4:-4, 4:-4] = 1

img = ndi.distance_transform_bf(img)

dilation = ndi.grey_dilation(img, size=(3, 3), structure=np.ones((3, 3)))

erosion = ndi.grey_erosion(img, size=(3, 3), structure=np.ones((3, 3)))

output = [img, dilation, erosion]
titles = ['Original', 'Dilation', 'Erosion']

for i in range(3):
    print(output[i])
    plt.subplot(1, 3, i + 1)
    plt.imshow(output[i], interpolation='nearest', cmap='nipy_spectral')
    plt.title(titles[i])
    plt.axis('off')
plt.show()
Code example #28
File: composites.py Project: khunger/dwd_extensions
def _create_fernsehbild_rgba(self, ct_alpha_def,
                             erosion_size=5, gaussion_filter_sigma=3,
                             dark_transparency_factor=3.0,
                             contrast_optimization_expr=None,
                             backup_orig_data=False):
    """
    """
    if contrast_optimization_expr is None:
        contrast_optimization_expr = "hist_equalize(inputdata, 8, 254)"

    ct_chn = self["CloudType"]
    ct_data = ct_chn.cloudtype

    ct_alpha = np.ones(ct_data.shape)
    for ct in range(len(ct_alpha_def)):
        if ct_alpha_def[ct] < 1.0:
            ct_alpha[(ct_data == ct)] = ct_alpha_def[ct]

    # mask already masked data
    ct_alpha[ct_data.mask] = 0.0
    # ct_mask = ct_alpha < 0.01

    # shrink alpha mask to ensure that smoothed edges are inside mask
    import scipy.ndimage as ndi
    ct_alpha = ndi.grey_erosion(
        ct_alpha, size=(erosion_size, erosion_size)).astype(
        ct_alpha.dtype)

    self.check_channels("HRV", 0.85, 10.8)

    if not self._dwd_channel_preparation(["HRV", 0.85, 10.8],
                                         backup_orig_data=backup_orig_data):
        return None

    # get combination of HRV and VIS008 channel data
    hrvc_chn = self._dwd_get_hrvc_channel()

    img_type = self._dwd_get_image_type()
    if img_type is None:
        return None

    # extract the clouds for hrvis channel
    hrvc_clouds = hrvc_chn.data.copy()
    # hrvc_clouds.mask[ct_mask] = True

    median = np.ma.median(hrvc_clouds)
    mean = np.ma.mean(hrvc_clouds)
    comp = hrvc_clouds.compressed()
    max_value = np.percentile(comp, 97)
    LOGGER.debug("HRVIS median: {0}, mean: {1}, diff: {2}, min: {3}, max: {4}".
                 format(median, mean, abs(median - mean),
                        hrvc_clouds.min(), max_value))

    # execute contrast optimization function (i.e. histogram equalisation)
    hrvc_clouds = select_range_and_scale(hrvc_clouds, 0, 100, 255)
    d = eval(contrast_optimization_expr, globals(), {'inputdata': hrvc_clouds})
    d.mask = False

    day_img = geo_image.GeoImage(d,
                                 self.area,
                                 get_first(self.time_slot),
                                 fill_value=0,
                                 mode="L",
                                 crange=(0, 255))
#    day_img.enhance(stretch="histogram")

    # extract the clouds for infrared channel
    ir_clouds = self[10.8].data.copy()
    # ir_clouds.mask[ct_mask] = True

    median = np.ma.median(ir_clouds)
    mean = np.ma.mean(ir_clouds)
    max_value = np.ma.max(ir_clouds)
    LOGGER.debug("IR median: {0}, mean: {1}, diff: {2}, min: {3}, max: {4}".
                 format(median, mean, abs(median - mean),
                        ir_clouds.min(), max_value))

    median = np.ma.median(ir_clouds)

    # execute contrast optimization function (i.e. histogram equalisation)
    ir_clouds = select_range_and_scale(ir_clouds, 40, -87.5, 255)
    d = eval(contrast_optimization_expr, globals(), {'inputdata': ir_clouds})
    d.mask = False

    night_img = geo_image.GeoImage(d,
                                   self.area,
                                   get_first(self.time_slot),
                                   fill_value=0,
                                   mode="L",
                                   crange=(0, 255))
#    night_img.enhance(stretch="histogram")

    if img_type == IMAGETYPES.DAY_ONLY:
        img = day_img

    if img_type == IMAGETYPES.NIGHT_ONLY:
        img = night_img

    if img_type == IMAGETYPES.DAY_NIGHT:
        alpha_data =\
            self._dwd_get_day_night_alpha_channel().data.astype(np.float64)\
            / 255.0
        # create day image
        day_img.putalpha(alpha_data)
        day_img.enhance(inverse=(False, True))
        # create night image
        night_img.putalpha(alpha_data)
        blend(night_img, day_img)
        img = night_img
        img.convert("L")

    if gaussion_filter_sigma is not None:
        # smooth alpha channel
        ct_alpha = ndi.gaussian_filter(ct_alpha, gaussion_filter_sigma)

    if dark_transparency_factor is not None:
        # add transparency to dark image areas
        ct_alpha = np.minimum(ct_alpha, img.channels[0] *
                              dark_transparency_factor)

    img.convert("RGBA")
    img.putalpha(ct_alpha)
    img.fill_value = None


#     img = geo_image.GeoImage(ct_alpha*255.0,
#                                  self.area,
#                                  self.time_slot,
#                                  fill_value=0,
#                                  mode="L",
#                                  crange=(0, 255))

    return img
Code example #29
    def run(self, workspace):
        '''Run the module on the image set'''
        seed_objects_name = self.seed_objects_name.value
        skeleton_name = self.image_name.value
        seed_objects = workspace.object_set.get_objects(seed_objects_name)
        labels = seed_objects.segmented
        labels_count = np.max(labels)
        label_range = np.arange(labels_count, dtype=np.int32) + 1

        skeleton_image = workspace.image_set.get_image(
                skeleton_name, must_be_binary=True)
        skeleton = skeleton_image.pixel_data
        if skeleton_image.has_mask:
            skeleton = skeleton & skeleton_image.mask
        try:
            labels = skeleton_image.crop_image_similarly(labels)
        except:
            labels, m1 = cpo.size_similarly(skeleton, labels)
            labels[~m1] = 0
        #
        # The following code makes a ring around the seed objects with
        # the skeleton trunks sticking out of it.
        #
        # Create a new skeleton with holes at the seed objects
        # First combine the seed objects with the skeleton so
        # that the skeleton trunks come out of the seed objects.
        #
        # Erode the labels once so that all of the trunk branchpoints
        # will be within the labels
        #
        #
        # Dilate the objects, then subtract them to make a ring
        #
        my_disk = morph.strel_disk(1.5).astype(int)
        dilated_labels = grey_dilation(labels, footprint=my_disk)
        seed_mask = dilated_labels > 0
        combined_skel = skeleton | seed_mask

        closed_labels = grey_erosion(dilated_labels,
                                     footprint=my_disk)
        seed_center = closed_labels > 0
        combined_skel = combined_skel & (~seed_center)
        #
        # Fill in single holes (but not a one-pixel hole made by
        # a one-pixel image)
        #
        if self.wants_to_fill_holes:
            def size_fn(area, is_object):
                return (~ is_object) and (area <= self.maximum_hole_size.value)

            combined_skel = morph.fill_labeled_holes(
                    combined_skel, ~seed_center, size_fn)
        #
        # Reskeletonize to make true branchpoints at the ring boundaries
        #
        combined_skel = morph.skeletonize(combined_skel)
        #
        # The skeleton outside of the labels
        #
        outside_skel = combined_skel & (dilated_labels == 0)
        #
        # Associate all skeleton points with seed objects
        #
        dlabels, distance_map = propagate.propagate(np.zeros(labels.shape),
                                                    dilated_labels,
                                                    combined_skel, 1)
        #
        # Get rid of any branchpoints not connected to seeds
        #
        combined_skel[dlabels == 0] = False
        #
        # Find the branchpoints
        #
        branch_points = morph.branchpoints(combined_skel)
        #
        # Odd case: when four branches meet like this, branchpoints are not
        # assigned because they are arbitrary. So assign them.
        #
        # .  .
        #  B.
        #  .B
        # .  .
        #
        odd_case = (combined_skel[:-1, :-1] & combined_skel[1:, :-1] &
                    combined_skel[:-1, 1:] & combined_skel[1:, 1:])
        branch_points[:-1, :-1][odd_case] = True
        branch_points[1:, 1:][odd_case] = True
        #
        # Find the branching counts for the trunks (# of extra branches
        # eminating from a point other than the line it might be on).
        #
        branching_counts = morph.branchings(combined_skel)
        branching_counts = np.array([0, 0, 0, 1, 2])[branching_counts]
        #
        # Only take branches within 1 of the outside skeleton
        #
        dilated_skel = scind.binary_dilation(outside_skel, morph.eight_connect)
        branching_counts[~dilated_skel] = 0
        #
        # Find the endpoints
        #
        end_points = morph.endpoints(combined_skel)
        #
        # We use two ranges for classification here:
        # * anything within one pixel of the dilated image is a trunk
        # * anything outside of that range is a branch
        #
        nearby_labels = dlabels.copy()
        nearby_labels[distance_map > 1.5] = 0

        outside_labels = dlabels.copy()
        outside_labels[nearby_labels > 0] = 0
        #
        # The trunks are the branchpoints that lie within one pixel of
        # the dilated image.
        #
        if labels_count > 0:
            trunk_counts = fix(scind.sum(branching_counts, nearby_labels,
                                         label_range)).astype(int)
        else:
            trunk_counts = np.zeros((0,), int)
        #
        # The branches are the branchpoints that lie outside the seed objects
        #
        if labels_count > 0:
            branch_counts = fix(scind.sum(branch_points, outside_labels,
                                          label_range))
        else:
            branch_counts = np.zeros((0,), int)
        #
        # Save the endpoints
        #
        if labels_count > 0:
            end_counts = fix(scind.sum(end_points, outside_labels, label_range))
        else:
            end_counts = np.zeros((0,), int)
        #
        # Calculate the distances
        #
        total_distance = morph.skeleton_length(
                dlabels * outside_skel, label_range)
        #
        # Save measurements
        #
        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        feature = "_".join((C_NEURON, F_NUMBER_TRUNKS, skeleton_name))
        m.add_measurement(seed_objects_name, feature, trunk_counts)
        feature = "_".join((C_NEURON, F_NUMBER_NON_TRUNK_BRANCHES,
                            skeleton_name))
        m.add_measurement(seed_objects_name, feature, branch_counts)
        feature = "_".join((C_NEURON, F_NUMBER_BRANCH_ENDS, skeleton_name))
        m.add_measurement(seed_objects_name, feature, end_counts)
        feature = "_".join((C_NEURON, F_TOTAL_NEURITE_LENGTH, skeleton_name))
        m[seed_objects_name, feature] = total_distance
        #
        # Collect the graph information
        #
        if self.wants_neuron_graph:
            trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
            intensity_image = workspace.image_set.get_image(
                    self.intensity_image_name.value)
            edge_graph, vertex_graph = self.make_neuron_graph(
                    combined_skel, dlabels,
                    trunk_mask,
                    branch_points & ~trunk_mask,
                    end_points,
                    intensity_image.pixel_data)

            image_number = workspace.measurements.image_set_number

            edge_path, vertex_path = self.get_graph_file_paths(m, m.image_number)
            workspace.interaction_request(
                    self, m.image_number, edge_path, edge_graph,
                    vertex_path, vertex_graph, headless_ok=True)

            if self.show_window:
                workspace.display_data.edge_graph = edge_graph
                workspace.display_data.vertex_graph = vertex_graph
                workspace.display_data.intensity_image = intensity_image.pixel_data
        #
        # Make the display image
        #
        if self.show_window or self.wants_branchpoint_image:
            branchpoint_image = np.zeros((skeleton.shape[0],
                                          skeleton.shape[1],
                                          3))
            trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
            branch_mask = branch_points & (outside_labels != 0)
            end_mask = end_points & (outside_labels != 0)
            branchpoint_image[outside_skel, :] = 1
            branchpoint_image[trunk_mask | branch_mask | end_mask, :] = 0
            branchpoint_image[trunk_mask, 0] = 1
            branchpoint_image[branch_mask, 1] = 1
            branchpoint_image[end_mask, 2] = 1
            branchpoint_image[dilated_labels != 0, :] *= .875
            branchpoint_image[dilated_labels != 0, :] += .1
            if self.show_window:
                workspace.display_data.branchpoint_image = branchpoint_image
            if self.wants_branchpoint_image:
                bi = cpi.Image(branchpoint_image,
                               parent_image=skeleton_image)
                workspace.image_set.add(self.branchpoint_image_name.value, bi)
Code example #30
def erode(img, selem):
    """腐蚀操作"""
    selem = np.array(selem)
    out = np.empty_like(img)
    ndi.grey_erosion(img, footprint=selem, output=out)
    return out
Code example #31
import cv2
import matplotlib.pyplot as plt
from scipy import ndimage

img = cv2.imread('fp.tif')
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
plt.imshow(img, cmap='gray')
plt.title('Original Image')
plt.show()

img2 = ndimage.grey_erosion(img, size=(2, 2))
plt.imshow(img2, cmap='gray')
plt.title('2x2 Erosion')
plt.show()

img2 = ndimage.grey_erosion(img, size=(3, 3))
plt.imshow(img2, cmap='gray')
plt.title('3x3 Erosion')
plt.show()

img2 = ndimage.grey_erosion(img, size=(4, 4))
plt.imshow(img2, cmap='gray')
plt.title('4x4 Erosion')
plt.show()
Code example #32
File: refine.py Project: baigouy/EPySeg
    def process(self, input=None, output_folder=None, progress_callback=None, filter=None,
                 correction_factor=2,
                 cutoff_cell_fusion=None,
                 restore_safe_cells=False,
                 _DEBUG=False,
                 _VISUAL_DEBUG=False, **kwargs):

        start = timer()
        # filename0 = path
        # filename0_without_path = os.path.basename(filename0)
        # filename0_without_ext = os.path.splitext(filename0_without_path)[0]
        # parent_dir_of_filename0 = os.path.dirname(filename0)
        # TA_output_filename = os.path.join(parent_dir_of_filename0, filename0_without_ext,
        #                                   'handCorrection.tif')  # TODO allow custom names here to allow ensemble methods
        # non_TA_final_output_name = os.path.join(output_folder, filename0_without_ext + '.tif')
        #
        # filename_to_use_to_save = non_TA_final_output_name
        # if TA_mode:
        #     filename_to_use_to_save = TA_output_filename
        #
        # if TA_mode:
        #     # try also to change path input name
        #     if os.path.exists(
        #             os.path.join(parent_dir_of_filename0, filename0_without_ext, 'raw_epyseg_output.tif')):
        #         path = os.path.join(parent_dir_of_filename0, filename0_without_ext, 'raw_epyseg_output.tif')

        # img_orig = Img(path)
        # print('analyzing', path, self.stop_now)
        # try:
        #     if self.progress_callback is not None:
        #         self.progress_callback.emit((iii / len(list_of_files)) * 100)
        #     else:
        #         logger.info(str((iii / len(list_of_files)) * 100) + '%')
        # except:
        #     traceback.print_exc()
        #     pass

        # DO A DILATION OF SEEDS THEN AN EROSION TO JOIN CLOSE BY SEEDS

        img_orig = input

        img_has_seeds = True
        # mask with several channels
        if img_orig.has_c():
            if restore_safe_cells:
                img_seg = img_orig[..., 0].copy()

            seeds_1 = img_orig[..., img_orig.shape[-1] - 1]
            seeds_1 = Img.invert(seeds_1)
            # seeds_1[seeds_1 >= 0.5] = 255
            # seeds_1[seeds_1 < 0.5] = 0
            seeds_1[seeds_1 >= 0.2] = 255  # TODO maybe be more stringent here
            seeds_1[seeds_1 < 0.2] = 0

            s = ndimage.generate_binary_structure(2, 1)
            seeds_1 = ndimage.grey_dilation(seeds_1, footprint=s)
            seeds_1 = ndimage.grey_dilation(seeds_1, footprint=s)
            seeds_1 = ndimage.grey_dilation(seeds_1, footprint=s)
            seeds_1 = ndimage.grey_erosion(seeds_1, footprint=s)
            seeds_1 = ndimage.grey_erosion(seeds_1, footprint=s)
            # seeds_1 = ndimage.grey_erosion(seeds_1, footprint=s)
            # seeds_1 = ndimage.grey_erosion(seeds_1, footprint=s)

            # for debug
            if _DEBUG:
                Img(seeds_1, dimensions='hw').save(
                    os.path.join(output_folder, 'extras', 'wshed_seeds.tif'))  # not bad

            lab_seeds = label(seeds_1.astype(np.uint8), connectivity=2, background=0)
            #
            for region in regionprops(lab_seeds):
                if region.area < 10:
                    for coordinates in region.coords:
                        lab_seeds[coordinates[0], coordinates[1]] = 0

            if _DEBUG:
                Img(seeds_1, dimensions='hw').save(
                    os.path.join(output_folder, 'extras', 'wshed_seeds_deblobed.tif'))

            img_orig[..., 3] = Img.invert(img_orig[..., 3])
            img_orig[..., 4] = Img.invert(img_orig[..., 4])

            # seems to work --> now need to do the projection
            for c in range(1, img_orig.shape[-1] - 2):
                img_orig[..., 0] += img_orig[..., 1]

            img_orig[..., 0] /= img_orig.shape[-1] - 2
            img_orig = img_orig[..., 0]

        else:
            # mask with single channel
            img_has_seeds = False
            if restore_safe_cells:
                img_seg = img_orig.copy()

        if restore_safe_cells:
            if _DEBUG:
                print(os.path.join(output_folder, 'extras', 'img_seg.tif'))
                Img(img_seg, dimensions='hw').save(
                    os.path.join(output_folder, 'extras', 'img_seg.tif'))

        # for debug
        if _DEBUG:
            Img(img_orig, dimensions='hw').save(os.path.join(output_folder, 'extras', 'avg.tif'))

        img_saturated = img_orig.copy()
        if img_has_seeds:
            img_saturated[img_saturated >= 0.5] = 255
            img_saturated[img_saturated < 0.5] = 0
            if restore_safe_cells:
                # TODO maybe do a safe image
                img_seg[img_seg >= 0.3] = 255
                img_seg[img_seg < 0.3] = 0
                secure_mask = img_seg
        else:
            img_saturated[img_saturated >= 0.3] = 255
            img_saturated[img_saturated < 0.3] = 0
            if restore_safe_cells:
                img_seg[img_seg >= 0.95] = 255
                img_seg[img_seg < 0.95] = 0
                secure_mask = img_seg

        # convert it to seeds and make sure they are all present in there
        # if pixel is not labeled then read it
        if restore_safe_cells:
            labels_n_area_rescue_seeds = {}
            rescue_seeds = label(Img.invert(secure_mask), connectivity=1, background=0)
            for region in regionprops(rescue_seeds):
                labels_n_area_rescue_seeds[region.label] = region.area
            if _DEBUG:
                Img(secure_mask, dimensions='hw').save(os.path.join(output_folder, 'extras', 'secure_mask.tif'))
        # loop over those seeds to rescue

        # for debug
        if _DEBUG:
            Img(img_saturated, dimensions='hw').save(
                os.path.join(output_folder, 'extras', 'handCorrection.tif'))

        deblob = True
        if deblob:
            image_thresh = label(img_saturated, connectivity=2, background=0)
            # for debug
            if _DEBUG:
                Img(image_thresh, dimensions='hw').save(
                    os.path.join(output_folder, 'extras', 'before_deblobed.tif'))
            # deblob
            min_size = 200
            for region in regionprops(image_thresh):
                # take regions with large enough areas
                if region.area < min_size:
                    for coordinates in region.coords:
                        image_thresh[coordinates[0], coordinates[1]] = 0

            image_thresh[image_thresh > 0] = 255
            img_saturated = image_thresh
            # for debug
            if _DEBUG:
                Img(img_saturated, dimensions='hw').save(
                    os.path.join(output_folder, 'extras', 'deblobed.tif'))
            del image_thresh

        # for debug
        if _DEBUG:
            Img(img_saturated, dimensions='hw').save(
                os.path.join(output_folder, 'extras', 'deblobed_out.tif'))

        extra_dilations = True
        if extra_dilations:
            # do a dilation of 2 to close bonds
            s = ndimage.generate_binary_structure(2, 1)
            dilated = ndimage.grey_dilation(img_saturated, footprint=s)
            dilated = ndimage.grey_dilation(dilated, footprint=s)
            # Img(dilated, dimensions='hw').save(os.path.join(os.path.splitext(path)[0], 'filled_one_px_holes.tif'))

            # other_seeds = label(invert(np.grey_dilation(dilated, footprint=s).astype(np.uint8)), connectivity=1, background=0)

            labs = label(Img.invert(img_saturated.astype(np.uint8)), connectivity=1, background=0)
            for region in regionprops(labs):
                seeds = []

                # exclude tiny cells from dilation because they may end up completely closed
                if region.area >= 10 and region.area < 350:
                    for coordinates in region.coords:
                        dilated[coordinates[0], coordinates[1]] = 0
                    continue
                else:
                    # problem when big cells are around because connections are not made
                    # preserve cells at the edges because they are naturally smaller (they are cut)
                    # apply a size criterion too
                    if region.area < 100 and (
                            region.bbox[0] <= 1 or region.bbox[1] <= 1 or
                            region.bbox[2] >= labs.shape[-2] - 2 or
                            region.bbox[3] >= labs.shape[-1] - 2):
                        # edge cell detected --> removing dilation
                        for coordinates in region.coords:
                            dilated[coordinates[0], coordinates[1]] = 0
                        continue

            img_saturated = dilated
            # for debug
            if _DEBUG:
                Img(img_saturated, dimensions='hw').save(
                    os.path.join(output_folder, 'extras', 'dilated_further.tif'))
            del dilated

        list_of_cells_to_dilate = []
        labs = label(Img.invert(img_saturated.astype(np.uint8)), connectivity=1, background=0)

        # this correction fixes a lot of things but it also recreates structures that do not exist... --> check what causes these blob lines
        # ideally the result should be deblobbed again
        if img_has_seeds:
            for region in regionprops(labs, intensity_image=img_orig):
                seeds = []

                if not extra_dilations and region.area < 10:
                    continue

                # if small and no associated seeds --> remove it ??? maybe or not
                for coordinates in region.coords:
                    id = lab_seeds[coordinates[0], coordinates[1]]
                    if id != 0:
                        seeds.append(id)

                seeds = set(seeds)

                if len(seeds) >= 2:
                    # we may have found an undersegmented cell --> try to segment it better
                    list_of_cells_to_dilate.append(region.label)

        if len(list_of_cells_to_dilate) != 0:
            props = regionprops(labs, intensity_image=img_orig)
            for run in range(10):
                something_changed = False  # early stop

                for region in props:
                    if region.label not in list_of_cells_to_dilate:
                        continue

                    # TODO recheck those values and whether they make sense
                    threshold_values = [80 / 255, 60 / 255, 40 / 255, 30 / 255,
                                        20 / 255,
                                        10 / 255]  # 160 / 255, 140 / 255, 120 / 255, 100 / 255,  1 / 255 , 2 / 255, , 5 / 255

                    try:
                        for threshold in threshold_values:
                            mask = region.image.copy()
                            image = region.image.copy()
                            image[region.intensity_image > threshold] = True
                            image[region.intensity_image <= threshold] = False
                            final = Img.invert(image.astype(np.uint8))
                            final[final < 255] = 0
                            final[mask == False] = 0
                            new_seeds = label(final, connectivity=1, background=0)
                            props2 = regionprops(new_seeds)
                            if len(props2) > 1:  # cell was re-split into smaller pieces
                                for r in props2:
                                    if r.area < 20:
                                        raise Exception

                                region.image[mask == False] = False
                                region.image[mask == True] = True
                                region.image[new_seeds > 0] = False
                                something_changed = True
                                for coordinates in region.coords:
                                    img_saturated[coordinates[0], coordinates[1]] = 255
                            region.image[mask == False] = False
                            region.image[mask == True] = True
                            del final
                            del new_seeds
                    except:
                        traceback.print_exc()
                        pass

                if not something_changed:
                    # print('no more changes anymore --> quitting')
                    break

        # for debug
        if _DEBUG:
            Img(img_saturated, dimensions='hw').save(
                os.path.join(output_folder, 'extras', 'saturated_mask4.tif'))

        final_seeds = label(Img.invert(img_saturated), connectivity=1,
                            background=0)  # keep like that otherwise creates tiny cells with erroneous wshed

        # for debug
        if _DEBUG:
            Img(final_seeds, dimensions='hw').save(
                os.path.join(output_folder, 'extras', 'final_seeds_before.tif'))
        final_seeds = label(Img.invert(img_saturated), connectivity=2, background=0)  # is that needed ???
        # for debug
        if _DEBUG:
            Img(final_seeds, dimensions='hw').save(
                os.path.join(output_folder, 'extras', 'final_seeds_before2.tif'))

        final_seeds[img_saturated == 255] = 0
        final_wshed = watershed(img_orig, markers=final_seeds,
                                watershed_line=True)

        final_wshed[final_wshed != 0] = 1  # remove all seeds
        final_wshed[final_wshed == 0] = 255  # set wshed values to 255
        final_wshed[final_wshed == 1] = 0  # set all other cell content to 0

        # filename0 = os.path.basename(path)
        # parent_path = os.path.dirname(os.path.dirname(path))

        if filter is None or filter == 0:
            # TODO maybe offer the choice between saving wshed on predict or on orig
            # Img(final_wshed, dimensions='hw').save(os.path.join(output_folder, os.path.splitext(filename0)[
            #     0]) + '.tif')  # need put original name here  TODO put image default name here
            # print('saving', filename_to_use_to_save)
            # Img(final_wshed.astype(np.uint8), dimensions='hw').save(filename_to_use_to_save)
            return final_wshed.astype(np.uint8)
        else:
            if isinstance(filter, int):
                filter_by_size = filter
            else:
                filter_by_size = None
            avg_area = 0
            count = 0
            if _DEBUG:
                Img(final_wshed, dimensions='hw').save(os.path.join(output_folder, 'extras', 'test_size_cells.tif'))

            final_seeds = Img.invert(final_wshed)
            final_seeds = label(final_seeds, connectivity=1, background=0)

            if _VISUAL_DEBUG:
                plt.imshow(final_seeds)
                plt.show()

            removed_seeds = []
            keep_seeds = []

            labels_n_bbox = {}
            labels_n_area = {}
            border_cells = []
            ids_n_local_median = {}
            correspondance_between_cur_seeds_and_safe_ones = {}

            if isinstance(filter, str) and 'local' in filter:
                rps = regionprops(final_seeds)

                for region in rps:
                    labels_n_bbox[region.label] = region.bbox
                    labels_n_area[region.label] = region.area
                    if (region.bbox[0] <= 3 or region.bbox[1] <= 3 or
                            region.bbox[2] >= final_seeds.shape[-2] - 5 or
                            region.bbox[3] >= final_seeds.shape[-1] - 5):
                        border_cells.append(region.label)
                    if restore_safe_cells:
                        for coordinates in region.coords:
                            if rescue_seeds[coordinates[0], coordinates[1]] != 0:  # record the matching rescue seed
                                correspondance_between_cur_seeds_and_safe_ones[region.label] = rescue_seeds[
                                    coordinates[0], coordinates[1]]
                                break
                            break

                _, tiles = Img.get_2D_tiles_with_overlap(final_seeds, overlap=64, dimension_h=-2, dimension_w=-1)

                for r in tiles:
                    for tile in r:
                        rps2 = regionprops(tile)
                        for region in rps2:
                            if self.stop_now:
                                return

                            if region.label in border_cells:
                                continue

                            if (region.bbox[0] <= 3 or region.bbox[1] <= 3 or
                                    region.bbox[2] >= final_seeds.shape[-2] - 5 or
                                    region.bbox[3] >= final_seeds.shape[-1] - 5):
                                continue

                            area_of_neighboring_cells = []
                            for region2 in rps2:
                                if region2.label == region.label:
                                    continue
                                # find all cells with
                                if self.rect_distance(region.bbox, region2.bbox) <= 1:
                                    area_of_neighboring_cells.append(labels_n_area[region2.label])

                            if area_of_neighboring_cells:
                                median = statistics.median_low(area_of_neighboring_cells)
                                ids_n_local_median[
                                    region.label] = median / correction_factor
                                if region.area <= median / correction_factor:
                                    removed_seeds.append(region.label)
                                else:
                                    keep_seeds.append(region.label)
                removed_seeds = [x for x in removed_seeds if x not in keep_seeds]

                # TODO offer the things below as an option --> prevent removal of sure seeds or something like that
                if restore_safe_cells:
                    removed_seeds_to_restore = []
                    for region in regionprops(final_seeds):
                        if region.label in removed_seeds:
                            first = True
                            for coordinates in region.coords:
                                if first and rescue_seeds[coordinates[0], coordinates[1]] != 0:
                                    percent_diff = min(labels_n_area[region.label], labels_n_area_rescue_seeds[
                                        rescue_seeds[coordinates[0], coordinates[1]]]) / max(
                                        labels_n_area[region.label], labels_n_area_rescue_seeds[
                                            rescue_seeds[coordinates[0], coordinates[1]]])

                                    if (percent_diff >= 0.7 and percent_diff < 1.0) or (
                                            labels_n_area[region.label] <= 200 and (
                                            percent_diff >= 0.3 and percent_diff < 1.0)):
                                        if _DEBUG:
                                            print('0 finally not removing seed, safe seed', region.label,
                                                  percent_diff,
                                                  labels_n_area[region.label],
                                                  labels_n_area_rescue_seeds[
                                                      rescue_seeds[coordinates[0], coordinates[1]]],
                                                  labels_n_area[region.label] / labels_n_area_rescue_seeds[
                                                      rescue_seeds[coordinates[0], coordinates[1]]],
                                                  region.centroid)
                                        removed_seeds_to_restore.append(region.label)
                                        break
                                    break
                    removed_seeds = [x for x in removed_seeds if x not in removed_seeds_to_restore]
            else:
                areas = []

                for region in regionprops(final_seeds):
                    if (region.bbox[0] <= 3 or region.bbox[1] <= 3 or region.bbox[2] >= final_seeds.shape[-2] - 5 or
                            region.bbox[3] >= final_seeds.shape[-1] - 5):
                        continue
                    avg_area += region.area
                    count += 1
                    areas.append(region.area)
                avg_area /= count

                median = statistics.median_low(areas)

                if isinstance(filter, int):
                    filter_by_size = filter
                elif 'avg' in filter:
                    filter_by_size = avg_area / correction_factor
                elif 'median' in filter:
                    filter_by_size = median / correction_factor
                # TODO maybe use stdev or alike to see if cell should really be removed
                if _DEBUG:
                    print('filter cells below=', filter_by_size, 'avg cell area=', avg_area, 'median=',
                          median)  # , 'median', median

                if filter_by_size is not None and filter_by_size != 0:

                    if _VISUAL_DEBUG:
                        plt.imshow(final_seeds)
                        plt.show()

                    for region in regionprops(final_seeds):
                        labels_n_bbox[region.label] = region.bbox
                        labels_n_area[region.label] = region.area
                        if region.area < filter_by_size:
                            if (region.bbox[0] <= 2 or region.bbox[1] <= 2 or
                                    region.bbox[2] >= labs.shape[-2] - 3 or
                                    region.bbox[3] >= labs.shape[-1] - 3):
                                continue
                            removed_seeds.append(region.label)

            if cutoff_cell_fusion is not None and cutoff_cell_fusion > 1:
                cells_to_fuse = []

                for idx, removed_seed in enumerate(removed_seeds):
                    current_cells_to_fuse = set()
                    closest_pair = None
                    smallest_distance = None

                    for idx2 in range(idx + 1, len(removed_seeds)):
                        removed_seed2 = removed_seeds[idx2]

                        if closest_pair is None:
                            if self.rect_distance(labels_n_bbox[removed_seed], labels_n_bbox[removed_seed2]) <= 1:
                                closest_pair = removed_seed2
                                smallest_distance = self.rect_distance(labels_n_bbox[removed_seed],
                                                                       labels_n_bbox[removed_seed2])
                        elif self.rect_distance(labels_n_bbox[removed_seed],
                                                labels_n_bbox[removed_seed2]) <= smallest_distance:
                            closest_pair = removed_seed2
                            smallest_distance = self.rect_distance(labels_n_bbox[removed_seed],
                                                                   labels_n_bbox[removed_seed2])

                        if self.rect_distance(labels_n_bbox[removed_seed], labels_n_bbox[removed_seed2]) <= 1:
                            current_cells_to_fuse.add(removed_seed)
                            current_cells_to_fuse.add(removed_seed2)

                    if current_cells_to_fuse:
                        cells_to_fuse.append(current_cells_to_fuse)

                cells_to_fuse = [frozenset(i) for i in cells_to_fuse]
                cells_to_fuse = list(dict.fromkeys(cells_to_fuse))

                cells_to_keep = []
                if cutoff_cell_fusion is not None and cutoff_cell_fusion > 0:
                    superfuse = []

                    copy_of_cells_to_fuse = cells_to_fuse.copy()
                    for idx, fuse in enumerate(copy_of_cells_to_fuse):
                        current_fusion = set(fuse.copy())
                        changed = True
                        while changed:
                            changed = False
                            for idx2 in range(len(copy_of_cells_to_fuse) - 1, idx, -1):
                                fuse2 = copy_of_cells_to_fuse[idx2]
                                if idx2 == idx:
                                    continue
                                if fuse2.intersection(current_fusion):
                                    current_fusion.update(fuse2)
                                    del copy_of_cells_to_fuse[idx2]
                                    changed = True
                        superfuse.append(current_fusion)

                    for sf in superfuse:
                        if len(sf) > cutoff_cell_fusion:
                            for val in sf:
                                cells_to_keep.append(val)

                seeds_to_fuse = []

                cells_to_fuse = sorted(cells_to_fuse, key=len)
                for fuse in cells_to_fuse:
                    cumulative_area = 0
                    for _id in fuse:
                        if _id in cells_to_keep:
                            if _id in removed_seeds:
                                removed_seeds.remove(_id)
                            continue
                        cumulative_area += labels_n_area[_id]
                    if filter_by_size is not None:
                        if cumulative_area >= filter_by_size:  # TODO: ideally use the local area here instead of the global filter size
                            seeds_to_fuse.append(fuse)
                            for _id in fuse:
                                if _id in removed_seeds:
                                    removed_seeds.remove(_id)
                    else:
                        if cumulative_area >= ids_n_local_median[_id]:
                            seeds_to_fuse.append(fuse)
                            for _id in fuse:
                                if _id in removed_seeds:
                                    removed_seeds.remove(_id)

                # recolor all the seeds of each fused group with a single id
                for fuse in seeds_to_fuse:
                    for _id in fuse:
                        break  # take an arbitrary id from the fused set
                    for region in regionprops(final_seeds):
                        if region.label in fuse:
                            for coordinates in region.coords:
                                final_seeds[coordinates[0], coordinates[1]] = _id

            if _VISUAL_DEBUG:
                plt.imshow(final_seeds)
                plt.show()

            for region in regionprops(final_seeds):
                if region.label in removed_seeds:
                    for coordinates in region.coords:
                        final_seeds[coordinates[0], coordinates[1]] = 0
            if _VISUAL_DEBUG:
                plt.imshow(final_seeds)
                plt.show()

            final_wshed = watershed(img_orig, markers=final_seeds, watershed_line=True)

            final_wshed[final_wshed != 0] = 1  # remove all seeds
            final_wshed[final_wshed == 0] = 255  # set wshed values to 255
            final_wshed[final_wshed == 1] = 0  # set all other cell content to 0
            if _VISUAL_DEBUG:
                plt.imshow(final_wshed)
                plt.show()
            # print('saving', filename_to_use_to_save)
            # Img(final_wshed.astype(np.uint8), dimensions='hw').save(filename_to_use_to_save)

            duration = timer() - start
            if _DEBUG:
                print('final duration wshed in secs', duration)

            return final_wshed.astype(np.uint8)  # is indeed a 2D image
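The pipeline above ends by running a seeded watershed with `watershed_line=True` on the membrane prediction and then recoding the result into a binary boundary mask (watershed line = 255, cell interiors = 0). Below is a minimal, self-contained sketch of that final step on a toy image; the toy data and variable names are illustrative assumptions, not part of the original code.

# Hedged sketch of the seeded-watershed boundary extraction used above.
import numpy as np
from scipy import ndimage as ndi
from skimage.measure import label
from skimage.segmentation import watershed

# Toy membrane image: a bright vertical wall separating two dark cells.
img = np.zeros((64, 64), dtype=float)
img[:, 31:33] = 1.0
img = ndi.gaussian_filter(img, 1)

# Seeds: one connected component per dark (non-membrane) region.
seeds = label(img < 0.1, connectivity=1, background=0)

# Seeded watershed on the membrane image, keeping the separating line.
wshed = watershed(img, markers=seeds, watershed_line=True)

# Recode exactly as above: watershed line -> 255, cell interiors -> 0.
mask = np.zeros_like(wshed, dtype=np.uint8)
mask[wshed == 0] = 255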
Code Example #33
0
File: grid.py Project: NengLu/topopy
    def fill_sinks2(self, four_way=False):
        """
        Fill sinks method adapted from  fill depressions/sinks in floating point array
        
        Parameters:
        ----------
        input_array : [ndarray] Input array to be filled
        four_way : [bool] Searchs the 4 (True) or 8 (False) adjacent cells
        
        Returns:
        ----------
        [ndarray] Filled array
    
        This algorithm has been adapted (with minor modifications) from the 
        Charles Morton slow fill algorithm (with ndimage and python 3 was not slow
        at all). 
        
        References
        ----------
        Soile, P., Vogt, J., and Colombo, R., 2003. Carving and Adaptive
        Drainage Enforcement of Grid Digital Elevation Models.
        Water Resources Research, 39(12), 1366
        
        Soille, P., 1999. Morphological Image Analysis: Principles and
        Applications, Springer-Verlag, pp. 173-174
    
        """
        # Change nan values to a very low value
        copyarr = np.copy(self._array)
        nodata_pos = self.get_nodata_pos()
        copyarr[nodata_pos] = -9999.

        # Set h_max to a value larger than the array maximum to ensure
        #   that the while loop will terminate
        h_max = copyarr.max() + 100

        # Build mask of cells with data not on the edge of the image
        # Use 3x3 square Structuring element
        inside_mask = ndimage.binary_erosion(
            np.isfinite(copyarr),
            structure=np.ones((3, 3), dtype=bool))

        # Initialize the output array to h_max everywhere except at the edges
        output_array = np.copy(copyarr)
        output_array[inside_mask] = h_max

        # Array for storing previous iteration
        output_old_array = np.copy(copyarr)
        output_old_array[:] = 0

        # Cross structuring element
        if four_way:
            el = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=bool)
        else:
            el = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=bool)

        # Iterate until marker array doesn't change
        while not np.array_equal(output_old_array, output_array):
            output_old_array = np.copy(output_array)
            output_array = np.maximum(
                copyarr,
                ndimage.grey_erosion(output_array, footprint=el))

        # Put back nodata values and change type
        if self._nodata:
            output_array[nodata_pos] = self._nodata
        # Create output filled DEM
        filled_dem = DEM()
        filled_dem.copy_layout(self)
        filled_dem.set_array(output_array)
        filled_dem.set_nodata(self._nodata)
        return filled_dem
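The core of `fill_sinks2` is morphological reconstruction by erosion: start from a marker that equals the DEM on the border and a very high value inside, then repeatedly take the pointwise maximum of the DEM and the eroded marker until nothing changes. Here is a small hedged sketch of that loop on a plain NumPy array (not topopy's API; `fill_sinks_sketch` and the toy DEM are assumptions for illustration):

import numpy as np
from scipy import ndimage

def fill_sinks_sketch(dem, four_way=False):
    """Fill depressions in a 2D float array by iterative grey erosion (sketch)."""
    # Marker: keep the border values, raise the interior above any real elevation.
    h_max = dem.max() + 100
    inside = ndimage.binary_erosion(np.isfinite(dem),
                                    structure=np.ones((3, 3), dtype=bool))
    marker = dem.copy()
    marker[inside] = h_max

    # 4- or 8-connected structuring element, as in the method above.
    el = (np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=bool)
          if four_way else np.ones((3, 3), dtype=bool))

    # Iterate: erode the marker and clamp it from below by the original DEM.
    prev = np.zeros_like(marker)
    while not np.array_equal(prev, marker):
        prev = marker.copy()
        marker = np.maximum(dem, ndimage.grey_erosion(marker, footprint=el))
    return marker

# Toy DEM with a pit; after filling, the pit is raised to the level of its rim.
dem = np.array([[3., 3., 3., 3.],
                [3., 1., 2., 3.],
                [3., 2., 1., 3.],
                [3., 3., 3., 3.]])
filled = fill_sinks_sketch(dem)   # interior becomes 3.0 everywhere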
Code Example #34
0
def get_connections(seg_file, syn_seg_file, pre_synaptic_probs):
    '''Get the detected synaptic connections
    
    :param seg_file: the .h5 segmentation
    :param syn_seg_file: the .h5 segmentation of the synapses
    :param pre_synaptic_probs: the .h5 probability maps classifying
    voxels as pre-synaptic
    '''
    #
    # The strategy:
    #
    # * sum the probability map within the synapse regions to get
    #   the average strength of the signal within each neuron
    # * find only the border pixels of the segmentations
    # * overlay with synapses to get only border pixels within synapses
    # * use np.bincount to compute average x, y and z
    #
    with h5py.File(seg_file, "r") as fd:
        seg_volume = fd[list(fd.keys())[0]][:]
    with h5py.File(syn_seg_file, "r") as fd:
        synseg_volume = fd[list(fd.keys())[0]][:]
    ############################################
    #
    # Find the neuron pairs.
    #
    ############################################
    z, y, x = np.where(synseg_volume > 0)
    seg, synseg = seg_volume[z, y, x], synseg_volume[z, y, x]
    matrix = coo_matrix((np.ones(len(z)), (synseg, seg)))
    matrix.sum_duplicates()
    synapse_labels, neuron_labels = matrix.nonzero()
    counts = matrix.tocsr()[synapse_labels, neuron_labels].getA1()
    #
    # Order by synapse label and -count to get the neurons with
    # the highest count first
    #
    order = np.lexsort((-counts, synapse_labels))
    counts, neuron_labels, synapse_labels = \
        [_[order] for _ in (counts, neuron_labels, synapse_labels)]
    first = np.hstack(
        [[True], synapse_labels[:-1] != synapse_labels[1:], [True]])
    idx = np.where(first)[0]
    per_synapse_counts = idx[1:] - idx[:-1]
    #
    # Get rid of counts < 2
    #
    mask = per_synapse_counts >= 2
    idx = idx[:-1][mask]
    #
    # pick out the first and second most overlapping neurons and
    # their synapse.
    #
    neuron_1 = neuron_labels[idx]
    synapses = synapse_labels[idx]
    neuron_2 = neuron_labels[idx+1]
    ###################################
    # 
    # Determine polarity
    #
    ###################################
    with h5py.File(pre_synaptic_probs, "r") as fd:
        probs = fd[list(fd.keys())[0]][:][z, y, x]
    #
    # Start by making a matrix to transform the map.
    #
    matrix = coo_matrix(
        (np.arange(len(idx)*2) + 1,
         (np.hstack((neuron_1, neuron_2)),
          np.hstack((synapses, synapses)))),
        shape=(np.max(seg)+1, np.max(synseg) + 1)).tocsr()
    #
    # Convert the neuron / synapse map to the mapping labels
    #
    mapping_labeling = matrix[seg, synseg].A1
    #
    # Score each synapse / label overlap on both the transmitter
    # and receptor probabilities
    #
    areas = np.bincount(mapping_labeling)
    transmitter_score = np.bincount(
            mapping_labeling, probs, minlength=len(areas)) / areas
    del probs
    score_1 = transmitter_score[1:len(idx)+1]
    score_2 = transmitter_score[len(idx)+1:]
    #
    # Flip the scores and neuron assignments if score_2 > score_1
    #
    flippers = score_2 > score_1
    score_1[flippers], score_2[flippers] = \
        score_2[flippers], score_1[flippers]
    neuron_1[flippers], neuron_2[flippers] = \
        neuron_2[flippers], neuron_1[flippers]
    ##########################################################
    #
    # Compute synapse centers
    #
    ##########################################################
    edge_z, edge_y, edge_x = np.where(
        (grey_dilation(seg_volume, size=3) != grey_erosion(seg_volume, size=3)) &
        (synseg_volume != 0))
    areas = np.bincount(synseg_volume[edge_z, edge_y, edge_x])
    xc, yc, zc = [np.bincount(synseg_volume[edge_z, edge_y, edge_x], _)
                  for _ in (edge_x, edge_y, edge_z)]
    result = dict(neuron_1=neuron_1,
                  neuron_2=neuron_2,
                  synapse_center=dict(x=xc[synapses]/areas[synapses],
                                      y=yc[synapses]/areas[synapses],
                                      z=zc[synapses]/areas[synapses]))
    return result
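The synapse-center computation above relies on a `np.bincount` trick: summing the x, y and z coordinates per synapse label with `weights=`, then dividing by the per-label pixel counts, gives the centroid of every label in one vectorised pass. A minimal hedged sketch of that trick in 2D (the toy label image is an assumption for illustration):

import numpy as np

# Toy label image: label 1 occupies two pixels, label 2 occupies three.
labels = np.array([[0, 1, 1],
                   [2, 2, 2],
                   [0, 0, 0]])

y, x = np.nonzero(labels)
lab = labels[y, x]

# Per-label pixel counts and per-label coordinate sums.
counts = np.bincount(lab)
x_sums = np.bincount(lab, weights=x)
y_sums = np.bincount(lab, weights=y)

# Centroids; index 0 is the background and has count 0 here.
with np.errstate(invalid='ignore', divide='ignore'):
    cx = x_sums / counts
    cy = y_sums / counts
# cx[1], cy[1] == (1.5, 0.0) and cx[2], cy[2] == (1.0, 1.0)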
Code Example #35
0
 def run(self, workspace):
     '''Run the module on the image set'''
     seed_objects_name = self.seed_objects_name.value
     skeleton_name = self.image_name.value
     seed_objects = workspace.object_set.get_objects(seed_objects_name)
     labels = seed_objects.segmented
     labels_count = np.max(labels)
     label_range = np.arange(labels_count,dtype=np.int32)+1
     
     skeleton_image = workspace.image_set.get_image(
         skeleton_name, must_be_binary = True)
     skeleton = skeleton_image.pixel_data
     if skeleton_image.has_mask:
         skeleton = skeleton & skeleton_image.mask
     try:
         labels = skeleton_image.crop_image_similarly(labels)
     except:
         labels, m1 = cpo.size_similarly(skeleton, labels)
         labels[~m1] = 0
     #
     # The following code makes a ring around the seed objects with
     # the skeleton trunks sticking out of it.
     #
     # Create a new skeleton with holes at the seed objects
     # First combine the seed objects with the skeleton so
     # that the skeleton trunks come out of the seed objects.
     #
     # Erode the labels once so that all of the trunk branchpoints
     # will be within the labels
     #
     #
     # Dilate the objects, then subtract them to make a ring
     #
     my_disk = morph.strel_disk(1.5).astype(int)
     dilated_labels = grey_dilation(labels, footprint=my_disk)
     seed_mask = dilated_labels > 0
     combined_skel = skeleton | seed_mask
     
     closed_labels = grey_erosion(dilated_labels,
                                  footprint = my_disk)
     seed_center = closed_labels > 0
     combined_skel = combined_skel & (~seed_center)
     #
     # Fill in single holes (but not a one-pixel hole made by
     # a one-pixel image)
     #
     if self.wants_to_fill_holes:
         def size_fn(area, is_object):
             return (~ is_object) and (area <= self.maximum_hole_size.value)
         combined_skel = morph.fill_labeled_holes(
             combined_skel, ~seed_center, size_fn)
     #
     # Reskeletonize to make true branchpoints at the ring boundaries
     #
     combined_skel = morph.skeletonize(combined_skel)
     #
     # The skeleton outside of the labels
     #
     outside_skel = combined_skel & (dilated_labels == 0)
     #
     # Associate all skeleton points with seed objects
     #
     dlabels, distance_map = propagate.propagate(np.zeros(labels.shape),
                                                 dilated_labels,
                                                 combined_skel, 1)
     #
     # Get rid of any branchpoints not connected to seeds
     #
     combined_skel[dlabels == 0] = False
     #
     # Find the branchpoints
     #
     branch_points = morph.branchpoints(combined_skel)
     #
     # Odd case: when four branches meet like this, branchpoints are not
     # assigned because they are arbitrary. So assign them.
     #
     # .  .
     #  B.
     #  .B
     # .  .
     #
     odd_case = (combined_skel[:-1,:-1] & combined_skel[1:,:-1] &
                 combined_skel[:-1,1:] & combined_skel[1:,1:])
     branch_points[:-1,:-1][odd_case] = True
     branch_points[1:,1:][odd_case] = True
     #
     # Find the branching counts for the trunks (# of extra branches
     # emanating from a point other than the line it might be on).
     #
     branching_counts = morph.branchings(combined_skel)
     branching_counts = np.array([0,0,0,1,2])[branching_counts]
     #
     # Only take branches within 1 of the outside skeleton
     #
     dilated_skel = scind.binary_dilation(outside_skel, morph.eight_connect)
     branching_counts[~dilated_skel] = 0
     #
     # Find the endpoints
     #
     end_points = morph.endpoints(combined_skel)
     #
     # We use two ranges for classification here:
     # * anything within one pixel of the dilated image is a trunk
     # * anything outside of that range is a branch
     #
     nearby_labels = dlabels.copy()
     nearby_labels[distance_map > 1.5] = 0
     
     outside_labels = dlabels.copy()
     outside_labels[nearby_labels > 0] = 0
     #
     # The trunks are the branchpoints that lie within one pixel of
     # the dilated image.
     #
     if labels_count > 0:
         trunk_counts = fix(scind.sum(branching_counts, nearby_labels, 
                                      label_range)).astype(int)
     else:
         trunk_counts = np.zeros((0,),int)
     #
     # The branches are the branchpoints that lie outside the seed objects
     #
     if labels_count > 0:
         branch_counts = fix(scind.sum(branch_points, outside_labels, 
                                       label_range))
     else:
         branch_counts = np.zeros((0,),int)
     #
     # Save the endpoints
     #
     if labels_count > 0:
         end_counts = fix(scind.sum(end_points, outside_labels, label_range))
     else:
         end_counts = np.zeros((0,), int)
     #
     # Save measurements
     #
     m = workspace.measurements
     assert isinstance(m, cpmeas.Measurements)
     feature = "_".join((C_NEURON, F_NUMBER_TRUNKS, skeleton_name))
     m.add_measurement(seed_objects_name, feature, trunk_counts)
     feature = "_".join((C_NEURON, F_NUMBER_NON_TRUNK_BRANCHES, 
                         skeleton_name))
     m.add_measurement(seed_objects_name, feature, branch_counts)
     feature = "_".join((C_NEURON, F_NUMBER_BRANCH_ENDS, skeleton_name))
     m.add_measurement(seed_objects_name, feature, end_counts)
     #
     # Collect the graph information
     #
     if self.wants_neuron_graph:
         trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
         intensity_image = workspace.image_set.get_image(
             self.intensity_image_name.value)
         edge_graph, vertex_graph = self.make_neuron_graph(
             combined_skel, dlabels, 
             trunk_mask,
             branch_points & ~trunk_mask,
             end_points,
             intensity_image.pixel_data)
         #
         # Add an image number column to both and change vertex index
         # to vertex number (one-based)
         #
         image_number = workspace.measurements.image_set_number
         vertex_graph = np.rec.fromarrays(
             (np.ones(len(vertex_graph)) * image_number,
              np.arange(1, len(vertex_graph) + 1),
              vertex_graph['i'],
              vertex_graph['j'],
              vertex_graph['labels'],
              vertex_graph['kind']),
             names = ("image_number", "vertex_number", "i", "j",
                      "labels", "kind"))
         
         edge_graph = np.rec.fromarrays(
             (np.ones(len(edge_graph)) * image_number,
              edge_graph["v1"],
              edge_graph["v2"],
              edge_graph["length"],
              edge_graph["total_intensity"]),
             names = ("image_number", "v1", "v2", "length", 
                      "total_intensity"))
         
         path = self.directory.get_absolute_path(m)
         edge_file = m.apply_metadata(self.edge_file_name.value)
         edge_path = os.path.abspath(os.path.join(path, edge_file))
         vertex_file = m.apply_metadata(self.vertex_file_name.value)
         vertex_path = os.path.abspath(os.path.join(path, vertex_file))
         d = self.get_dictionary(workspace.image_set_list)
         for file_path, table, fmt in (
             (edge_path, edge_graph, "%d,%d,%d,%d,%.4f"),
             (vertex_path, vertex_graph, "%d,%d,%d,%d,%d,%s")):
             #
             # Delete files first time through / otherwise append
             #
             if file_path not in d:
                 d[file_path] = True
                 if os.path.exists(file_path):
                     if workspace.frame is not None:
                         import wx
                         if wx.MessageBox(
                             "%s already exists. Do you want to overwrite it?" %
                             file_path, "Warning: overwriting file",
                             style = wx.YES_NO, 
                             parent = workspace.frame) != wx.YES:
                             raise ValueError("Can't overwrite %s" % file_path)
                     os.remove(file_path)
                 fd = open(file_path, 'wt')
                 header = ','.join(table.dtype.names)
                 fd.write(header + '\n')
             else:
                 fd = open(file_path, 'at')
             np.savetxt(fd, table, fmt)
             fd.close()
             if workspace.frame is not None:
                 workspace.display_data.edge_graph = edge_graph
                 workspace.display_data.vertex_graph = vertex_graph
     #
     # Make the display image
     #
     if workspace.frame is not None or self.wants_branchpoint_image:
         branchpoint_image = np.zeros((skeleton.shape[0],
                                       skeleton.shape[1],
                                       3))
         trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
         branch_mask = branch_points & (outside_labels != 0)
         end_mask = end_points & (outside_labels != 0)
         branchpoint_image[outside_skel,:] = 1
         branchpoint_image[trunk_mask | branch_mask | end_mask,:] = 0
         branchpoint_image[trunk_mask,0] = 1
         branchpoint_image[branch_mask,1] = 1
         branchpoint_image[end_mask, 2] = 1
         branchpoint_image[dilated_labels != 0,:] *= .875
         branchpoint_image[dilated_labels != 0,:] += .1
         if workspace.frame:
             workspace.display_data.branchpoint_image = branchpoint_image
         if self.wants_branchpoint_image:
             bi = cpi.Image(branchpoint_image,
                            parent_image = skeleton_image)
             workspace.image_set.add(self.branchpoint_image_name.value, bi)
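The ring construction in the module above (dilate the seed labels, erode the dilated labels, and keep the band in between so that the skeleton trunks stick out of it) can be reproduced with scipy alone. A hedged sketch with a toy label image; it uses a plain 3x3 footprint instead of CellProfiler's `morph.strel_disk(1.5)`:

import numpy as np
from scipy.ndimage import grey_dilation, grey_erosion

# Toy seed labels: a single 3x3 object labelled 1.
labels = np.zeros((9, 9), dtype=int)
labels[3:6, 3:6] = 1

footprint = np.ones((3, 3), dtype=int)
dilated = grey_dilation(labels, footprint=footprint)   # seeds grown by one pixel
closed = grey_erosion(dilated, footprint=footprint)    # grown seeds shrunk back

seed_mask = dilated > 0
seed_center = closed > 0
ring = seed_mask & ~seed_center   # one-pixel ring around each seed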
Code Example #36
0
    def __call__(self,
                 img: np.ndarray,
                 mode: Optional[str] = None,
                 radius: Optional[int] = None,
                 binary: Optional[bool] = None) -> np.ndarray:
        """
        Apply the transform to `img`.

        """
        self.mode = self.mode if mode is None else mode
        self.radius = self.radius if radius is None else radius
        self.binary = self.binary if binary is None else binary

        input_ndim = img.squeeze().ndim  # spatial ndim
        if input_ndim == 2:
            structure = ndi.generate_binary_structure(2, 1)
        elif input_ndim == 3:
            structure = ndi.generate_binary_structure(3, 1)
        else:
            raise ValueError(
                f'Currently only supports 2D & 3D data, but got an image with shape {img.shape}'
            )

        channel_dim = None
        if input_ndim != img.ndim:
            channel_dim = img.shape.index(1)
            img = img.squeeze()

        if self.mode == 'closing':
            if self.binary:
                img = ndi.binary_closing(img,
                                         structure=structure,
                                         iterations=self.radius)
            else:
                for _ in range(self.radius):
                    img = ndi.grey_closing(img, footprint=structure)
        elif self.mode == 'dilation':
            if self.binary:
                img = ndi.binary_dilation(img,
                                          structure=structure,
                                          iterations=self.radius)
            else:
                for _ in range(self.radius):
                    img = ndi.grey_dilation(img, footprint=structure)
        elif self.mode == 'erosion':
            if self.binary:
                img = ndi.binary_erosion(img,
                                         structure=structure,
                                         iterations=self.radius)
            else:
                for _ in range(self.radius):
                    img = ndi.grey_erosion(img, footprint=structure)
        elif self.mode == 'opening':
            if self.binary:
                img = ndi.binary_opening(img,
                                         structure=structure,
                                         iterations=self.radius)
            else:
                for _ in range(self.radius):
                    img = ndi.grey_opening(img, footprint=structure)
        else:
            raise ValueError(f'Unexpected keyword {self.mode}')

        if channel_dim is not None:
            return np.expand_dims(img, axis=channel_dim)
        else:
            return img
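For the transform above, `binary=True` maps to a single scipy call with `iterations=radius`, while `binary=False` repeats the grey-scale operation `radius` times. A short hedged sketch of the two paths for `mode='dilation'` on a channel-first toy image (the variable names are illustrative; the scipy calls are the ones used in the transform):

import numpy as np
from scipy import ndimage as ndi

img = (np.random.rand(1, 32, 32) > 0.7).astype(np.uint8)  # channel-first toy image
radius = 2
structure = ndi.generate_binary_structure(2, 1)

squeezed = img.squeeze()  # drop the channel axis, as the transform does

# binary=True: one call with `iterations=radius`.
binary_out = ndi.binary_dilation(squeezed, structure=structure, iterations=radius)

# binary=False: the grey-scale operation repeated `radius` times.
grey_out = squeezed.copy()
for _ in range(radius):
    grey_out = ndi.grey_dilation(grey_out, footprint=structure)

# Restore the channel axis afterwards, as the transform does.
binary_out = np.expand_dims(binary_out, axis=0)
grey_out = np.expand_dims(grey_out, axis=0)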
Code Example #37
0
    def run(self, workspace):
        '''Run the module on the image set'''
        seed_objects_name = self.seed_objects_name.value
        skeleton_name = self.image_name.value
        seed_objects = workspace.object_set.get_objects(seed_objects_name)
        labels = seed_objects.segmented
        labels_count = np.max(labels)
        label_range = np.arange(labels_count, dtype=np.int32) + 1

        skeleton_image = workspace.image_set.get_image(skeleton_name,
                                                       must_be_binary=True)
        skeleton = skeleton_image.pixel_data
        if skeleton_image.has_mask:
            skeleton = skeleton & skeleton_image.mask
        try:
            labels = skeleton_image.crop_image_similarly(labels)
        except:
            labels, m1 = cpo.size_similarly(skeleton, labels)
            labels[~m1] = 0
        #
        # The following code makes a ring around the seed objects with
        # the skeleton trunks sticking out of it.
        #
        # Create a new skeleton with holes at the seed objects
        # First combine the seed objects with the skeleton so
        # that the skeleton trunks come out of the seed objects.
        #
        # Erode the labels once so that all of the trunk branchpoints
        # will be within the labels
        #
        #
        # Dilate the objects, then subtract them to make a ring
        #
        my_disk = morph.strel_disk(1.5).astype(int)
        dilated_labels = grey_dilation(labels, footprint=my_disk)
        seed_mask = dilated_labels > 0
        combined_skel = skeleton | seed_mask

        closed_labels = grey_erosion(dilated_labels, footprint=my_disk)
        seed_center = closed_labels > 0
        combined_skel = combined_skel & (~seed_center)
        #
        # Fill in single holes (but not a one-pixel hole made by
        # a one-pixel image)
        #
        if self.wants_to_fill_holes:

            def size_fn(area, is_object):
                return (~is_object) and (area <= self.maximum_hole_size.value)

            combined_skel = morph.fill_labeled_holes(combined_skel,
                                                     ~seed_center, size_fn)
        #
        # Reskeletonize to make true branchpoints at the ring boundaries
        #
        combined_skel = morph.skeletonize(combined_skel)
        #
        # The skeleton outside of the labels
        #
        outside_skel = combined_skel & (dilated_labels == 0)
        #
        # Associate all skeleton points with seed objects
        #
        dlabels, distance_map = propagate.propagate(np.zeros(labels.shape),
                                                    dilated_labels,
                                                    combined_skel, 1)
        #
        # Get rid of any branchpoints not connected to seeds
        #
        combined_skel[dlabels == 0] = False
        #
        # Find the branchpoints
        #
        branch_points = morph.branchpoints(combined_skel)
        #
        # Odd case: when four branches meet like this, branchpoints are not
        # assigned because they are arbitrary. So assign them.
        #
        # .  .
        #  B.
        #  .B
        # .  .
        #
        odd_case = (combined_skel[:-1, :-1] & combined_skel[1:, :-1]
                    & combined_skel[:-1, 1:] & combined_skel[1:, 1:])
        branch_points[:-1, :-1][odd_case] = True
        branch_points[1:, 1:][odd_case] = True
        #
        # Find the branching counts for the trunks (# of extra branches
        # emanating from a point other than the line it might be on).
        #
        branching_counts = morph.branchings(combined_skel)
        branching_counts = np.array([0, 0, 0, 1, 2])[branching_counts]
        #
        # Only take branches within 1 of the outside skeleton
        #
        dilated_skel = scind.binary_dilation(outside_skel, morph.eight_connect)
        branching_counts[~dilated_skel] = 0
        #
        # Find the endpoints
        #
        end_points = morph.endpoints(combined_skel)
        #
        # We use two ranges for classification here:
        # * anything within one pixel of the dilated image is a trunk
        # * anything outside of that range is a branch
        #
        nearby_labels = dlabels.copy()
        nearby_labels[distance_map > 1.5] = 0

        outside_labels = dlabels.copy()
        outside_labels[nearby_labels > 0] = 0
        #
        # The trunks are the branchpoints that lie within one pixel of
        # the dilated image.
        #
        if labels_count > 0:
            trunk_counts = fix(
                scind.sum(branching_counts, nearby_labels,
                          label_range)).astype(int)
        else:
            trunk_counts = np.zeros((0, ), int)
        #
        # The branches are the branchpoints that lie outside the seed objects
        #
        if labels_count > 0:
            branch_counts = fix(
                scind.sum(branch_points, outside_labels, label_range))
        else:
            branch_counts = np.zeros((0, ), int)
        #
        # Save the endpoints
        #
        if labels_count > 0:
            end_counts = fix(scind.sum(end_points, outside_labels,
                                       label_range))
        else:
            end_counts = np.zeros((0, ), int)
        #
        # Save measurements
        #
        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        feature = "_".join((C_NEURON, F_NUMBER_TRUNKS, skeleton_name))
        m.add_measurement(seed_objects_name, feature, trunk_counts)
        feature = "_".join(
            (C_NEURON, F_NUMBER_NON_TRUNK_BRANCHES, skeleton_name))
        m.add_measurement(seed_objects_name, feature, branch_counts)
        feature = "_".join((C_NEURON, F_NUMBER_BRANCH_ENDS, skeleton_name))
        m.add_measurement(seed_objects_name, feature, end_counts)
        #
        # Collect the graph information
        #
        if self.wants_neuron_graph:
            trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
            intensity_image = workspace.image_set.get_image(
                self.intensity_image_name.value)
            edge_graph, vertex_graph = self.make_neuron_graph(
                combined_skel, dlabels, trunk_mask,
                branch_points & ~trunk_mask, end_points,
                intensity_image.pixel_data)
            #
            # Add an image number column to both and change vertex index
            # to vertex number (one-based)
            #
            image_number = workspace.measurements.image_set_number
            vertex_graph = np.rec.fromarrays(
                (np.ones(len(vertex_graph)) * image_number,
                 np.arange(1,
                           len(vertex_graph) + 1), vertex_graph['i'],
                 vertex_graph['j'], vertex_graph['labels'],
                 vertex_graph['kind']),
                names=("image_number", "vertex_number", "i", "j", "labels",
                       "kind"))

            edge_graph = np.rec.fromarrays(
                (np.ones(len(edge_graph)) * image_number, edge_graph["v1"],
                 edge_graph["v2"], edge_graph["length"],
                 edge_graph["total_intensity"]),
                names=("image_number", "v1", "v2", "length",
                       "total_intensity"))

            path = self.directory.get_absolute_path(m)
            edge_file = m.apply_metadata(self.edge_file_name.value)
            edge_path = os.path.abspath(os.path.join(path, edge_file))
            vertex_file = m.apply_metadata(self.vertex_file_name.value)
            vertex_path = os.path.abspath(os.path.join(path, vertex_file))
            d = self.get_dictionary(workspace.image_set_list)
            for file_path, table, fmt in ((edge_path, edge_graph,
                                           "%d,%d,%d,%d,%.4f"),
                                          (vertex_path, vertex_graph,
                                           "%d,%d,%d,%d,%d,%s")):
                #
                # Delete files first time through / otherwise append
                #
                if file_path not in d:
                    d[file_path] = True
                    if os.path.exists(file_path):
                        if workspace.frame is not None:
                            import wx
                            if wx.MessageBox(
                                    "%s already exists. Do you want to overwrite it?"
                                    % file_path,
                                    "Warning: overwriting file",
                                    style=wx.YES_NO,
                                    parent=workspace.frame) != wx.YES:
                                raise ValueError("Can't overwrite %s" %
                                                 file_path)
                        os.remove(file_path)
                    fd = open(file_path, 'wt')
                    header = ','.join(table.dtype.names)
                    fd.write(header + '\n')
                else:
                    fd = open(file_path, 'at')
                np.savetxt(fd, table, fmt)
                fd.close()
                if workspace.frame is not None:
                    workspace.display_data.edge_graph = edge_graph
                    workspace.display_data.vertex_graph = vertex_graph
        #
        # Make the display image
        #
        if workspace.frame is not None or self.wants_branchpoint_image:
            branchpoint_image = np.zeros(
                (skeleton.shape[0], skeleton.shape[1], 3))
            trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
            branch_mask = branch_points & (outside_labels != 0)
            end_mask = end_points & (outside_labels != 0)
            branchpoint_image[outside_skel, :] = 1
            branchpoint_image[trunk_mask | branch_mask | end_mask, :] = 0
            branchpoint_image[trunk_mask, 0] = 1
            branchpoint_image[branch_mask, 1] = 1
            branchpoint_image[end_mask, 2] = 1
            branchpoint_image[dilated_labels != 0, :] *= .875
            branchpoint_image[dilated_labels != 0, :] += .1
            if workspace.frame:
                workspace.display_data.branchpoint_image = branchpoint_image
            if self.wants_branchpoint_image:
                bi = cpi.Image(branchpoint_image, parent_image=skeleton_image)
                workspace.image_set.add(self.branchpoint_image_name.value, bi)
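The branchpoint/endpoint detection in this module goes through CellProfiler's `morph.branchpoints` and `morph.endpoints`. A rough, generic stand-in (not CellProfiler's implementation) counts the 8-connected skeleton neighbours of every skeleton pixel: exactly one neighbour marks an endpoint, three or more mark a branchpoint.

import numpy as np
from scipy import ndimage as ndi

def classify_skeleton_points(skel):
    """Neighbour-count classification of a binary skeleton (rough sketch)."""
    skel = skel.astype(bool)
    kernel = np.ones((3, 3), dtype=int)
    kernel[1, 1] = 0
    # Number of 8-connected skeleton neighbours for every pixel.
    neighbours = ndi.convolve(skel.astype(int), kernel, mode='constant')
    end_points = skel & (neighbours == 1)
    branch_points = skel & (neighbours >= 3)
    return branch_points, end_points

# Toy Y-shaped skeleton: one branchpoint, three endpoints.
skel = np.zeros((8, 8), dtype=bool)
skel[3:7, 3] = True          # stem
skel[[2, 1], [2, 1]] = True  # upper-left arm
skel[[2, 1], [4, 5]] = True  # upper-right arm
bp, ep = classify_skeleton_points(skel)  # bp.sum() == 1, ep.sum() == 3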
Code Example #38
0
    def run(self, workspace):
        objects = workspace.object_set.get_objects(self.objects_name.value)
        assert isinstance(objects, cpo.Objects)
        labels = objects.segmented
        half_window_size = self.window_size.value
        threshold = self.threshold.value
        output_labels = objects.segmented.copy()
        max_objects = np.max(output_labels)
        indices = np.arange(max_objects+1)

        # Get object slices
        object_slices = morph.fixup_scipy_ndimage_result(scind.measurements.find_objects(output_labels))

        # Calculate perimeters
        perimeters = morph.calculate_perimeters(output_labels, indices)

        # Find the neighbors
        neighbors = np.zeros((max_objects+1, max_objects+1), bool) # neighbors[i,j] = True when object j is a neighbor of object i.
        lmax = scind.grey_dilation(output_labels, footprint=np.ones((3,3), bool)) # lower pixel values will be replaced by adjacent larger pixel values
        lbig = output_labels.copy()
        lbig[lbig == 0] = np.iinfo(output_labels.dtype).max # set the background to be large so that it is ignored next
        lmin = scind.grey_erosion(lbig, footprint=np.ones((3,3), bool)) # larger pixel values will be replaced by adjacent smaller pixel values
        
        for i in range(1, max_objects+1):
            object_bounds = (object_slices[i-1][0],object_slices[i-1][1])
            object_map = output_labels[object_bounds] == i # The part of the slice that contains the object
            
            lower_neighbors = np.unique(lmin[object_bounds][object_map])
            higher_neighbors = np.unique(lmax[object_bounds][object_map])

            for neighbor_list in [lower_neighbors, higher_neighbors]:
                for j in range(0, len(neighbor_list)):
                    neighbors[i,neighbor_list[j]] = True
            neighbors[i,i] = False
        
        # Generate window dimensions for each location (necessary for the edges)
        dim1_window = np.ones(output_labels.shape, int) * half_window_size
        dim2_window = np.ones(output_labels.shape, int) * half_window_size
        for i in range(0, half_window_size):
            dim1_window[i:output_labels.shape[0]-i,0:output_labels.shape[1]] += 1
            dim2_window[0:output_labels.shape[0],i:output_labels.shape[1]-i] += 1
        
        # Loop over all objects
        for k in range(1, max_objects+1):
            if (perimeters[k] == 0) or not np.max(neighbors[k]): # Has been removed by a merge or has no neighbors
                continue

            k_bounds_array = object_slices[k-1]
            k_dim1_bounds = k_bounds_array[0].indices(output_labels.shape[0])
            k_dim2_bounds = k_bounds_array[1].indices(output_labels.shape[1])
            
            # Loop over all neighbors of object k
            l = 0
            while l < max_objects:
                l += 1
                
                if k == l or (perimeters[l] == 0) or not neighbors[k,l]: # Has been removed by a merge or is not a neighbor of object k
                    continue

                l_bounds_array = object_slices[l-1]
                l_dim1_bounds = l_bounds_array[0].indices(output_labels.shape[0])
                l_dim2_bounds = l_bounds_array[1].indices(output_labels.shape[1])

                kl_dim1_min_bound = min(k_dim1_bounds[0], l_dim1_bounds[0])
                kl_dim1_max_bound = max(k_dim1_bounds[1], l_dim1_bounds[1])
                kl_dim2_min_bound = min(k_dim2_bounds[0], l_dim2_bounds[0])
                kl_dim2_max_bound = max(k_dim2_bounds[1], l_dim2_bounds[1])

                kl_bounds = (slice(kl_dim1_min_bound, kl_dim1_max_bound), slice(kl_dim2_min_bound, kl_dim2_max_bound))
                kl_shape = (kl_dim1_max_bound - kl_dim1_min_bound, kl_dim2_max_bound - kl_dim2_min_bound)
                
                #
                # Find the vertices of object pair k, l
                #
                vertices = np.zeros(kl_shape, bool)
                isAdjacent = np.zeros(output_labels.shape, bool)
                isBorder = np.zeros(output_labels.shape, bool)
                
                for i in range(-1,2):
                    ki_min = 0
                    ki_max = 0
                    li_min = 0
                    li_max = 0
                    if k_dim1_bounds[0] + i < 0:
                        ki_min = -i
                    else:
                        ki_min = k_dim1_bounds[0]

                    if l_dim1_bounds[0] + i < 0:
                        li_min = -i
                    else:
                        li_min = l_dim1_bounds[0]

                    if k_dim1_bounds[1] + i > output_labels.shape[0]:
                        ki_max = output_labels.shape[0] - i
                    else:
                        ki_max = k_dim1_bounds[1]

                    if l_dim1_bounds[1] + i > output_labels.shape[0]:
                        li_max = output_labels.shape[0] - i
                    else:
                        li_max = l_dim1_bounds[1]

                    ki_slice = slice(ki_min, ki_max)
                    li_slice = slice(li_min, li_max)
                    ki_test_slice = slice(ki_min+i, ki_max+i)
                    li_test_slice = slice(li_min+i, li_max+i)
                    
                    for j in range(-1,2):
                        if i == j == 0:
                            continue

                        kj_min = 0
                        kj_max = 0
                        lj_min = 0
                        lj_max = 0
                        if k_dim2_bounds[0] + j < 0:
                            kj_min = -j
                        else:
                            kj_min = k_dim2_bounds[0]

                        if l_dim2_bounds[0] + j < 0:
                            lj_min = -j
                        else:
                            lj_min = l_dim2_bounds[0]

                        if k_dim2_bounds[1] + j > output_labels.shape[1]:
                            kj_max = output_labels.shape[1] - j
                        else:
                            kj_max = k_dim2_bounds[1]

                        if l_dim2_bounds[1] + j > output_labels.shape[1]:
                            lj_max = output_labels.shape[1] - j
                        else:
                            lj_max = l_dim2_bounds[1]

                        if (kj_min == kj_max or ki_min == ki_max) and (lj_min == lj_max or li_min == li_max):
                            continue

                        kj_slice = slice(kj_min, kj_max)
                        lj_slice = slice(lj_min, lj_max)
                        kj_test_slice = slice(kj_min+j, kj_max+j)
                        lj_test_slice = slice(lj_min+j, lj_max+j)

                        k_bounds = (ki_slice, kj_slice)
                        l_bounds = (li_slice, lj_slice)
                        k_test_bounds = (ki_test_slice, kj_test_slice)
                        l_test_bounds = (li_test_slice, lj_test_slice)
                        kl_mod_bounds = (slice(min(ki_min, li_min), max(ki_max, li_max)), slice(min(kj_min, lj_min), max(kj_max, lj_max)))

                        isAdjacentSlice = np.zeros(output_labels.shape, bool)
                        isAdjacentSlice[k_bounds] = np.logical_and(output_labels[k_bounds] == k,
                                                                   output_labels[k_test_bounds] == l)
                        isAdjacentSlice[l_bounds] = np.logical_or(isAdjacentSlice[l_bounds],
                                                                  np.logical_and(output_labels[l_bounds] == l,
                                                                                 output_labels[l_test_bounds] == k))
                        isAdjacent[kl_mod_bounds] = np.logical_or(isAdjacent[kl_mod_bounds],
                                                                  isAdjacentSlice[kl_mod_bounds])

                        isBorderSlice = np.zeros(output_labels.shape, bool)
                        isBorderSlice[k_bounds] = np.logical_and(output_labels[k_bounds] == k,
                                                                 np.logical_and(output_labels[k_test_bounds] != k,
                                                                                output_labels[k_test_bounds] != l))
                        isBorderSlice[l_bounds] = np.logical_or(isBorderSlice[l_bounds],
                                                                np.logical_and(output_labels[l_bounds] == l,
                                                                               np.logical_and(output_labels[l_test_bounds] != k,
                                                                                              output_labels[l_test_bounds] != l)))
                        isBorder[kl_mod_bounds] = np.logical_or(isBorder[kl_mod_bounds],
                                                                isBorderSlice[kl_mod_bounds])
                
                vertices = np.logical_and(isAdjacent[kl_bounds], isBorder[kl_bounds])

                #
                # Calculate the maximum vertex score for the pair
                #
                '''+1 for every non-I/J labeled pixel in the window
                +1 more for every non-I/J labeled pixel adjacent to it

                Divided by the score that would result if the objects were flat

                e.g. (vertex is starred)

                0  0  0  0  0  0  0
                0  0  0  0  0  0  0
                1  1  0  0  0  0  0
                1  1  1 *1* 2  2  0
                1  1  1  1  2  2  2
                1  1  1  1  2  2  2
                1  1  1  2  2  2  2

                If it were flat, it would be 7 * 3 = 21, so the divisor is 7 * 6 = 42.
                In the case that the window is rectangular (such as at the image edge),
                we split the difference.
                Generalized:

                Vertex Score = (Sum of NotIJ) / [A * B - 0.5 * (A + B)]
                '''
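                # Arithmetic check for the sketch above: with A = B = 7, the
                # divisor A*B - 0.5*(A + B) = 49 - 7 = 42 matches the
                # "7 * 6 = 42" figure, and a flat boundary's 21 non-I/J pixels
                # give a score of 21/42 = 0.5.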
                
                sum_not_IJ = np.zeros(kl_shape, int)
                for i in range(0, kl_shape[0]):
                    for j in range(0,kl_shape[1]):
                        if not vertices[i,j]:
                            continue

                        window_bounds = (slice(i - half_window_size + kl_dim1_min_bound, i + half_window_size + kl_dim1_min_bound),
                                         slice(j - half_window_size + kl_dim2_min_bound, j + half_window_size + kl_dim2_min_bound))

                        window_slice = output_labels[window_bounds]

                        sum_not_IJ[i,j] = count_nonzero(np.logical_and(window_slice != k,
                                                                     window_slice != l))
                        # Should be np.count_nonzero, but it doesn't exist in the version that comes with
                        # CellProfiler 2.0 r11710

                vertex_scores = sum_not_IJ / (dim1_window[kl_bounds] * dim2_window[kl_bounds] - 0.5 * (dim1_window[kl_bounds] + dim2_window[kl_bounds]))

                merged = np.zeros(kl_shape, int)
                merged[output_labels[kl_bounds] == k] = 1
                merged[output_labels[kl_bounds] == l] = 1

                merged_perimeter = morph.calculate_perimeters(merged, [1])

                seam_length = (perimeters[k] + perimeters[l] - merged_perimeter) / 2
                min_external_perimeter = min(perimeters[k], perimeters[l]) - seam_length
                adjusted_min_external_perimeter = min_external_perimeter / np.pi

                seam_fraction = seam_length / (adjusted_min_external_perimeter + seam_length)
                
                max_vertex_score = np.max(vertex_scores)

                score = (max_vertex_score + seam_fraction) / 2

                #
                # Join object pair with score above the threshold
                #
                if score >= threshold:
                    output_labels[output_labels == l] = k
                    
                    # Calculate new perimeters
                    perimeters[k] = merged_perimeter
                    perimeters[l] = 0

                    # Reconfigure neighbors
                    neighbors[k] = np.logical_or(neighbors[k], neighbors[l])
                    for x in range(1, max_objects+1): # Is there an array method to do this?
                        if neighbors[x,l]:
                            neighbors[x,k] = True
                    neighbors[0:max_objects,l] = False
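                    # Note: the loop above can also be written as a vectorized
                    # update, e.g. neighbors[1:, k] |= neighbors[1:, l].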

                    # Recalculate bounds
                    k_dim1_bounds = kl_bounds[0].indices(output_labels.shape[0])
                    k_dim2_bounds = kl_bounds[1].indices(output_labels.shape[1])
                    
                    # Reset l to 0 so that we check all the neighbors against this new object
                    l = 0
                else:
                    pass
                        
        output_objects = cpo.Objects()
        output_objects.segmented = output_labels
        if objects.has_small_removed_segmented:
            output_objects.small_removed_segmented = \
                copy_labels(objects.small_removed_segmented, output_labels)
        if objects.has_unedited_segmented:
            output_objects.unedited_segmented = \
                copy_labels(objects.unedited_segmented, output_labels)
        output_objects.parent_image = objects.parent_image
        workspace.object_set.add_objects(output_objects, self.output_objects_name.value)
        
        measurements = workspace.measurements
        add_object_count_measurements(measurements,
                                      self.output_objects_name.value,
                                      np.max(output_objects.segmented))
        add_object_location_measurements(measurements,
                                         self.output_objects_name.value,
                                         output_objects.segmented)
        
        #
        # Relate the output objects to the input ones and record
        # the relationship.
        #
        children_per_parent, parents_of_children = \
            objects.relate_children(output_objects)
        measurements.add_measurement(self.objects_name.value,
                                     FF_CHILDREN_COUNT % 
                                     self.output_objects_name.value,
                                     children_per_parent)
        measurements.add_measurement(self.output_objects_name.value,
                                     FF_PARENT%self.objects_name.value,
                                     parents_of_children)
        if self.wants_outlines:
            outlines = cellprofiler.cpmath.outline.outline(output_labels)
            outline_image = cpi.Image(outlines.astype(bool))
            workspace.image_set.add(self.outlines_name.value,
                                    outline_image)
                    
        if workspace.frame is not None:
            workspace.display_data.orig_labels = objects.segmented
            workspace.display_data.output_labels = output_objects.segmented        
コード例 #39
0
    def outline_segments(self, mask_background=False):
        """
        Outline the labeled segments.

        The "outlines" represent the pixels *just inside* the segments,
        leaving the background pixels unmodified.

        Parameters
        ----------
        mask_background : bool, optional
            Set to `True` to mask the background pixels (labels = 0) in
            the returned array.  This is useful for overplotting the
            segment outlines.  The default is `False`.

        Returns
        -------
        boundaries : `~numpy.ndarray` or `~numpy.ma.MaskedArray`
            An array with the same shape as the segmentation array
            containing only the outlines of the labeled segments.  The
            pixel values in the outlines correspond to the labels in the
            segmentation array.  If ``mask_background`` is `True`, then
            a `~numpy.ma.MaskedArray` is returned.

        Examples
        --------
        >>> from photutils.segmentation import SegmentationImage
        >>> segm = SegmentationImage([[0, 0, 0, 0, 0, 0],
        ...                           [0, 2, 2, 2, 2, 0],
        ...                           [0, 2, 2, 2, 2, 0],
        ...                           [0, 2, 2, 2, 2, 0],
        ...                           [0, 2, 2, 2, 2, 0],
        ...                           [0, 0, 0, 0, 0, 0]])
        >>> segm.outline_segments()
        array([[0, 0, 0, 0, 0, 0],
               [0, 2, 2, 2, 2, 0],
               [0, 2, 0, 0, 2, 0],
               [0, 2, 0, 0, 2, 0],
               [0, 2, 2, 2, 2, 0],
               [0, 0, 0, 0, 0, 0]])
        """
        from scipy.ndimage import (generate_binary_structure, grey_dilation,
                                   grey_erosion)

        # mode='constant' ensures outline is included on the array borders
        selem = generate_binary_structure(self._ndim, 1)  # edge connectivity
        eroded = grey_erosion(self.data,
                              footprint=selem,
                              mode='constant',
                              cval=0.)
        dilated = grey_dilation(self.data,
                                footprint=selem,
                                mode='constant',
                                cval=0.)

        outlines = ((dilated != eroded) & (self.data != 0)).astype(int)
        outlines *= self.data

        if mask_background:
            outlines = np.ma.masked_where(outlines == 0, outlines)

        return outlines
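A short usage sketch, added here for illustration rather than taken from the photutils source above: it reuses the toy segmentation from the docstring and shows how the masked array returned with mask_background=True can be overplotted, as the docstring suggests. The matplotlib calls are an assumption about typical usage, not part of the original snippet.

import numpy as np
import matplotlib.pyplot as plt
from photutils.segmentation import SegmentationImage

# Same toy segmentation as in the docstring example above.
segm = SegmentationImage(np.array([[0, 0, 0, 0, 0, 0],
                                   [0, 2, 2, 2, 2, 0],
                                   [0, 2, 2, 2, 2, 0],
                                   [0, 2, 2, 2, 2, 0],
                                   [0, 2, 2, 2, 2, 0],
                                   [0, 0, 0, 0, 0, 0]]))

# With mask_background=True the zero-valued pixels are masked, so imshow
# leaves them untouched and only the outlines are drawn on top.
outlines = segm.outline_segments(mask_background=True)
plt.imshow(segm.data, cmap='gray', interpolation='nearest')
plt.imshow(outlines, cmap='autumn', interpolation='nearest')
plt.show()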
コード例 #40
0
ファイル: image_process.py プロジェクト: baishi/3dbar
    a1 = toNumpyArray(Image.open('../85.tiff').convert('L')).copy()
    a2 = toNumpyArray(Image.open('../86.tiff').convert('L')).copy()
    
    a1[a1>10] = 40
    a1[a1<=10]= 0
    a2[a2>10]=90
    a2[a2<=10]=0
    numpy.savetxt('a1.out', a1)
    numpy.savetxt('a2.out', a2)
    a1a2=numpy.logical_xor(a1,a2)
    numpy.savetxt('a1a2.out',numpy.logical_xor(a1,a2))
    print numpy.unique(numpy.logical_xor(a1,a2))
    print numpy.unique(a1)
    print numpy.unique(a2)
    footprint = ndimage.generate_binary_structure(2, 1)
    cnt1 = a1 - ndimage.grey_erosion(a1, footprint=footprint)
    cnt2 = a2 - ndimage.grey_erosion(a2, footprint=footprint) 
    cnt =  cnt1+cnt2
    cnt[cnt==numpy.amax(a1)+numpy.amax(a2)]= numpy.amax(a1)
    numpy.savetxt('cnt.out', cnt)

    numpy.savetxt('ics.out', a1a2)

    icscnt = a1+a2
    icscnt[a1a2!=0] = 255
    icscnt[icscnt!=255] = 0
    icscnt[cnt!=0] = cnt[cnt!=0]
    numpy.savetxt('start.out', icscnt)
    print numpy.unique(icscnt)

    w = icscnt
コード例 #41
0
ファイル: skimage.py プロジェクト: rosswhitfield/corelli
eros = erosion(signal, selem=ball(2))

plt.imshow(eros[251], vmax=5e-5)
plt.show()

plt.imshow(np.log(eros[251]))
plt.show()

fft = np.fft.fftshift(np.fft.fftn(np.fft.fftshift(eros)))
out = (fft * np.conj(fft)).real

plt.imshow(np.log(out[251]))
plt.show()

eros = ndi.grey_erosion(signal, footprint=ball(3))
eros2 = ndi.grey_erosion(image, footprint=disk(3))

opened = opening(image)
closed = closing(image)

opened = opening(signal)
closed = closing(signal)

gf = ndi.gaussian_filter(image, 1)
opened = opening(gf)
closed = closing(gf)
eros = erosion(gf)

# Finding local maxima
# http://scikit-image.org/docs/dev/auto_examples/segmentation/plot_peak_local_max.html
コード例 #42
0
def _create_world_composite(items,
                            lon_limits=None,
                            erosion_size=20,
                            smooth_width=20):
    # smooth_sigma = 4

    img = None
    for (path, area, timeslot) in items:

        if not isinstance(area, AreaDefinition):
            area = get_area_def(area)

        next_img = read_image(path, area, timeslot)

        if img is None:
            img = next_img
        else:
            # scaled_smooth_sigma = smooth_sigma * (float(img.width) / 1000.0)

            img_mask = reduce(np.ma.mask_or,
                              [chn.mask for chn in img.channels])
            next_img_mask = reduce(np.ma.mask_or,
                                   [chn.mask for chn in next_img.channels])

            # Mask overlapping areas away
            if lon_limits:
                for sat in lon_limits:
                    if sat in path:
                        mask_limits = calc_pixel_mask_limits(
                            area, lon_limits[sat])
                        for lim in mask_limits:
                            next_img_mask[:, lim[0]:lim[1]] = 1
                        break

            alpha = np.ones(next_img_mask.shape, dtype='float')
            alpha[next_img_mask] = 0.0

            if erosion_size is not None and smooth_width is not None:
                scaled_erosion_size = erosion_size * (float(img.width) /
                                                      1000.0)
                scaled_smooth_width = smooth_width * (float(img.width) /
                                                      1000.0)

                # smooth_alpha = ndi.gaussian_filter(
                #     ndi.grey_erosion(alpha, size=(scaled_erosion_size,
                #                                   scaled_erosion_size)),
                #        scaled_smooth_sigma)
                smooth_alpha = ndi.uniform_filter(
                    ndi.grey_erosion(alpha,
                                     size=(scaled_erosion_size,
                                           scaled_erosion_size)),
                    scaled_smooth_width)
                smooth_alpha[img_mask] = alpha[img_mask]
            else:
                smooth_alpha = alpha

            for i in range(0, min(len(img.channels), len(next_img.channels))):
                chdata = next_img.channels[i].data * smooth_alpha + \
                    img.channels[i].data * (1 - smooth_alpha)
                chmask = np.logical_and(img_mask, next_img_mask)
                img.channels[i] = \
                    np.ma.masked_where(chmask, chdata)

    return img
コード例 #43
0
# %%
# median filter
# img_blur = cv2.medianBlur(img, 3)
img_blur = ndimage.median_filter(img, 3)
show(img_blur, 'blur')
# %%
# test with t=128
res = g(img_blur, 128, 256, 0)
show(res, 'test')
# %%
t_ostu = ostu(img_blur)
res_ostu = g(img_blur, t_ostu, 256, 0)
show(res_ostu, 'ostu')
# %%
# erosion
img_eroded = ndimage.grey_erosion(img, 1)
show(img_eroded, 'erosion r = 1')
show(g(img_eroded, 128, 256, 0), 'test_erosion r = 1')
show(g(img_eroded, ostu(img_eroded), 256, 0), 'ostu_erosion r = 1')
img_eroded = ndimage.grey_erosion(img, 3)
show(img_eroded, 'erosion r = 3')
show(g(img_eroded, 128, 256, 0), 'test_erosion r = 3')
show(g(img_eroded, ostu(img_eroded), 256, 0), 'ostu_erosion r = 3')
img_eroded = ndimage.grey_erosion(img, 5)
show(img_eroded, 'erosion r = 5')
show(g(img_eroded, 128, 256, 0), 'test_erosion r = 5')
show(g(img_eroded, ostu(img_eroded), 256, 0), 'ostu_erosion r = 5')
# %%
img_dilated = ndimage.grey_dilation(img, 1)
show(img_dilated, 'dilatation r = 1')
show(g(img_dilated, 128, 256, 0), 'test_dilatation r = 1')
コード例 #44
0
    if counter == 0:
        print snapPrefix
    counter += 1
    model.load_state_dict(saved_state_dict)

    class_list = ['cow', 'horse']
    pytorch_list = []
    for class_ in class_list:
        gt_path = args['--testGTpath'] + class_

        img_list = next(os.walk(gt_path))[2]
        path = sketch_root + class_
        for i in img_list:

            img = cv2.imread(path + '/' + i)
            img = ndimage.grey_erosion(img[:, :, 0].astype(np.uint8),
                                       size=(2, 2))
            img = np.repeat(img[:, :, np.newaxis], 3, 2)
            gt = cv2.imread(gt_path + '/' + i, 0)
            output = model(
                Variable(torch.from_numpy(img[np.newaxis, :].transpose(
                    0, 3, 1, 2)).float(),
                         volatile=True).cuda(gpu0))
            interp = nn.UpsamplingBilinear2d(size=(321, 321))

            if args['--visualize']:
                output_temp = interp(output[3]).cpu().data[0].numpy()
                output_temp = output_temp.transpose(1, 2, 0)
                output_temp = np.argmax(output_temp, axis=2)
                plt.subplot(1, 3, 1)
                plt.imshow(img)
                plt.subplot(1, 3, 2)
コード例 #45
0
def main():
    structure = np.ones((3, 3, 3))
    #.............test1. dense.......
    filename = 'gyroidUniform.npy'
    input = np.load(filename, mmap_mode="r")

    print(
        "..............................dense.............................................."
    )

    #0.Nothing..............
    print("\n nothing testing...")
    output = vc.nothing(input, blockSize=50, fakeGhost=4, makeFloat32=False)
    print("\nresult: ", (input == output).all())
    print(output.dtype, input.dtype)
    #1.grey_dilation..............
    print("\ngrey_dilation VoxelProcessind")
    output = vc.grey_dilation(input, structure=structure, makeFloat32=False)
    print("\ngrey_dilation Default")
    d = ndimage.grey_dilation(input, structure=structure)
    print("\nresult: ", (d == output).all())
    print(output.dtype, input.dtype)
    #2.grey_erosion..............
    print("\ngrey_erosion VoxelProcessind")
    output = vc.grey_erosion(input,
                             makeFloat32=False,
                             size=None,
                             footprint=None,
                             structure=structure,
                             output=None,
                             mode='reflect',
                             cval=0.0,
                             origin=0)
    print("\ngrey_erosion Default")
    d = ndimage.grey_erosion(input,
                             size=None,
                             footprint=None,
                             structure=structure,
                             output=None,
                             mode='reflect',
                             cval=0.0,
                             origin=0)
    print("\nresult: ", (d == output).all())
    #3.grey_closing..............
    print("\ngrey_closing VoxelProcessind")
    output = vc.grey_closing(input,
                             makeFloat32=False,
                             size=None,
                             footprint=None,
                             structure=structure,
                             output=None,
                             mode='reflect',
                             cval=0.0,
                             origin=0)
    print("\ngrey_closing Default")
    d = ndimage.grey_closing(input,
                             size=None,
                             footprint=None,
                             structure=structure,
                             output=None,
                             mode='reflect',
                             cval=0.0,
                             origin=0)
    print("\nresult: ", (d == output).all())
    print(output.dtype, input.dtype)
    #4.grey_opening..............
    print("\ngrey_opening VoxelProcessind")
    output = vc.grey_opening(input,
                             makeFloat32=False,
                             size=None,
                             footprint=None,
                             structure=structure,
                             output=None,
                             mode='reflect',
                             cval=0.0,
                             origin=0)
    print("\ngrey_opening Default")
    d = ndimage.grey_opening(input,
                             size=None,
                             footprint=None,
                             structure=structure,
                             output=None,
                             mode='reflect',
                             cval=0.0,
                             origin=0)
    print("\nresult: ", (d == output).all())
    #5.binary_closing..............
    print("\nbinary_closing VoxelProcessind")
    output = vc.binary_closing(input,
                               makeFloat32=False,
                               structure=None,
                               iterations=1,
                               output=None,
                               origin=0,
                               mask=None,
                               border_value=0,
                               brute_force=False)
    print("\nbinary_closing Default")
    d = ndimage.binary_closing(input,
                               structure=None,
                               iterations=1,
                               output=None,
                               origin=0,
                               mask=None,
                               border_value=0,
                               brute_force=False)
    print("\nresult: ", (d == output).all())
    print(output[151][151][151])
    #6.binary_opening..............
    print("\nbinary_opening VoxelProcessind")
    output = vc.binary_opening(input,
                               structure=None,
                               iterations=1,
                               output=None,
                               origin=0,
                               mask=None,
                               border_value=0,
                               brute_force=False)
    print("\nbinary_opening Default")
    d = ndimage.binary_opening(input,
                               structure=None,
                               iterations=1,
                               output=None,
                               origin=0,
                               mask=None,
                               border_value=0,
                               brute_force=False)
    print("\nresult: ", (d == output).all())
    #7.binary_dilation..............
    print("\nbinary_dilation VoxelProcessind")
    output = vc.binary_dilation(input,
                                makeFloat32=False,
                                structure=structure,
                                iterations=1,
                                mask=None,
                                output=None,
                                border_value=0,
                                origin=0,
                                brute_force=False)
    print("\nbinary_dilation Default")
    d = ndimage.binary_dilation(input,
                                structure=structure,
                                iterations=1,
                                mask=None,
                                output=None,
                                border_value=0,
                                origin=0,
                                brute_force=False)
    print("\nresult: ", (d == output).all())
    #8.binary_erosion..............
    print("\nbinary_erosion VoxelProcessind")
    output = vc.binary_erosion(input,
                               makeFloat32=False,
                               structure=None,
                               iterations=1,
                               mask=None,
                               output=None,
                               border_value=0,
                               origin=0,
                               brute_force=False)
    print("\nbinary_erosion Default")
    d = ndimage.binary_erosion(input,
                               structure=None,
                               iterations=1,
                               mask=None,
                               output=None,
                               border_value=0,
                               origin=0,
                               brute_force=False)
    print("\nresult: ", (d == output).all())
    #9.binary_fill_holes..............
    print("\nbinary_fill_holes VoxelProcessind")
    output = vc.binary_fill_holes(input,
                                  makeFloat32=False,
                                  structure=None,
                                  output=None,
                                  origin=0)
    print("\nbinary_fill_holes Default")
    d = ndimage.binary_fill_holes(input, structure=None, output=None, origin=0)
    print("\nresult: ", (d == output).all())
    #10.binary_hit_or_miss..............
    print("\nbinary_hit_or_miss VoxelProcessind")
    output = vc.binary_hit_or_miss(input,
                                   makeFloat32=False,
                                   structure1=None,
                                   structure2=None,
                                   output=None,
                                   origin1=0,
                                   origin2=None)
    print("\nbinary_hit_or_miss Default")
    d = ndimage.binary_hit_or_miss(input,
                                   structure1=None,
                                   structure2=None,
                                   output=None,
                                   origin1=0,
                                   origin2=None)
    print("\nresult: ", (d == output).all())
    #11.binary_propagation..............
    print("\nbinary_propagation VoxelProcessind")
    output = vc.binary_propagation(input,
                                   makeFloat32=False,
                                   structure=None,
                                   mask=None,
                                   output=None,
                                   border_value=0,
                                   origin=0)
    print("\nbinary_propagation Default")
    d = ndimage.binary_propagation(input,
                                   structure=None,
                                   mask=None,
                                   output=None,
                                   border_value=0,
                                   origin=0)
    print("\nresult: ", (d == output).all())
    #12.black_tophat..............
    print("\nblack_tophat VoxelProcessind")
    output = vc.black_tophat(input,
                             makeFloat32=False,
                             size=None,
                             footprint=None,
                             structure=structure,
                             output=None,
                             mode='reflect',
                             cval=0.0,
                             origin=0)
    print("\nblack_tophat Default")
    d = ndimage.black_tophat(input,
                             size=None,
                             footprint=None,
                             structure=structure,
                             output=None,
                             mode='reflect',
                             cval=0.0,
                             origin=0)
    print("\nresult: ", (d == output).all())
    #13.morphological_gradient..............
    print("\nmorphological_gradient VoxelProcessind")
    output = vc.morphological_gradient(input,
                                       makeFloat32=False,
                                       size=None,
                                       footprint=None,
                                       structure=structure,
                                       output=None,
                                       mode='reflect',
                                       cval=0.0,
                                       origin=0)
    print("\nmorphological_gradient Default")
    d = ndimage.morphological_gradient(input,
                                       size=None,
                                       footprint=None,
                                       structure=structure,
                                       output=None,
                                       mode='reflect',
                                       cval=0.0,
                                       origin=0)
    print("\nresult: ", (d == output).all())
    #14.morphological_laplace..............
    print("\nmorphological_laplace VoxelProcessind")
    output = vc.morphological_laplace(input,
                                      makeFloat32=False,
                                      size=None,
                                      footprint=None,
                                      structure=structure,
                                      output=None,
                                      mode='reflect',
                                      cval=0.0,
                                      origin=0)
    print("\nmorphological_laplace Default")
    d = ndimage.morphological_laplace(input,
                                      size=None,
                                      footprint=None,
                                      structure=structure,
                                      output=None,
                                      mode='reflect',
                                      cval=0.0,
                                      origin=0)
    print("\nresult: ", (d == output).all())
    #15.white_tophat..............
    print("\nwhite_tophat VoxelProcessind")
    output = vc.white_tophat(input,
                             makeFloat32=False,
                             size=None,
                             footprint=None,
                             structure=structure,
                             output=None,
                             mode='reflect',
                             cval=0.0,
                             origin=0)
    print("\nwhite_tophat VoxelProcessind Default")
    d = ndimage.white_tophat(input,
                             size=None,
                             footprint=None,
                             structure=structure,
                             output=None,
                             mode='reflect',
                             cval=0.0,
                             origin=0)
    print("\nresult: ", (d == output).all())
    #16.intMultiply..............
    print("\nintMultiply VoxelProcessind")
    output = vc.intMultiply(input,
                            makeFloat32=False,
                            blockSize=50,
                            fakeGhost=1,
                            scalar=10)
    print("\nintMultiply Default")
    d = input * 10
    print("\nresult: ", (d == output).all())

    print(
        "..............................Sparse.............................................."
    )

    input = random(400, 80000, density=0.3, dtype="float64")
    input = input.todense()
    input = np.array(input)
    input = np.reshape(input, (400, 200, 400))

    #0.Nothing..............
    print("\n nothing testing...")
    output = vc.nothing(input, makeFloat32=False)
    print("\nresult: ", (input == output).all())
    print(output.dtype, input.dtype)
    #1.grey_dilation..............
    print("\ngrey_dilation VoxelProcessind")
    output = vc.grey_dilation(input, structure=structure, makeFloat32=False)
    print("\ngrey_dilation Default")
    d = ndimage.grey_dilation(input, structure=structure)
    print("\nresult: ", (d == output).all())
    print(output.dtype, input.dtype)
    #2.grey_erosion..............
    print("\ngrey_erosion VoxelProcessind")
    output = vc.grey_erosion(input, makeFloat32=False, structure=structure)
    print("\ngrey_erosion Default")
    d = ndimage.grey_erosion(input, structure=structure)
    print("\nresult: ", (d == output).all())
    #3.grey_closing..............
    print("\ngrey_closing VoxelProcessind")
    output = vc.grey_closing(input, makeFloat32=False, structure=structure)
    print("\ngrey_closing Default")
    d = ndimage.grey_closing(input, structure=structure)
    print("\nresult: ", (d == output).all())
    print(output.dtype, input.dtype)
    #4.grey_opening..............
    print("\ngrey_opening VoxelProcessind")
    output = vc.grey_opening(input, makeFloat32=False, structure=structure)
    print("\ngrey_opening Default")
    d = ndimage.grey_opening(input, structure=structure)
    print("\nresult: ", (d == output).all())
    #5.binary_closing..............
    print("\nbinary_closing VoxelProcessind")
    output = vc.binary_closing(input, makeFloat32=False)
    print("\nbinary_closing Default")
    d = ndimage.binary_closing(input)
    print("\nresult: ", (d == output).all())
    #6.binary_opening..............
    print("\nbinary_opening VoxelProcessind")
    output = vc.binary_opening(input, makeFloat32=False)
    print("\nbinary_opening Default")
    d = ndimage.binary_opening(input)
    print("\nresult: ", (d == output).all())
    #7.binary_dilation..............
    print("\nbinary_dilation VoxelProcessind")
    output = vc.binary_dilation(input, makeFloat32=False, structure=structure)
    print("\nbinary_dilation Default")
    d = ndimage.binary_dilation(input, structure=structure)
    print("\nresult: ", (d == output).all())
    #8.binary_erosion..............
    print("\nbinary_erosion VoxelProcessind")
    output = vc.binary_erosion(input, makeFloat32=False)
    print("\nbinary_erosion Default")
    d = ndimage.binary_erosion(input)
    print("\nresult: ", (d == output).all())
    #9.binary_fill_holes..............
    print("\nbinary_fill_holes VoxelProcessind")
    output = vc.binary_fill_holes(input, makeFloat32=False)
    print("\nbinary_fill_holes Default")
    d = ndimage.binary_fill_holes(input)
    print("\nresult: ", (d == output).all())
    #10.binary_hit_or_miss..............
    print("\nbinary_hit_or_miss VoxelProcessind")
    output = vc.binary_hit_or_miss(input, makeFloat32=False)
    print("\nbinary_hit_or_miss Default")
    d = ndimage.binary_hit_or_miss(input)
    print("\nresult: ", (d == output).all())
    #11.binary_propagation..............
    print("\nbinary_propagation VoxelProcessind")
    output = vc.binary_propagation(input, makeFloat32=False)
    print("\nbinary_propagation Default")
    d = ndimage.binary_propagation(input)
    print("\nresult: ", (d == output).all())
    #12.black_tophat..............
    print("\nblack_tophat VoxelProcessind")
    output = vc.black_tophat(input, makeFloat32=False, structure=structure)
    print("\nblack_tophat Default")
    d = ndimage.black_tophat(input, structure=structure)
    print("\nresult: ", (d == output).all())
    #13.morphological_gradient..............
    print("\nmorphological_gradient VoxelProcessind")
    output = vc.morphological_gradient(
        input,
        structure=structure,
        makeFloat32=False,
    )
    print("\nmorphological_gradient Default")
    d = ndimage.morphological_gradient(input, structure=structure)
    print("\nresult: ", (d == output).all())
    #14.morphological_laplace..............
    print("\nmorphological_laplace VoxelProcessind")
    output = vc.morphological_laplace(input,
                                      structure=structure,
                                      makeFloat32=False)
    print("\nmorphological_laplace Default")
    d = ndimage.morphological_laplace(input, structure=structure)
    print("\nresult: ", (d == output).all())
    #15.white_tophat..............
    print("\nwhite_tophat VoxelProcessind")
    output = vc.white_tophat(input, makeFloat32=False, structure=structure)
    print("\nwhite_tophat VoxelProcessind Default")
    d = ndimage.white_tophat(input, structure=structure)
    print("\nresult: ", (d == output).all())
    #16.intMultiply..............
    print("\nintMultiply VoxelProcessind")
    output = vc.intMultiply(input, makeFloat32=False, scalar=10)
    print("\nintMultiply Default")
    d = input * 10
    print("\nresult: ", (d == output).all())
コード例 #46
0
limiti_base = np.empty(4)
limiti_picco = np.empty(4)
#
for prova in N_prove:
    flag_selezione_roi = True
    for file_corrente in lista_file:
        print(file_corrente[-21:-4])
        mappa = np.load(file_corrente)[0, :, :]
        if flag_selezione_roi:
            (limiti_base, limiti_picco) = seleziona_cordinate_mista(mappa)
        flag_selezione_roi = False  # take only the first one
        if True:
            mappa = ndimage.gaussian_filter(mappa, sigma=10)
            footprint = np.matrix([[1, 1, 1], [1, 2, 1], [1, 1, 1]])
            mappa = ndimage.grey_dilation(mappa, footprint=footprint)
            mappa = ndimage.grey_erosion(mappa, footprint=footprint)
        plt.imshow(mappa)
        plt.show()
        media_base = np.mean(mappa[limiti_base[0]:limiti_base[1],
                                   limiti_base[2]:limiti_base[3]])
        std_base = np.std(mappa[limiti_base[0]:limiti_base[1],
                                limiti_base[2]:limiti_base[3]])
        media_picco = np.mean(mappa[limiti_picco[0]:limiti_picco[1],
                                    limiti_picco[2]:limiti_picco[3]])
        std_picco = np.std(mappa[limiti_picco[0]:limiti_picco[1],
                                 limiti_picco[2]:limiti_picco[3]])
        _, ax = plt.subplots()
        ax.imshow(mappa[limiti_picco[0]:limiti_picco[1],
                        limiti_picco[2]:limiti_picco[3]],
                  cmap='inferno')
        plt.show()
コード例 #47
0
    def run(self, workspace):
        '''Run the module on the image set'''
        seed_objects_name = self.seed_objects_name.value
        skeleton_name = self.image_name.value
        seed_objects = workspace.object_set.get_objects(seed_objects_name)
        labels = seed_objects.segmented
        labels_count = np.max(labels)
        label_range = np.arange(labels_count, dtype=np.int32) + 1

        skeleton_image = workspace.image_set.get_image(skeleton_name,
                                                       must_be_binary=True)
        skeleton = skeleton_image.pixel_data
        if skeleton_image.has_mask:
            skeleton = skeleton & skeleton_image.mask
        try:
            labels = skeleton_image.crop_image_similarly(labels)
        except:
            labels, m1 = cpo.size_similarly(skeleton, labels)
            labels[~m1] = 0
        #
        # The following code makes a ring around the seed objects with
        # the skeleton trunks sticking out of it.
        #
        # Create a new skeleton with holes at the seed objects
        # First combine the seed objects with the skeleton so
        # that the skeleton trunks come out of the seed objects.
        #
        # Erode the labels once so that all of the trunk branchpoints
        # will be within the labels
        #
        #
        # Dilate the objects, then subtract them to make a ring
        #
        my_disk = morph.strel_disk(1.5).astype(int)
        dilated_labels = grey_dilation(labels, footprint=my_disk)
        seed_mask = dilated_labels > 0
        combined_skel = skeleton | seed_mask

        closed_labels = grey_erosion(dilated_labels, footprint=my_disk)
        seed_center = closed_labels > 0
        combined_skel = combined_skel & (~seed_center)
        #
        # Fill in single holes (but not a one-pixel hole made by
        # a one-pixel image)
        #
        if self.wants_to_fill_holes:

            def size_fn(area, is_object):
                return (~is_object) and (area <= self.maximum_hole_size.value)

            combined_skel = morph.fill_labeled_holes(combined_skel,
                                                     ~seed_center, size_fn)
        #
        # Reskeletonize to make true branchpoints at the ring boundaries
        #
        combined_skel = morph.skeletonize(combined_skel)
        #
        # The skeleton outside of the labels
        #
        outside_skel = combined_skel & (dilated_labels == 0)
        #
        # Associate all skeleton points with seed objects
        #
        dlabels, distance_map = propagate.propagate(np.zeros(labels.shape),
                                                    dilated_labels,
                                                    combined_skel, 1)
        #
        # Get rid of any branchpoints not connected to seeds
        #
        combined_skel[dlabels == 0] = False
        #
        # Find the branchpoints
        #
        branch_points = morph.branchpoints(combined_skel)
        #
        # Odd case: when four branches meet like this, branchpoints are not
        # assigned because they are arbitrary. So assign them.
        #
        # .  .
        #  B.
        #  .B
        # .  .
        #
        odd_case = (combined_skel[:-1, :-1] & combined_skel[1:, :-1]
                    & combined_skel[:-1, 1:] & combined_skel[1:, 1:])
        branch_points[:-1, :-1][odd_case] = True
        branch_points[1:, 1:][odd_case] = True
        #
        # Find the branching counts for the trunks (# of extra branches
        # emanating from a point other than the line it might be on).
        #
        branching_counts = morph.branchings(combined_skel)
        branching_counts = np.array([0, 0, 0, 1, 2])[branching_counts]
        #
        # Only take branches within 1 of the outside skeleton
        #
        dilated_skel = scind.binary_dilation(outside_skel, morph.eight_connect)
        branching_counts[~dilated_skel] = 0
        #
        # Find the endpoints
        #
        end_points = morph.endpoints(combined_skel)
        #
        # We use two ranges for classification here:
        # * anything within one pixel of the dilated image is a trunk
        # * anything outside of that range is a branch
        #
        nearby_labels = dlabels.copy()
        nearby_labels[distance_map > 1.5] = 0

        outside_labels = dlabels.copy()
        outside_labels[nearby_labels > 0] = 0
        #
        # The trunks are the branchpoints that lie within one pixel of
        # the dilated image.
        #
        if labels_count > 0:
            trunk_counts = fix(
                scind.sum(branching_counts, nearby_labels,
                          label_range)).astype(int)
        else:
            trunk_counts = np.zeros((0, ), int)
        #
        # The branches are the branchpoints that lie outside the seed objects
        #
        if labels_count > 0:
            branch_counts = fix(
                scind.sum(branch_points, outside_labels, label_range))
        else:
            branch_counts = np.zeros((0, ), int)
        #
        # Save the endpoints
        #
        if labels_count > 0:
            end_counts = fix(scind.sum(end_points, outside_labels,
                                       label_range))
        else:
            end_counts = np.zeros((0, ), int)
        #
        # Save measurements
        #
        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        feature = "_".join((C_NEURON, F_NUMBER_TRUNKS, skeleton_name))
        m.add_measurement(seed_objects_name, feature, trunk_counts)
        feature = "_".join(
            (C_NEURON, F_NUMBER_NON_TRUNK_BRANCHES, skeleton_name))
        m.add_measurement(seed_objects_name, feature, branch_counts)
        feature = "_".join((C_NEURON, F_NUMBER_BRANCH_ENDS, skeleton_name))
        m.add_measurement(seed_objects_name, feature, end_counts)
        #
        # Collect the graph information
        #
        if self.wants_neuron_graph:
            trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
            intensity_image = workspace.image_set.get_image(
                self.intensity_image_name.value)
            edge_graph, vertex_graph = self.make_neuron_graph(
                combined_skel, dlabels, trunk_mask,
                branch_points & ~trunk_mask, end_points,
                intensity_image.pixel_data)

            image_number = workspace.measurements.image_set_number

            edge_path, vertex_path = self.get_graph_file_paths(
                m, m.image_number)
            workspace.interaction_request(self,
                                          m.image_number,
                                          edge_path,
                                          edge_graph,
                                          vertex_path,
                                          vertex_graph,
                                          headless_ok=True)

            if self.show_window:
                workspace.display_data.edge_graph = edge_graph
                workspace.display_data.vertex_graph = vertex_graph
                workspace.display_data.intensity_image = intensity_image.pixel_data
        #
        # Make the display image
        #
        if self.show_window or self.wants_branchpoint_image:
            branchpoint_image = np.zeros(
                (skeleton.shape[0], skeleton.shape[1], 3))
            trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
            branch_mask = branch_points & (outside_labels != 0)
            end_mask = end_points & (outside_labels != 0)
            branchpoint_image[outside_skel, :] = 1
            branchpoint_image[trunk_mask | branch_mask | end_mask, :] = 0
            branchpoint_image[trunk_mask, 0] = 1
            branchpoint_image[branch_mask, 1] = 1
            branchpoint_image[end_mask, 2] = 1
            branchpoint_image[dilated_labels != 0, :] *= .875
            branchpoint_image[dilated_labels != 0, :] += .1
            if self.show_window:
                workspace.display_data.branchpoint_image = branchpoint_image
            if self.wants_branchpoint_image:
                bi = cpi.Image(branchpoint_image, parent_image=skeleton_image)
                workspace.image_set.add(self.branchpoint_image_name.value, bi)
コード例 #48
0
def apply_math_morphologie(input_image,
                           morph_type,
                           morph_op='binary',
                           size=3,
                           show_result=False):
    """
    Apply a mathematical morphology operation to an image.
    
    Parameters
    ----------
    input_image : nparray
        The image to which you want to apply the morphology
    morph_type: str
        Must be one of:
                'erosion',
                'dilation',
                'opening',
                'closing',
                'propagation',
                'reconstruction',
                'open/close',
                'full_reconstruction'
    morph_op: str
        'binary' or 'grey', the two supported image types
    size: int
        The morphological structuring element is a size x size matrix of ones
    show_result: Boolean
            If True, the result is plotted using matplotlib, default is False.
    
    Returns
    -------
    nparray
        The image after the morphology, in the same format as the input.
    
    Note
    ----
    Some configurations are not implemented.
    They are:
        grey propagation
        grey reconstruction
        grey full_reconstruction
    """
    if morph_type not in mathematical_morphologies_names:
        raise NotImplementedError(morph_type)
    if morph_op not in mathematical_morphologies_options:
        raise NotImplementedError(morph_op)

    if morph_op == 'binary':
        if morph_type == 'erosion':
            output_image = ndimage.binary_erosion(input_image,
                                                  structure=np.ones(
                                                      (size, size)))
        elif morph_type == 'dilation':
            output_image = ndimage.binary_dilation(input_image,
                                                   structure=np.ones(
                                                       (size, size)))
        elif morph_type == 'opening':
            output_image = ndimage.binary_opening(input_image,
                                                  structure=np.ones(
                                                      (size, size)))
        elif morph_type == 'closing':
            output_image = ndimage.binary_closing(input_image,
                                                  structure=np.ones(
                                                      (size, size)))
        elif morph_type == 'propagation':
            output_image = ndimage.binary_propagation(input_image,
                                                      structure=np.ones(
                                                          (size, size)))
        elif morph_type == 'reconstruction':
            eroded_img = ndimage.binary_erosion(input_image,
                                                structure=np.ones(
                                                    (size, size)))
            output_image = ndimage.binary_propagation(eroded_img,
                                                      structure=np.ones(
                                                          (size, size)),
                                                      mask=input_image)
        elif morph_type == 'open/close':
            open_img = ndimage.binary_opening(
                input_image, structure=np.ones(
                    (size, size)))  # Remove small white regions
            output_image = ndimage.binary_closing(
                open_img, structure=np.ones(
                    (size, size)))  # Remove small black hole
        elif morph_type == 'full_reconstruction':
            eroded_img = ndimage.binary_erosion(input_image,
                                                structure=np.ones(
                                                    (size, size)))
            reconstruct_img = ndimage.binary_propagation(eroded_img,
                                                         structure=np.ones(
                                                             (size, size)),
                                                         mask=input_image)
            tmp = np.logical_not(reconstruct_img)
            eroded_tmp = ndimage.binary_erosion(tmp,
                                                structure=np.ones(
                                                    (size, size)))
            output_image = np.logical_not(
                ndimage.binary_propagation(eroded_tmp,
                                           structure=np.ones((size, size)),
                                           mask=tmp))
    elif morph_op == 'grey':
        if morph_type == 'erosion':
            output_image = ndimage.grey_erosion(input_image, size=size)
        elif morph_type == 'dilation':
            output_image = ndimage.grey_dilation(input_image, size=size)
        elif morph_type == 'opening':
            output_image = ndimage.grey_opening(input_image, size=size)
        elif morph_type == 'closing':
            output_image = ndimage.grey_closing(input_image, size=size)
        elif morph_type == 'propagation':
            raise NotImplementedError
        elif morph_type == 'reconstruction':
            raise NotImplementedError
        elif morph_type == 'open/close':
            open_img = ndimage.grey_opening(
                input_image, size=size)  # Remove small white regions
            output_image = ndimage.grey_closing(
                open_img, size=size)  # Remove small black hole
        elif morph_type == 'full_reconstruction':
            raise NotImplementedError

    if show_result:
        show_images_and_hists(
            [input_image, output_image],
            titles=[
                'Input',
                'Output Image - %s%s\n%s - %s' %
                ("Morph: ", morph_type, str(morph_op), str(size))
            ],
            colorbar=True)

    output_image = output_image.astype(input_image.dtype)  # input format
    return output_image
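A minimal, hypothetical usage sketch for apply_math_morphologie, assuming the module-level names it references (mathematical_morphologies_names, mathematical_morphologies_options, ndimage, np and show_images_and_hists) are defined in the same file, as the body implies:

import numpy as np

# Small binary test image: a 5x5 square of ones with a one-pixel hole.
test_img = np.zeros((9, 9), dtype=np.uint8)
test_img[2:7, 2:7] = 1
test_img[4, 4] = 0

# Binary closing with a 3x3 structuring element fills the hole.
closed = apply_math_morphologie(test_img, 'closing', morph_op='binary', size=3)

# Grey erosion with a 3x3 window shrinks the bright region (and enlarges the hole).
eroded = apply_math_morphologie(test_img, 'erosion', morph_op='grey', size=3)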
コード例 #49
0
a += 0.25 * np.random.standard_normal(a.shape)
print("a======>", a)
mask = a >= 0.5
print("mask=====>", mask)
opened_mask = ndimage.binary_opening(mask)
print("opened_mask=====>", opened_mask)
closed_mask = ndimage.binary_closing(opened_mask)
print("closed_mask=====>", closed_mask)

a = np.zeros((7, 7), dtype=np.int)
a[1:6, 1:6] = 3
a[4, 4] = 2
a[2, 3] = 1
print("a=====>", a)

c = ndimage.grey_erosion(a, size=(3, 3))
print("c=====>", c)

print("==== 图像测量 ====")
x, y = np.indices((300, 300))
sig = np.sin(2 * np.pi * x / 50.) * np.sin(
    2 * np.pi * y / 50.) * (1 + x * y / 50.**2)**2
mask = sig > 1

# Now we look up various pieces of information about the objects in the image:
labels, nb = ndimage.label(mask)
print("labels====>", labels)
print("nb====>", nb)

areas = ndimage.sum(mask, labels, range(1, labels.max() + 1))
print("areas====>", areas)
コード例 #50
0
# <codecell>

import numpy as np
image = np.random.random((512, 512))

footprint = np.array([[0, 1, 0],
                      [1, 1, 1],
                      [0, 1, 0]], dtype=bool)   



# <codecell>

from scipy import ndimage as ndi
%timeit ndi.grey_erosion(image, footprint=footprint)   



# <codecell>

%timeit ndi.generic_filter(image, np.min, footprint=footprint)   



# <codecell>

f'Slowdown is {825 / 2.85} times'   


コード例 #51
0
ファイル: rag.py プロジェクト: Zhang5555/scikit-image
def rag_boundary(labels, edge_map, connectivity=2):
    """ Comouter RAG based on region boundaries

    Given an image's initial segmentation and its edge map this method
    constructs the corresponding Region Adjacency Graph (RAG). Each node in the
    RAG represents a set of pixels within the image with the same label in
    `labels`. The weight between two adjacent regions is the average value
    in `edge_map` along their boundary.

    Parameters
    ----------
    labels : ndarray
        The labelled image.
    edge_map : ndarray
        This should have the same shape as that of `labels`. For all pixels
        along the boundary between 2 adjacent regions, the average value of the
        corresponding pixels in `edge_map` is the edge weight between them.
    connectivity : int, optional
        Pixels with a squared distance less than `connectivity` from each other
        are considered adjacent. It can range from 1 to `labels.ndim`. Its
        behavior is the same as `connectivity` parameter in
        `scipy.ndimage.filters.generate_binary_structure`.

    Examples
    --------
    >>> from skimage import data, segmentation, filters, color
    >>> from skimage.future import graph
    >>> img = data.chelsea()
    >>> labels = segmentation.slic(img)
    >>> edge_map = filters.sobel(color.rgb2gray(img))
    >>> rag = graph.rag_boundary(labels, edge_map)

    """

    conn = ndi.generate_binary_structure(labels.ndim, connectivity)
    eroded = ndi.grey_erosion(labels, footprint=conn)
    dilated = ndi.grey_dilation(labels, footprint=conn)
    boundaries0 = (eroded != labels)
    boundaries1 = (dilated != labels)
    labels_small = np.concatenate((eroded[boundaries0], labels[boundaries1]))
    labels_large = np.concatenate((labels[boundaries0], dilated[boundaries1]))
    n = np.max(labels_large) + 1

    # use a dummy broadcast array as data for RAG
    ones = as_strided(np.ones((1,), dtype=np.float), shape=labels_small.shape,
                      strides=(0,))
    count_matrix = sparse.coo_matrix((ones, (labels_small, labels_large)),
                                     dtype=np.int_, shape=(n, n)).tocsr()
    data = np.concatenate((edge_map[boundaries0], edge_map[boundaries1]))

    data_coo = sparse.coo_matrix((data, (labels_small, labels_large)))
    graph_matrix = data_coo.tocsr()
    graph_matrix.data /= count_matrix.data

    rag = RAG()
    rag.add_weighted_edges_from(_edge_generator_from_csr(graph_matrix),
                                weight='weight')
    rag.add_weighted_edges_from(_edge_generator_from_csr(count_matrix),
                                weight='count')

    for n in rag.nodes():
        rag.node[n].update({'labels': [n]})

    return rag
Code example #52
0
d = ndimage.grey_dilation(input_var, structure=structure)
print("scipy grey_dilation: ", (t.time() - start_time), " sec")
print("\nresult: ", (d == output).all())

#2.grey_erosion..............
print("\ngrey_erosion VoxelProcessing")
start_time = t.time()
output = vc.grey_erosion(input_var,
                         no_of_blocks=7,
                         make_float32=False,
                         structure=structure)
print("vc grey_erosion: ", (t.time() - start_time), " sec")
print("\ngrey_erosion Default")
start_time = t.time()
d = ndimage.grey_erosion(
    input_var,
    structure=structure,
)
print("scipy grey_erosion: ", (t.time() - start_time), " sec")
print("\nresult: ", (d == output).all())

#3.grey_closing..............
print("\ngrey_closing VoxelProcessing")
start_time = t.time()
output = vc.grey_closing(input_var,
                         make_float32=False,
                         size=None,
                         footprint=None,
                         structure=structure,
                         output=None,
                         mode='reflect',
                         cval=0.0,
Code example #53
0
File: trainer.py Project: mahdi139091/dollyzoom
def soc_adaptation_iter(
    modnet, backup_modnet, optimizer, image,
    soc_semantic_scale=100.0, soc_detail_scale=1.0):
    """ Self-Supervised sub-objective consistency (SOC) adaptation iteration of MODNet
    This function fine-tunes MODNet for one iteration in an unlabeled dataset.
    Note that SOC can only fine-tune a converged MODNet, i.e., MODNet that has been 
    trained in a labeled dataset.

    Arguments:
        modnet (torch.nn.Module): instance of MODNet
        backup_modnet (torch.nn.Module): backup of the trained MODNet
        optimizer (torch.optim.Optimizer): optimizer for self-supervised SOC 
        image (torch.autograd.Variable): input RGB image
                                         its pixel values should be normalized
        soc_semantic_scale (float): scale of the SOC semantic loss 
                                    NOTE: please adjust according to your dataset
        soc_detail_scale (float): scale of the SOC detail loss
                                  NOTE: please adjust according to your dataset
    
    Returns:
        soc_semantic_loss (torch.Tensor): loss of the semantic SOC
        soc_detail_loss (torch.Tensor): loss of the detail SOC

    Example:
        import copy
        import torch
        from src.models.modnet import MODNet
        from src.trainer import soc_adaptation_iter

        bs = 1          # batch size
        lr = 0.00001    # learning rate
        epochs = 10     # total epochs

        modnet = torch.nn.DataParallel(MODNet()).cuda()
        modnet = LOAD_TRAINED_CKPT()    # NOTE: please finish this function

        optimizer = torch.optim.Adam(modnet.parameters(), lr=lr, betas=(0.9, 0.99))
        dataloader = CREATE_YOUR_DATALOADER(bs)     # NOTE: please finish this function

        for epoch in range(0, epochs):
            backup_modnet = copy.deepcopy(modnet)
            for idx, (image) in enumerate(dataloader):
                soc_semantic_loss, soc_detail_loss = \
                    soc_adaptation_iter(modnet, backup_modnet, optimizer, image)
    """

    global blurer

    # set the backup model to eval mode
    backup_modnet.eval()

    # set the main model to train mode and freeze its norm layers
    modnet.train()
    modnet.module.freeze_norm()

    # clear the optimizer
    optimizer.zero_grad()

    # forward the main model
    pred_semantic, pred_detail, pred_matte = modnet(image, False)

    # forward the backup model
    with torch.no_grad():
        _, pred_backup_detail, pred_backup_matte = backup_modnet(image, False)

    # calculate the boundary mask from `pred_matte` and `pred_semantic`
    pred_matte_fg = (pred_matte.detach() > 0.1).float()
    pred_semantic_fg = (pred_semantic.detach() > 0.1).float()
    pred_semantic_fg = F.interpolate(pred_semantic_fg, scale_factor=16, mode='bilinear')
    pred_fg = pred_matte_fg * pred_semantic_fg

    n, c, h, w = pred_matte.shape
    np_pred_fg = pred_fg.data.cpu().numpy()
    np_boundaries = np.zeros([n, c, h, w])
    for sdx in range(0, n):
        sample_np_boundaries = np_boundaries[sdx, 0, ...]
        sample_np_pred_fg = np_pred_fg[sdx, 0, ...]

        side = int((h + w) / 2 * 0.05)
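        # dilating and eroding the foreground mask with this window and
        # keeping pixels where the two differ (a morphological gradient)
        # marks a band around the foreground boundary; the detail and matte
        # consistency losses below are restricted to that band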
        dilated = grey_dilation(sample_np_pred_fg, size=(side, side))
        eroded = grey_erosion(sample_np_pred_fg, size=(side, side))

        sample_np_boundaries[np.where(dilated - eroded != 0)] = 1
        np_boundaries[sdx, 0, ...] = sample_np_boundaries

    boundaries = torch.tensor(np_boundaries).float().cuda()

    # sub-objectives consistency between `pred_semantic` and `pred_matte`
    # generate pseudo ground truth for `pred_semantic`
    downsampled_pred_matte = blurer(F.interpolate(pred_matte, scale_factor=1/16, mode='bilinear'))
    pseudo_gt_semantic = downsampled_pred_matte.detach()
    pseudo_gt_semantic = pseudo_gt_semantic * (pseudo_gt_semantic > 0.01).float()
    
    # generate pseudo ground truth for `pred_matte`
    pseudo_gt_matte = pred_semantic.detach()
    pseudo_gt_matte = pseudo_gt_matte * (pseudo_gt_matte > 0.01).float()

    # calculate the SOC semantic loss
    soc_semantic_loss = F.mse_loss(pred_semantic, pseudo_gt_semantic) + F.mse_loss(downsampled_pred_matte, pseudo_gt_matte)
    soc_semantic_loss = soc_semantic_scale * torch.mean(soc_semantic_loss)

    # NOTE: using the formulas in our paper to calculate the following losses has similar results
    # sub-objectives consistency between `pred_detail` and `pred_backup_detail` (on boundaries only)
    backup_detail_loss = boundaries * F.l1_loss(pred_detail, pred_backup_detail)
    backup_detail_loss = torch.sum(backup_detail_loss, dim=(1,2,3)) / torch.sum(boundaries, dim=(1,2,3))
    backup_detail_loss = torch.mean(backup_detail_loss)

    # sub-objectives consistency between `pred_matte` and `pred_backup_matte` (on boundaries only)
    backup_matte_loss = boundaries * F.l1_loss(pred_matte, pred_backup_matte)
    backup_matte_loss = torch.sum(backup_matte_loss, dim=(1,2,3)) / torch.sum(boundaries, dim=(1,2,3))
    backup_matte_loss = torch.mean(backup_matte_loss)

    soc_detail_loss = soc_detail_scale * (backup_detail_loss + backup_matte_loss)

    # calculate the final loss, backward the loss, and update the model 
    loss = soc_semantic_loss + soc_detail_loss

    loss.backward()
    optimizer.step()

    return soc_semantic_loss, soc_detail_loss
Code example #54
0
File: labels.py Project: ktaletsk/napari
    def _raw_to_displayed(self, raw):
        """Determine displayed image from a saved raw image and a saved seed.

        This function ensures that the 0 label gets mapped to the 0 displayed
        pixel.

        Parameters
        ----------
        raw : array or int
            Raw integer input image.

        Returns
        -------
        image : array
            Image mapped between 0 and 1 to be displayed.
        """
        if (not self.show_selected_label
                and self._color_mode == LabelColorMode.DIRECT):
            u, inv = np.unique(raw, return_inverse=True)
            image = np.array([
                self._label_color_index[x] if x in self._label_color_index else
                self._label_color_index[None] for x in u
            ])[inv].reshape(raw.shape)
        elif (not self.show_selected_label
              and self._color_mode == LabelColorMode.AUTO):
            image = np.where(raw > 0, low_discrepancy_image(raw, self._seed),
                             0)
        elif (self.show_selected_label
              and self._color_mode == LabelColorMode.AUTO):
            selected = self._selected_label
            image = np.where(
                raw == selected,
                low_discrepancy_image(selected, self._seed),
                0,
            )
        elif (self.show_selected_label
              and self._color_mode == LabelColorMode.DIRECT):
            selected = self._selected_label
            if selected not in self._label_color_index:
                selected = None
            index = self._label_color_index
            image = np.where(
                raw == selected,
                index[selected],
                np.where(
                    raw != self._background_label,
                    index[None],
                    index[self._background_label],
                ),
            )
        else:
            raise ValueError("Unsupported Color Mode")

        if self.contour > 0 and raw.ndim == 2:
            image = np.zeros_like(raw)
            struct_elem = ndi.generate_binary_structure(raw.ndim, 1)
            thickness = self.contour
            thick_struct_elem = ndi.iterate_structure(struct_elem,
                                                      thickness).astype(bool)
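            # where a 1-pixel grey dilation differs from a `thickness`-wide
            # grey erosion, the pixel sits on a label boundary; keeping only
            # those pixels turns each filled region into an outline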
            boundaries = ndi.grey_dilation(
                raw, footprint=struct_elem) != ndi.grey_erosion(
                    raw, footprint=thick_struct_elem)
            image[boundaries] = raw[boundaries]
            image = np.where(image > 0,
                             low_discrepancy_image(image, self._seed), 0)
        elif self.contour > 0 and raw.ndim > 2:
            warnings.warn("Contours are not displayed during 3D rendering")

        return image
Code example #55
0
def _create_world_composite(items, lon_limits=None,
                            erosion_size=20,
                            smooth_width=20):
    # smooth_sigma = 4

    img = None
    for (path, area, timeslot) in items:

        if not isinstance(area, AreaDefinition):
            area = get_area_def(area)

        next_img = read_image(path, area, timeslot)

        if img is None:
            img = next_img
        else:
            # scaled_smooth_sigma = smooth_sigma * (float(img.width) / 1000.0)

            img_mask = reduce(np.ma.mask_or,
                              [chn.mask for chn in img.channels])
            next_img_mask = reduce(np.ma.mask_or,
                                   [chn.mask for chn in next_img.channels])

            # Mask overlapping areas away
            if lon_limits:
                for sat in lon_limits:
                    if sat in path:
                        mask_limits = calc_pixel_mask_limits(area,
                                                             lon_limits[sat])
                        for lim in mask_limits:
                            next_img_mask[:, lim[0]:lim[1]] = 1
                        break

            alpha = np.ones(next_img_mask.shape, dtype='float')
            alpha[next_img_mask] = 0.0

            if erosion_size is not None and smooth_width is not None:
                scaled_erosion_size = erosion_size * (float(img.width) /
                                                      1000.0)
                scaled_smooth_width = smooth_width * (float(img.width) /
                                                      1000.0)

                # smooth_alpha = ndi.gaussian_filter(
                #     ndi.grey_erosion(alpha, size=(scaled_erosion_size,
                #                                   scaled_erosion_size)),
                #        scaled_smooth_sigma)
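                # eroding the alpha mask pulls the blend boundary inwards from
                # the edge of the new swath, and the uniform filter feathers it
                # so overlapping images cross-fade instead of leaving a hard seam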
                smooth_alpha = ndi.uniform_filter(
                    ndi.grey_erosion(alpha, size=(scaled_erosion_size,
                                                  scaled_erosion_size)),
                    scaled_smooth_width)
                smooth_alpha[img_mask] = alpha[img_mask]
            else:
                smooth_alpha = alpha

            for i in range(0, min(len(img.channels), len(next_img.channels))):
                chdata = next_img.channels[i].data * smooth_alpha + \
                    img.channels[i].data * (1 - smooth_alpha)
                chmask = np.logical_and(img_mask, next_img_mask)
                img.channels[i] = \
                    np.ma.masked_where(chmask, chdata)

    return img
Code example #56
0
                         no_of_blocks=7,
                         make_float32=False,
                         size=None,
                         footprint=None,
                         structure=structure,
                         output=None,
                         mode='reflect',
                         cval=0.0,
                         origin=0)
print("vc grey_erosion: ", (t.time() - start_time), " sec")
print("\ngrey_erosion Default")
start_time = t.time()
d = ndimage.grey_erosion(input_var,
                         size=None,
                         footprint=None,
                         structure=structure,
                         output=None,
                         mode='reflect',
                         cval=0.0,
                         origin=0)
print("scipy grey_erosion: ", (t.time() - start_time), " sec")
print("\nresult: ", (d == output).all())
#3.grey_closing..............
print("\ngrey_closing VoxelProcessing")
start_time = t.time()
output = vc.grey_closing(input_var,
                         make_float32=False,
                         size=None,
                         footprint=None,
                         structure=structure,
                         output=None,
                         mode='reflect',
Code example #57
0
    def _raw_to_displayed(self, raw):
        """Determine displayed image from a saved raw image and a saved seed.

        This function ensures that the 0 label gets mapped to the 0 displayed
        pixel.

        Parameters
        ----------
        raw : array or int
            Raw integer input image.

        Returns
        -------
        image : array
            Image mapped between 0 and 1 to be displayed.
        """
        if raw.dtype == bool:
            raw = raw.view(dtype=np.uint8)

        if (not self.show_selected_label
                and self._color_mode == LabelColorMode.DIRECT):
            u, inv = np.unique(raw, return_inverse=True)
            image = np.array([
                self._label_color_index[x] if x in self._label_color_index else
                self._label_color_index[None] for x in u
            ])[inv].reshape(raw.shape)
        elif (not self.show_selected_label
              and self._color_mode == LabelColorMode.AUTO):
            try:
                image = self._all_vals[raw]
            except IndexError:
                max_val = np.max(raw)
                self._all_vals = low_discrepancy_image(np.arange(max_val + 1),
                                                       self._seed)
                self._all_vals[0] = 0
                image = self._all_vals[raw]
        elif (self.show_selected_label
              and self._color_mode == LabelColorMode.AUTO):
            selected_color = low_discrepancy_image(self._selected_label,
                                                   self._seed)
            if self.selected_label > len(self._all_vals):
                self._all_vals = low_discrepancy_image(
                    np.arange(self.selected_label + 1), self._seed)
            colors = np.zeros(len(self._all_vals))
            colors[self.selected_label] = selected_color
            image = colors[raw]
        elif (self.show_selected_label
              and self._color_mode == LabelColorMode.DIRECT):
            selected = self._selected_label
            if selected not in self._label_color_index:
                selected = None
            index = self._label_color_index
            image = np.where(
                raw == selected,
                index[selected],
                np.where(
                    raw != self._background_label,
                    index[None],
                    index[self._background_label],
                ),
            )
        else:
            raise ValueError("Unsupported Color Mode")

        if self.contour > 0 and raw.ndim == 2:
            image = np.zeros_like(raw)
            struct_elem = ndi.generate_binary_structure(raw.ndim, 1)
            thickness = self.contour
            thick_struct_elem = ndi.iterate_structure(struct_elem,
                                                      thickness).astype(bool)
            boundaries = ndi.grey_dilation(
                raw, footprint=struct_elem) != ndi.grey_erosion(
                    raw, footprint=thick_struct_elem)
            image[boundaries] = raw[boundaries]
            image = self._all_vals[image]
        elif self.contour > 0 and raw.ndim > 2:
            warnings.warn(
                trans._(
                    "Contours are not displayed during 3D rendering",
                    deferred=True,
                ))

        return image
Code example #58
0
import os, sys, scipy
from scipy import ndimage
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image

inFile = ''
outFile = ''

if len(sys.argv) != 3:
    print('Input format error!')
else:
    inFile = sys.argv[1]
    outFile = sys.argv[2]

#im = ndimage.imread(inFile)
#im = ndimage.binary_erosion(im).astype(np.float32)
#scipy.misc.imsave(outFile, im)

#im2 = Image.open(inFile)
#im2 = ndimage.binary_dilation(im2)

im = scipy.misc.imread(inFile, flatten=True).astype(np.uint8)
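# note: scipy.misc.imread/imsave were removed in newer SciPy releases;
# imageio.imread / imageio.imwrite are the usual replacements.
# grey erosion with a 100x10 window takes a local minimum over a tall,
# narrow neighbourhood, strongly eroding bright features vertically.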
im2 = ndimage.grey_erosion(im, size=(100, 10))

scipy.misc.imsave(outFile, im2)
Code example #59
0
File: image_augmentation.py Project: WarBean/MLUtil
 def f(image_config):
     s1 = random.randrange(func_config.min_size, func_config.max_size + 1)
     s2 = random.randrange(func_config.min_size, func_config.max_size + 1)
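     # random window sizes make each augmented sample slightly different; the
     # erosion darkens and thins bright structures by taking a local minimum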
     array = ndimage.grey_erosion(image_config.image, size = (s1, s2))
     image = Image.fromarray(array)
     image_config.image = image
Code example #60
0
def slow_fill(input_array, four_way=False):
    """
    Slow flood fill depressions/sinks in floating point array

    Parameters
    ----------
    input_array : ndarray
        Input array to be filled
    four_way : bool, optional
        If True, search 4 immediately adjacent cells (cross structuring element)
        If False, search all 8 adjacent cells (square structuring element).
        The Default is False.

    Returns
    -------
    out : ndarray
        Filled array

    References
    ----------
    Soille, P., Vogt, J., and Colombo, R., 2003. Carving and Adaptive Drainage Enforcement of Grid Digital Elevation Models. Water Resources Research, 39(12), 1366
    Soille, P., 1999. Morphological Image Analysis: Principles and Applications, Springer-Verlag, pp. 173-174

    """
    print('Slow Fill')

    # Rename or copy input so that input_array is a local variable?
    # input_array = np.copy(input_array)

    # Set h_max to a value larger than the array maximum to ensure
    #   that the while loop will terminate
    h_max = np.max(input_array * 2.0)

    # Build mask of cells with data not on the edge of the image
    # Use 3x3 square Structuring element
    # Build Structuring element only using NumPy module
    # Structuring element could also be built using SciPy ndimage module
    #   el = ndimage.generate_binary_structure(2,2).astype(np.int)
    inside_mask = ndimage.binary_erosion(
        np.isfinite(input_array),
        structure=np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]]).astype(bool))

    # Initialize output array as max value test_array except edges
    output_array = np.copy(input_array)
    output_array[inside_mask] = h_max

    # Array for storing previous iteration
    output_old_array = np.copy(input_array)
    output_old_array[:] = 0

    # Cross structuring element
    if four_way:
        el = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]).astype(bool)
        # el = ndimage.generate_binary_structure(2, 1).astype(np.int)
    else:
        el = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]]).astype(bool)
        # el = ndimage.generate_binary_structure(2, 2).astype(np.int)

    # Iterate until marker array doesn't change
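    # (greyscale reconstruction by erosion: every pass lowers the marker with a
    #  3x3 local-minimum filter but never below the original surface, so
    #  depressions fill up to their lowest pour point; SciPy ignores `size`
    #  when `footprint` is given, so the cross/square element `el` is used)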
    while not np.array_equal(output_old_array, output_array):
        output_old_array = np.copy(output_array)
        output_array = np.maximum(
            input_array,
            ndimage.grey_erosion(output_array, size=(3, 3), footprint=el))
    return output_array