Code example #1
# Imports assumed by this snippet: Pillow (PIL) for image I/O, numpy, and pymorph for morphology.
import numpy as np
import pymorph as morph
from PIL import Image as img, ImageFilter
from PIL.ImageOps import invert


def run_one(fname, outname):
    N = 1  # number of dilate/close passes

    im = img.open(fname)
    #im = im.filter(ImageFilter.BLUR)
    im = im.resize((600, 600), img.ANTIALIAS)  # ANTIALIAS is LANCZOS in newer Pillow
    im = im.convert('L')  # grayscale
    im = invert(im)

    x = np.asarray(im)
    y = x

    size = 3  # structuring-element radius

    for i in range(N):
        y = morph.dilate(y, morph.sedisk(size))
        y = morph.close(y, morph.sedisk(size))

    jm = img.fromarray(y)
    jm = invert(jm)

    jm = jm.resize((400, 400), img.ANTIALIAS)

    jm.save(outname)
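A minimal usage sketch for the snippet above (the file names are hypothetical):

run_one('sketch_raw.png', 'sketch_cleaned.png')  # hypothetical input/output paths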
Code example #2
def buildSeedPoints(image, mode='com'):
    """ Successive set of filters to take a still image, isolate cell-like ROIs, and return a
    list of points representing center points to pass into core.segmentation.pickCells.

    Filters are, in order:  thresholding, morphological closing, and then a connected pixel cutoff filter.

    Often, the best thing to pass in as this image is a high-pass filtered version of your field of view.

    :param image: a 2d numpy array to build points from
    :param mode: an optional string, either 'centroid' or 'com'
    :returns: tuple (seedPoints, seedPointImage): an (N, 2) array of point coordinates and a 2d image with the points marked, respectively
    """
    #    seedMask = ipg.binaryErode(ipg.connectedPixelFilter(pymorph.label(ipg.threshold(ipg.subGaussian(image))>0)))
    #    binarySeedMask = ipg.connectedPixelFilter(pymorph.label(pymorph.close(ipg.threshold(ipg.subGaussian(image))>0)))
    binarySeedMask = ipg.connectedPixelFilter(pymorph.label(pymorph.close(ipg.threshold(image))))
    seedMask = pymorph.label(binarySeedMask)
    seedingRegionProps = regionProps(image, seedMask)

    if mode == 'centroid':
        seedPoints = [r['centroid'] for r in sorted(seedingRegionProps, key=lambda x: x['meanIntensity'], reverse=True)]
    elif mode == 'com':
        seedPoints = [r['com'] for r in sorted(seedingRegionProps, key=lambda x: x['meanIntensity'], reverse=True)]

    #    pdb.set_trace()

    seedPoints = np.floor(np.array(seedPoints)).astype(int)
    seedPointImage = np.zeros_like(seedMask)
    for point in seedPoints:
        seedPointImage[tuple(point)] = 255

    return seedPoints, seedPointImage
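A minimal usage sketch based on the docstring above (the image variable and the high-pass step are illustrative; ipg, pymorph and regionProps are the project helpers the function already relies on):

from scipy import ndimage

# illustrative high-pass filter of the field of view, as the docstring recommends
highpass = image.astype(float) - ndimage.gaussian_filter(image.astype(float), 20)

seedPoints, seedPointImage = buildSeedPoints(highpass, mode='com')
# seedPoints can then be handed to core.segmentation.pickCells as seed centres.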
Code example #3
def GradBasedSegmentation(im):
    blur = nd.gaussian_filter(im, 16)
    rmax = pymorph.regmax(blur)         # regional maxima of the blurred image -> watershed seeds
    T = mahotas.thresholding.otsu(blur)
    bImg0 = im > T                      # Otsu threshold
    # bImg01=nd.binary_closing(bImg0,iterations=2)
    bImg01 = pymorph.close(bImg0, pymorph.sedisk(3))
    bImg = pymorph.open(bImg01, pymorph.sedisk(4))
    # bImg=nd.binary_opening(bImg01,iterations=3)
    b = pymorph.edgeoff(bImg)           # discard objects touching the image border
    d = distanceTranform(b)
    seeds, nr_nuclei = nd.label(rmax)
    lab = mahotas.cwatershed(d, seeds)  # seeded watershed on the distance map
    return lab
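The distanceTranform helper used above is not shown in this excerpt; a plausible stand-in (an assumption, not the project's actual code) builds an inverted Euclidean distance transform so that mahotas.cwatershed floods outward from the seeds:

import numpy as np
import scipy.ndimage as nd

def distanceTranform(b):
    # distance of every foreground pixel to the background...
    dist = nd.distance_transform_edt(b)
    # ...inverted so that object centres become basins for the seeded watershed
    dist = dist.max() - dist
    return dist.astype(np.uint16)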
Code example #4
File: SegExtractChrom.py  Project: zhou0919/DeepFISH
def GradBasedSegmentation(im):
    blur = nd.gaussian_filter(im, 16)
    rmax = pymorph.regmax(blur)
    T = mahotas.thresholding.otsu(blur)
    bImg0 = im > T
    # bImg01=nd.binary_closing(bImg0,iterations=2)
    bImg01 = pymorph.close(bImg0, pymorph.sedisk(3))
    bImg = pymorph.open(bImg01, pymorph.sedisk(4))
    # bImg=nd.binary_opening(bImg01,iterations=3)
    b = pymorph.edgeoff(bImg)
    d = distanceTranform(b)
    seeds, nr_nuclei = nd.label(rmax)
    lab = mahotas.cwatershed(d, seeds)
    return lab
Code example #5
File: fuckyou.py  Project: Tarrasch/ravens-test
import cv2
import numpy as np
import scipy
import pylab as pl
import pymorph
from scipy import misc

def s(fig): pl.imshow(fig); pl.gray(); pl.show()

e = lambda fig: pymorph.erode(fig)
d = lambda fig: pymorph.dilate(fig)
o = lambda fig: pymorph.open(fig)
c = lambda fig: pymorph.close(fig)
a = lambda fun, n: reduce(lambda f1, f2: lambda x: f1(f2(x)), [fun]*n, lambda x: x)

img= 255-cv2.imread('reps/2/2.png', cv2.CV_LOAD_IMAGE_GRAYSCALE)
imgb = img > 128
BW=imgb

# grab contours
cs,_ = cv2.findContours( BW.astype('uint8'), mode=cv2.RETR_LIST,
                             method=cv2.CHAIN_APPROX_SIMPLE )
# set up the 'FilledImage' bit of regionprops.
filledI = np.zeros(BW.shape[0:2]).astype('uint8')
# set up the 'ConvexImage' bit of regionprops.
convexI = np.zeros(BW.shape[0:2]).astype('uint8')

# for each contour c in cs:
# will demonstrate with cs[0] but you could use a loop.
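A short sketch of the loop the final comment hints at, filling filledI and convexI for every contour (same OpenCV calls as used elsewhere on this page):

for c in cs:
    # FilledImage: draw each region filled in white
    cv2.drawContours(filledI, [c], 0, color=255, thickness=-1)
    # ConvexImage: draw the filled convex hull of each region
    hull = cv2.convexHull(c)
    cv2.drawContours(convexI, [hull], 0, color=255, thickness=-1)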
Code example #6
import cv2
import numpy as np
import pylab as pl
import pymorph
from scipy import misc


def s(fig):
    pl.imshow(fig)
    pl.gray()
    pl.show()


e = lambda fig: pymorph.erode(fig)
d = lambda fig: pymorph.dilate(fig)
o = lambda fig: pymorph.open(fig)
c = lambda fig: pymorph.close(fig)
a = lambda fun, n: reduce(lambda f1, f2: lambda x: f1(f2(x)),
                          [fun] * n, lambda x: x)

img = 255 - cv2.imread('reps/2/2.png', cv2.CV_LOAD_IMAGE_GRAYSCALE)
imgb = img > 128
BW = imgb

# grab contours
cs, _ = cv2.findContours(BW.astype('uint8'),
                         mode=cv2.RETR_LIST,
                         method=cv2.CHAIN_APPROX_SIMPLE)
# set up the 'FilledImage' bit of regionprops.
filledI = np.zeros(BW.shape[0:2]).astype('uint8')
# set up the 'ConvexImage' bit of regionprops.
convexI = np.zeros(BW.shape[0:2]).astype('uint8')
Code example #7
glom_hed = rgb2hed(glom_rgb)  # HED color space
glom_h = glom_hed[:, :, 0]    # hematoxylin channel
glom_h = ia.ianormalize(glom_h)
selem = disk(10)  # structuring element
glom_h = np.array(glom_h)
glom_h = 255 - uint8(glom_h)

# Segmentation
glom_by_reconsTopHat = morph.closerecth(glom_h, selem)  # closing-by-reconstruction top-hat

global_thresh = threshold_otsu(glom_by_reconsTopHat)  # Otsu
glom_bin = glom_by_reconsTopHat > global_thresh + global_thresh * 0.1
glom_bin = img_as_ubyte(glom_bin)
selem = disk(3)
glom_seg = morph.open(glom_bin, selem)
glom_seg = morph.close(glom_seg, selem)  # final closing

# Show the processing steps
fig, axes = plt.subplots(2, 3, figsize=(14, 10))
fig.suptitle('Preprocessing, segmentation') 
ax1, ax2, ax3, ax4, ax5, ax6 = axes.ravel()
ax1.imshow(glom_rgb, vmin=0, vmax=255, cmap=plt.cm.gray); ax1.set_title("RGB")
ax2.imshow(glom_hed, vmin=0, vmax=255, cmap=plt.cm.gray); ax2.set_title("HED") 
ax3.imshow(glom_h,  cmap=plt.cm.gray); ax3.set_title("255 - H (Hematoxylin)") 
ax4.imshow(glom_by_reconsTopHat, vmin=0, vmax=255, cmap=plt.cm.gray); ax4.set_title("Closing-by-reconstruction top-hat") 
ax5.imshow(glom_bin, vmin=0, vmax=255, cmap=plt.cm.gray); ax5.set_title("Otsu Thresholding") 
ax6.imshow(glom_seg, vmin=0, vmax=255, cmap=plt.cm.gray); ax6.set_title("Opening") 
#------------------------------------------------------------------------------------------------
#Feature Extraction
print "Feature Extraction:"
label_img = label(glom_seg)  # label connected shapes via 4- and 8-connectivity
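A minimal continuation sketch for the feature-extraction step (assuming label and regionprops come from skimage.measure, as the rest of the snippet suggests; the printed features are only illustrative):

from skimage.measure import regionprops

for region in regionprops(label_img):
    # a few illustrative shape features per labelled candidate region
    print region.label, region.area, region.eccentricity, region.solidity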
Code example #8
    def alternative_solution(self,
                             a,
                             orientation='coronal',
                             linethickness=10,
                             outimg=False):
        '''
        Parameters
        ----------
        a: original image in graylevel
        '''
        H, W = a.shape
        if orientation == 'coronal':
            # UL = mm.limits(a)[1]  # upper limit
            UL = 255

            b = 1 - iacircle(a.shape, H / 3, (1.4 * H / 3, W / 2))  # Circle
            b = b[0:70, W / 2 - 80:W / 2 + 80]  # Rectangle
            if outimg:
                b_ = 0 * a  # b_ only for presentation
                b_[0:70, W / 2 - 80:W / 2 + 80] = UL * b
                b_[:, W / 2 - linethickness / 2:W / 2 + linethickness / 2] = UL

            c = a + 0
            c[:, W / 2 - linethickness / 2:W / 2 + linethickness / 2] = UL
            c[0:70, W / 2 - 80:W / 2 + 80] = (1 - b) * c[0:70, W / 2 - 80:W / 2 + 80] + b * UL
            c[0:40, W / 2 - 70:W / 2 + 70] = UL

            d = mm.open(c, mm.img2se(mm.binary(np.ones((20, 10)))))

            e = mm.close(d, mm.seline(5))

            f = mm.close_holes(e)

            g = mm.subm(f, d)

            h = mm.close_holes(g)

            i = mm.areaopen(h, 1000)

            j1, j2 = iaotsu(i)
            # j = i > j1
            ret, j = cv2.threshold(cv2.GaussianBlur(i, (7, 7), 0), j1, 255,
                                   cv2.THRESH_BINARY)

            k = mm.open(j, mm.seline(20, 90))

            l = mm.areaopen(k, 1000)

            m = mm.label(l)

            res = np.vstack(
                [np.hstack([c, d, e, f, g]),
                 np.hstack([h, i, j, k, l])])
            cv2.imshow('Result', res)
            cv2.waitKey(0)
            cv2.destroyAllWindows()

            ################################
            # l_ = mm.blob(k,'AREA','IMAGE')
            # l = l_ == max(ravel(l_))

            # m = mm.open(l, mm.sedisk(3))  # VERIFICAR O MELHOR ELEMENTO ESTRUTURANTE AQUI

            # n = mm.label(m)

            if outimg:
                if not os.path.isdir('outimg'):
                    os.mkdir('outimg')

                def N(x):
                    # y = uint8(ianormalize(x, (0, 255)) + 0.5)
                    y = (ianormalize(x, (0, 255)) + 0.5).astype(np.uint8)
                    return y

                adwrite('outimg/a.png', N(a))
                adwrite('outimg/b.png', N(b_))
                adwrite('outimg/c.png', N(c))
                adwrite('outimg/d.png', N(d))
                adwrite('outimg/e.png', N(e))
                adwrite('outimg/f.png', N(f))
                adwrite('outimg/g.png', N(g))
                adwrite('outimg/h.png', N(h))
                adwrite('outimg/i.png', N(i))
                adwrite('outimg/j.png', N(j))
                adwrite('outimg/k.png', N(k))
                adwrite('outimg/l.png', N(l))
                adwrite('outimg/m.png', N(m))
                # adwrite('outimg/n.png', N(n))

            return m

        else:
            b = mm.areaopen(a, 500)

            c = mm.close(b, mm.sebox(3))

            d = mm.close_holes(c)

            e = mm.subm(d, c)

            f = mm.areaopen(e, 1000)

            # g = f > 5
            ret, g = cv2.threshold(cv2.GaussianBlur(f, (5, 5), 0), 3, 255,
                                   cv2.THRESH_BINARY)
            # ret, g = cv2.threshold(
            #     cv2.GaussianBlur(f, (7, 7), 0),
            #     5, 255,
            #     cv2.THRESH_BINARY_INV)

            h = mm.asf(g, 'CO', mm.sedisk(5))

            i = mm.close_holes(h)

            res = np.vstack(
                [np.hstack([a, b, c, d, e]),
                 np.hstack([f, g, h, i, a])])
            cv2.imshow('Result', res)
            cv2.waitKey(0)
            cv2.destroyAllWindows()

            if outimg:
                if not os.path.isdir('outimg'):
                    os.mkdir('outimg')

                def N(x):
                    y = (ianormalize(x, (0, 255)) + 0.5).astype(np.uint8)
                    return y

                adwrite('outimg/a.png', N(a))
                adwrite('outimg/b.png', N(b))
                adwrite('outimg/c.png', N(c))
                adwrite('outimg/d.png', N(d))
                adwrite('outimg/e.png', N(e))
                adwrite('outimg/f.png', N(f))
                adwrite('outimg/g.png', N(g))
                adwrite('outimg/h.png', N(h))
                adwrite('outimg/i.png', N(i))

            return i
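A usage sketch for the method above (the segmenter instance and the file name are hypothetical; the enclosing class is not shown in this excerpt):

a = cv2.imread('coronal_slice.png', 0)  # graylevel input, as the docstring requires
mask = segmenter.alternative_solution(a, orientation='coronal',
                                      linethickness=10, outimg=False)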
Code example #9
File: prop.py  Project: Tarrasch/ravens-test
def region_prop(fig, subfig):
  # Inspired by:
  # http://stackoverflow.com/a/9059648/621449
  c = subfig

# set up the 'FilledImage' bit of regionprops.
  FilledImage = np.zeros(fig.shape[0:2]).astype('uint8')
# set up the 'ConvexImage' bit of regionprops.
  ConvexImage = np.zeros(fig.shape[0:2]).astype('uint8')
# calculate some things useful later:
  m = cv2.moments(c)

# ** regionprops **
  Area          = m['m00']
  Perimeter     = cv2.arcLength(c,True)
# bounding box: x,y,width,height
  BoundingBox   = cv2.boundingRect(c)
# centroid    = m10/m00, m01/m00 (x,y)
  Centroid      = ( m['m10']/m['m00'],m['m01']/m['m00'] )

# EquivDiameter: diameter of circle with same area as region
  EquivDiameter = np.sqrt(4*Area/np.pi)
# Extent: ratio of area of region to area of bounding box
  Extent        = Area/(BoundingBox[2]*BoundingBox[3])

# FilledImage: draw the region on in white
  cv2.drawContours( FilledImage, [c], 0, color=255, thickness=-1 )
# calculate indices of that region..
  regionMask    = (FilledImage==255)
# FilledArea: number of pixels filled in FilledImage
  FilledArea    = np.sum(regionMask)
# PixelIdxList : indices of region.
# (np.array of xvals, np.array of yvals)
  PixelIdxList  = regionMask.nonzero()

# CONVEX HULL stuff
# convex hull vertices
  ConvexHull    = cv2.convexHull(c)
  ConvexArea    = cv2.contourArea(ConvexHull)
# Solidity := Area/ConvexArea
  Solidity      = Area/ConvexArea
# convexImage -- draw on ConvexImage
  cv2.drawContours( ConvexImage, [ConvexHull], -1,
                    color=255, thickness=-1 )

# ELLIPSE - determine best-fitting ellipse.
  centre,axes,angle = cv2.fitEllipse(c)
  MAJ = np.argmax(axes) # this is MAJor axis, 1 or 0
  MIN = 1-MAJ # 0 or 1, minor axis
# Note: axes length is 2*radius in that dimension
  MajorAxisLength = axes[MAJ]
  MinorAxisLength = axes[MIN]
  Eccentricity    = np.sqrt(1-(axes[MIN]/axes[MAJ])**2)
  Orientation     = angle
  EllipseCentre   = centre # x,y

  Test = FilledImage.astype('uint8')
  mf = cv2.moments(Test)
  CentroidFilled = ( mf['m10']/mf['m00'],mf['m01']/mf['m00'] )

# # ** if an image is supplied with the fig:
# # Max/Min Intensity (only meaningful for a one-channel img..)
#   MaxIntensity  = np.max(img[regionMask])
#   MinIntensity  = np.min(img[regionMask])
# # Mean Intensity
#   MeanIntensity = np.mean(img[regionMask],axis=0)
# # pixel value
#   PixelValues   = img[regionMask]
  x0, y0, dx, dy = BoundingBox
  x1, y1 = x0 + dx, y0 + dy
  Image = fig[y0:y1, x0:x1]
  FilledImageFit = FilledImage[y0:y1, x0:x1]
  OImage = fig[y0-1:y1+1, x0-1:x1+1]
  NumPixels  = Image.sum()
  Fillity = (NumPixels+0.0)/FilledArea
  crx, cry = (CentroidFilled[0]-x0, CentroidFilled[1]-y0)
  dxc = crx-(x1-x0)/2.0
  dyc = cry-(y1-y0)/2.0
  CentLength = math.sqrt(dxc*dxc + dyc*dyc)

  e = lambda fig: pymorph.erode(fig)
  d = lambda fig: pymorph.dilate(fig)
  o = lambda fig: pymorph.open(fig)
  c = lambda fig: pymorph.close(fig)
  a = lambda fun, n: reduce(lambda f1, f2: lambda x: f1(f2(x)), [fun]*n, lambda x: x)

  Thin = pymorph.thin(OImage)
  if num_holes(Image) >= 2:
    Inner = removeOuter(Thin)
    Inner = (a(d,7))(Inner>0)
    Outer = OImage > Inner

  ret = dict((k,v) for k, v in locals().iteritems() if k[0].isupper())
  return ret
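A usage sketch for region_prop (legacy OpenCV 2 API to match the file; the function also depends on the project's num_holes and removeOuter helpers, which are not shown here):

# fig: binary figure image (uint8/boolean), as elsewhere in this project
contours, _ = cv2.findContours(fig.astype('uint8'), mode=cv2.RETR_LIST,
                               method=cv2.CHAIN_APPROX_SIMPLE)
props = [region_prop(fig, c) for c in contours]
print [p['Area'] for p in props]  # e.g. the area of every region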