Example #1
def initialize_distance_fi(img):
    if init_sdf_mthd == 'otsu':
        iimg = img * 255
        iimg = iimg.astype('uint8')

        thr = mahotas.otsu(iimg)
        timg = iimg * (iimg < thr)
    
        nucleusT = mahotas.otsu(timg)
        cytoplasm = timg > nucleusT
    
        tmp = np.ones_like(img) * -1. * initial_ls_val
        tmp[cytoplasm] = 1. * initial_ls_val
        
        if grad_ini_fi:
            gy,gx = np.gradient(tmp)
            gtmp = gy+gx
            tmp = tmp * (gtmp == 0.)
    elif init_sdf_mthd == 'rcluster':
        tmp = np.random.random(img.shape)
        tmp[tmp < 0.5] =  -1. * initial_ls_val
        tmp[tmp > -1.] =  1. * initial_ls_val
    else:
        tmp = np.random.random(img.shape)
    
    return tmp 
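A hedged driver for initialize_distance_fi above. The function reads the module-level settings init_sdf_mthd, initial_ls_val and grad_ini_fi, so the values below are assumptions, the demo image merely stands in for the real input (scaled to [0, 1], as the 'otsu' branch expects), and the sketch presumes it lives in the same module as the function.
import numpy as np
import mahotas
import mahotas.demos

init_sdf_mthd = 'otsu'   # assumed global: initialization strategy ('otsu' or 'rcluster')
initial_ls_val = 2.      # assumed global: magnitude of the initial level-set values
grad_ini_fi = False      # assumed global: whether to zero out the region boundary

img = mahotas.demos.load('luispedro', as_grey=True)
img = img / img.max()                      # intensities in [0, 1]
phi = initialize_distance_fi(img)
print(phi.shape, np.unique(phi))           # expect -initial_ls_val and +initial_ls_val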
def segment_layer(filename, params):
	'''
	Segment one layer in a stack
	'''
	#extract pixel size in xy and z
	xsize, zsize = extract_zoom(params.folder)

	#load image
	img = tifffile.imread(params.inputfolder + params.folder + filename)

	#normalize image
	img = ndimage.median_filter(img, 3)
	per_low = np.percentile(img, 5)
	img[img < per_low] = per_low
	img = img - img.min()

	per_high = np.percentile(img, 99)
	img[img > per_high] = per_high
	img = img*255./img.max()


	imgf = ndimage.gaussian_filter(img*1., 30./xsize).astype(np.uint8)

	kmask = (imgf > mahotas.otsu(imgf.astype(np.uint8)))*255.

	sizefactor = 10
	small = ndimage.interpolation.zoom(kmask, 1./sizefactor)	#scale the image to a smaller size

	rad = int(300./xsize)

	small_ext = np.zeros([small.shape[0] + 4*rad, small.shape[1] + 4*rad])
	small_ext[2*rad : 2*rad + small.shape[0], 2*rad : 2*rad + small.shape[1]] = small

	small_ext = mahotas.close(small_ext.astype(np.uint8), mahotas.disk(rad))
	small = small_ext[2*rad : 2*rad + small.shape[0], 2*rad : 2*rad + small.shape[1]]
	small = mahotas.close_holes(small)*1.			
	small = small*255./small.max()

	kmask = ndimage.interpolation.zoom(small, sizefactor)	#scale back to normal size
	kmask = normalize(kmask)
	kmask = (kmask > mahotas.otsu(kmask.astype(np.uint8)))*255.	#remove artifacts of interpolation

	if np.median(imgf[np.where(kmask > 0)]) < (np.median(imgf[np.where(kmask == 0)]) + 1)*3:
		kmask = np.zeros_like(kmask)


	#save indices of the kidney mask
#	ind = np.where(kmask > 0)
#	ind = np.array(ind)
#	np.save(params.inputfolder + '../segmented/masks/' + params.folder + filename[:-4] + '.npy', ind)

	#save outlines
	im = np.zeros([img.shape[0], img.shape[1], 3])
	img = tifffile.imread(params.inputfolder + params.folder + filename)
	im[:,:,0] = im[:,:,1] = im[:,:,2] = np.array(img)
	output = overlay(kmask, im, (255,0,0), borders = True)
	tifffile.imsave(params.inputfolder + '../segmented/outlines/' + params.folder + filename[:-4] + '.tif', (output).astype(np.uint8))
def method2(image, sigma):
    image = mh.imread(image)[:, :, 0]
    image = mh.gaussian_filter(image, sigma)
    image = mh.stretch(image)
    binimage = image > mh.otsu(image)
    labeled, _ = mh.label(binimage)
    return labeled
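A possible call to method2 above; 'cells.png' is a placeholder path to an RGB image and sigma=3 an arbitrary smoothing width (the function itself assumes "import mahotas as mh" in its module).
labeled = method2('cells.png', sigma=3)
print('objects found:', labeled.max())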
def segment_glomeruli2d(input_file, tissue_mask_file, output_file, voxel_xy):
    kmask = io.imread(tissue_mask_file)
    if kmask.max() == 0:
        tifffile.imsave(output_file, kmask, compress=5)
        return

    # normalize image
    img = io.imread(input_file)
    img = ndimage.median_filter(img, 3)
    img = img * 255. / img.max()

    # remove all intensity variations larger than maximum radius of a glomerulus
    d = mahotas.disk(int(float(glomeruli_maxrad) / voxel_xy))
    img = img - mahotas.open(img.astype(np.uint8), d)
    img = img * 255. / img.max()
    ch = img[np.where(kmask > 0)]

    # segment glomeruli by otsu thresholding	only if this threshold is higher than the 75-th percentile in the kidney mask
    t = mahotas.otsu(img.astype(np.uint8))

    cells = None

    if t > np.percentile(ch, 75) * 1.5:
        cells = img > t
        cells[np.where(kmask == 0)] = 0
        cells = mahotas.open(
            cells, mahotas.disk(int(float(glomeruli_minrad) / 2. / voxel_xy)))
    else:
        cells = np.zeros_like(img)

    tifffile.imsave(output_file, img_as_ubyte(cells), compress=5)
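segment_glomeruli2d above also relies on the module-level constants glomeruli_maxrad and glomeruli_minrad (radii in the same physical units as voxel_xy). A minimal sketch of its core background-flattening step, with assumed values and a demo image standing in for the real data:
import numpy as np
import mahotas
import mahotas.demos

glomeruli_maxrad = 65.   # assumed maximal glomerulus radius
voxel_xy = 3.            # assumed pixel size

img = mahotas.demos.load('luispedro', as_grey=True).astype(np.uint8)
d = mahotas.disk(int(float(glomeruli_maxrad) / voxel_xy))
flat = img - mahotas.open(img, d)   # suppress intensity variations larger than a glomerulus
print('Otsu threshold on the flattened image:', mahotas.otsu(flat))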
Example #5
    def calculateMask(self, img):
        print 'Masking input'
        if len(np.unique(img)) <= 2:
            print 'Binary input detected, no thresholding performed'
            idx1 = np.where(img == np.unique(img)[0])
            idx2 = np.where(img == np.unique(img)[1])
            img[idx1] = False
            img[idx2] = True
        else:
            print 'Grey input detected'
            T = m.otsu(img, ignore_zeros=False)
            T = T * self.__scale
            img = self.threshold_adaptive(img,
                                          80,
                                          'gaussian',
                                          offset=-20,
                                          param=T)
            img = m.morph.open(img)

        img = m.morph.close(img)
        ''' just a quick fix of the dilation function that caused the binary image to consist of 0 and 2. Now It should be a real binary image '''
        idx1 = np.where(img == np.unique(img)[0])
        idx2 = np.where(img == np.unique(img)[1])
        img[idx1] = 0
        img[idx2] = 255

        w, h = np.shape(img)
        img[0, :] = 0
        img[:, 0] = 0
        img[w - 1, :] = 0
        img[:, h - 1] = 0
        return img
    def ChooseFile(self):
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        fileName, _ = QFileDialog.getOpenFileName(self, "QFileDialog.getOpenFileName()", "",
                                                        "Excel Files (*.xls *.xlsx *.xlsm);; "
                                                        "Tiff Image Files (*.tif);;"
                                                        "Other Image Files (*.jpg *png);;"
                                                        " All Files (*)", options=options)

        if fileName:
            if fileName.endswith('.xls') or fileName.endswith('.xlsx') or fileName.endswith('.xlsm'):
                self.Line_filename.setText('File Uploaded from: '+str(fileName))
                self.seq_dataset = pd.read_excel(fileName, index_col=0);
                self.Button_Run.setEnabled(True); self.Button_ShowImage.setEnabled(False);
                self.Slider_Sigma.setEnabled(False); self.Slider_Dilate.setEnabled(False);
            elif fileName.endswith('.tif') or fileName.endswith('.png') or fileName.endswith('.jpg'):
                self.Line_filename.setText('File Uploaded from: '+str(fileName))
                img = pil.open(fileName); self.img_array = mh.stretch(np.array(img)); img.close()
                self.Button_ShowImage.setEnabled(True); self.Button_Run.setEnabled(False)
                # Load basic threshold params
                self.T_otsu = mh.otsu(self.img_array); self.T_mean = self.img_array.mean()
            else:
                self.Label_status.setText('There appears to be something wrong with your input file')

            self.Button_SNR.setEnabled(False); self.Check_TissueMaxima.setEnabled(False)
            self.Button_DAPI.setEnabled(False);
        else:
            self.Label_status.setText('File not uploaded')
Example #7
    def calculateMask(self,img):
        print 'Masking input'
        if len(np.unique(img))<=2:
            print 'Binary input detected, no thresholding performed'
            idx1=np.where(img==np.unique(img)[0])
            idx2=np.where(img==np.unique(img)[1])
            img[idx1]=False
            img[idx2]=True
        else:
            print 'Grey input detected'
            T=m.otsu(img,ignore_zeros=False)
            T=T*self.__scale
            img = self.threshold_adaptive(img, 80, 'gaussian',offset=-20,param=T)
            img = m.morph.open(img)

        img = m.morph.close(img)
        ''' just a quick fix of the dilation function that caused the binary image to consist of 0 and 2. Now It should be a real binary image '''
        idx1=np.where(img==np.unique(img)[0])
        idx2=np.where(img==np.unique(img)[1])
        img[idx1]=0
        img[idx2]=255
        
        w,h=np.shape(img)
        img[0,:]=0
        img[:,0]=0
        img[w-1,:]=0
        img[:,h-1]=0
        return img
def filterImage(image):
    """
    Filters the given image and returns a binary representation of it.
    """
    
    # otsu to bring out edges
    t_loc_otsu = otsu(image[:, :, 1])
    loc_otsu = np.zeros_like(image, dtype=bool)  # np.bool was removed from NumPy; use the builtin bool
    loc_otsu[:, :, 1] = image[:, :, 1] <= t_loc_otsu + 5
    image[loc_otsu] = 0
    
    # bring out single particles and smooth the rest
    foot = circarea(8)
    green = rank_filter(image[:,:,1], foot, rank=44)
    nonzero = green > 10
    weak = (green > 20) & (green < green[nonzero].mean())
    green[weak] += 40
    
    # remove pollution
    gray = cv2.medianBlur(green, ksize=13)
    
    # black and white representation of particles and surroundings
    binary = gray < 25
    
    # dilatation and erosion
    dilated1 = ndimage.binary_dilation(binary, iterations=6)
    erosed = ndimage.binary_erosion(dilated1, iterations=_EROSIONFACTOR+3)
    dilated = ndimage.binary_dilation(erosed, iterations=_EROSIONFACTOR)
    return dilated
Example #9
def method2(image, sigma):
    image = mh.imread(image)[:, :, 0]
    image = mh.gaussian_filter(image, sigma)
    image = mh.stretch(image)
    binimage = (image > mh.otsu(image))
    labeled, _ = mh.label(binimage)
    return labeled
Example #10
 def __init__(self,
              label_image,
              intensity_image,
              theta_range=4,
              frequencies={1, 5, 10},
              radius={1, 5, 10},
              scales={1},
              threshold=None,
              compute_haralick=False,
              compute_TAS=False,
              compute_LBP=False):
     '''
     Parameters
     ----------
     label_image: numpy.ndarray[numpy.int32]
         labeled image encoding objects (connected pixel components)
         for which features should be extracted
     intensity_image: numpy.ndarray[numpy.uint16 or numpy.uint8]
         grayscale image from which texture features should be extracted
     theta_range: int, optional
         number of angles to define the orientations of the Gabor
         filters (default: ``4``)
     frequencies: Set[int], optional
         frequencies of the Gabor filters (default: ``{1, 5, 10}``)
     scales: Set[int], optional
         scales at which to compute the Haralick textures (default: ``{1}``)
     threshold: int, optional
         threshold value for Threshold Adjacency Statistics (TAS)
         (defaults to value computed by Otsu's method)
     radius: Set[int], optional
         radius for defining pixel neighbourhood for Local Binary Patterns
         (LBP) (default: ``{1, 5, 10}``)
     compute_haralick: bool, optional
         whether Haralick features should be computed
         (the computation is computationally expensive) (default: ``False``)
     '''
     super(Texture, self).__init__(label_image, intensity_image)
     self.theta_range = theta_range
     self.frequencies = frequencies
     self.radius = radius
     self.scales = scales
     if threshold is None:
         self._threshold = mh.otsu(intensity_image)
     else:
         if not isinstance(threshold, int):
             raise ValueError('Argument "threshold" must have type int.')
         self._threshold = threshold
     self._clip_value = np.percentile(intensity_image, 99.999)
     if not isinstance(theta_range, int):
         raise TypeError('Argument "theta_range" must have type int.')
     if not all([isinstance(f, int) for f in self.frequencies]):
         raise TypeError(
             'Elements of argument "frequencies" must have type int.')
     if not all([isinstance(s, int) for s in self.scales]):
         raise TypeError(
             'Elements of argument "scales" must have type int.')
     self.compute_haralick = compute_haralick
     self.compute_TAS = compute_TAS
     self.compute_LBP = compute_LBP
Example #11
def thresholding(image, inv=False):
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    thresh_type = cv2.THRESH_BINARY
    if inv:
        thresh_type = cv2.THRESH_BINARY_INV
    blurred = GaussianBlur(grayscale, 7)
    T = mahotas.otsu(blurred)
    thresh = cv2.threshold(blurred, T, 255, thresh_type)[1]
    return thresh
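A possible driver for thresholding above; it assumes a GaussianBlur(gray, ksize) helper is defined alongside it (a thin wrapper such as the one sketched here) and 'document.jpg' is a placeholder path.
import cv2

def GaussianBlur(gray, ksize):
    # assumed helper: square Gaussian kernel, sigma chosen automatically
    return cv2.GaussianBlur(gray, (ksize, ksize), 0)

image = cv2.imread('document.jpg')
binary = thresholding(image, inv=True)
cv2.imwrite('document_bin.png', binary)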
Example #12
	def global_threshold(self, im_co, vis_pix, args): #do global thresholding to isolate slow fibers
		co_pix = np.array(im_co.getdata(),dtype=np.uint8)
		co_pix = co_pix*vis_pix
		T_otsu = mh.otsu(co_pix.reshape(im_co.size[1],im_co.size[0]))
		thresholded_copix = (co_pix*(co_pix > T_otsu))
		#thresholded_copix = si.grey_erosion(np.array(thresholded_copix).reshape(im_co.size[1],im_co.size[0]), size=(3,3))
		thresholded_copix = si.grey_closing(np.array(thresholded_copix).reshape(im_co.size[1],im_co.size[0]), size=(10,10))
		#thresholded_copix = si.grey_closing(thresholded_copix, size=(3,3))
		return thresholded_copix
Example #13
def binarization(im, x, y, window):
    N, M = im.shape
    r = zeros_like(im)
    wx, wy = (int(N / x), int(M / y))
    aux = zeros((wx, wy))
    if (window == 1):
        for i in range(0, x):
            for j in range(0, y):
                aux = im[i * wx:(i + 1) * wx, j * wy:(j + 1) * wy]
                photo = aux.astype(np.uint8)
                T_otsu = mahotas.otsu(photo)
                binary = photo > T_otsu
                r[i * wx:(i + 1) * wx, j * wy:(j + 1) * wy] = binary
    else:
        photo = im.astype(np.uint8)
        T_otsu = mahotas.otsu(photo)
        r = photo > T_otsu
    return r
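A short sketch exercising binarization above on a demo image, comparing the block-wise variant (window=1, here a 4x4 grid of tiles) with the single global Otsu threshold; the grid size and image are arbitrary, and the bare numpy names the function uses (zeros_like, zeros) are assumed to be imported in its module.
import numpy as np
import mahotas
import mahotas.demos

im = mahotas.demos.load('luispedro', as_grey=True).astype(np.uint8)
local_bin = binarization(im, 4, 4, window=1)    # one Otsu threshold per tile
global_bin = binarization(im, 4, 4, window=0)   # a single global Otsu threshold
print('pixels that differ:', int(np.sum(local_bin != global_bin)))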
Example #14
def findobject(file):
    """finds objects.  Expects a smoothed rectified amplitude envelope"""
    value = (otsu(np.array(file, dtype=np.uint32))) / 2  #calculate a threshold
    #value=(np.average(file))/2 #heuristically, this also usually works  for establishing threshold
    thresh = threshold(file, value)  #threshold the envelope data
    thresh = threshold(sc.ndimage.convolve(thresh, np.ones(512)),
                       0.5)  #pad the threshold
    label = (sc.ndimage.label(thresh)[0])  #label objects in the threshold
    objs = sc.ndimage.find_objects(label)  #recover object positions
    return (objs)
Example #15
def __test_lena():
    print "> Testing lena"
    from scipy.misc.common import lena
    from scipy.misc import imsave
    l = lena().astype('uint8')
    o = mahotas.otsu(l)
    #bl = np.empty(l.shape, dtype='uint8')
    bl = l >= o
    s = smooth(bl, N) 
    print s.max(), s.min()
    imsave('smoothed_lena.png', s)
    imsave('lena.png', bl)
Example #16
    def thresholdimage(self, factor=1.0, erode=True):
        """Thresholding with optional erosion"""

        if not hasattr(self, "filtered"):
            self.filterimage()

        if self.load_archive("threshold"):
            return

        limit = factor*mahotas.otsu(self.filtered)
        self.threshold = self.filtered > limit
        if erode:
            self.threshold = pymorph.erode(self.threshold)
Example #17
def find_centroids(img, bkg=None, threshold=None, min_size=0):
    if bkg is None:
        bkg = gaussian(img, 10)
    bkg = bkg.astype(np.uint16)  # astype returns a copy; assign it back
    img = convert(img-bkg,np.uint8) #<- This changes the execution speed ~5-fold
    if threshold is None:
        threshold = mh.otsu(img)
#     mask = img>threshold
#     labels = np.array(mh.label(img>threshold)[0])
    labels = label(img>threshold)  # <- This is faster
    props = regionprops(labels, img, cache=True)  # <- Cache True is faster
#     num_pixels = [p['filled_area'] for p in props]
    centroids = [p['centroid'] for p in props if p['filled_area']>=min_size]
    return centroids
def thresholdCube(outputArray):
    """
    Author: Sindhura Thirumal
    Takes in array outputted by trainModel and thresholds to create a binary image.
    Output is the binary image array.
    """
    outputArray *= 255.0 / outputArray.max(
    )  # Scale image to be in range 0-255
    intImg = outputArray.astype(
        np.uint8)  # Convert data type of array to uint8
    for i in range(intImg.shape[0]):  # iterate over the slices indexed below (axis 0)
        otsuThreshVal = mh.otsu(intImg[i])  # Otsu thresholding
        intImg[i] = intImg[i] > otsuThreshVal
    return intImg
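A minimal sketch driving thresholdCube above with a synthetic stack; the shape (slices first, matching the per-slice indexing in the loop) and the random data are placeholders for the model output it normally receives.
import numpy as np
import mahotas as mh

rng = np.random.default_rng(0)
cube = rng.random((5, 64, 64))     # assumed layout: (slices, height, width)
binary = thresholdCube(cube)
print(binary.shape, binary.max())  # each slice is now 0/1 after its own Otsu threshold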
Example #19
def peppers():
    # This last image is the peppers.png file
    my_image = mh.imread(filename3)
    T = mh.otsu(my_image)
    b_image = (my_image > T)
    g_image = mh.gaussian_filter(b_image, 15)
    rmax = mh.regmax(g_image)
    labeled, nr_objects = mh.label(rmax)
    centers = mh.center_of_mass(my_image, labeled)[1:]
    print "The peppers.png file contains ", nr_objects, " objects."
    o = 1
    for center in centers:
        print "Object %s center: [ %s, %s ]"               %(o, round(center[1], 0), round(center[0], 0))
        o = o + 1
def line_scan(photo, line_sep=10):
    """Find points in the image that lie on a series of repeating lines."""
    photo = photo * 1.5
    photo[photo > 255] = 255
    photo = photo.astype(np.uint8)
    height, width = photo.shape
    gray()
    T_photo = mahotas.otsu(photo)
    photo = (photo < T_photo)
    percent = .5  #0.29
    rang = range(int(percent * height), int(0.9 * height), line_sep)
    array = np.zeros(photo.shape)
    array[rang, :] = photo[rang, :] == False
    #print(array.shape)
    return array  #pixel_vec
Example #21
def peppers():
    # This last image is the peppers.png file
    my_image = mh.imread(filename3)
    T = mh.otsu(my_image)
    b_image = (my_image > T)
    g_image = mh.gaussian_filter(b_image, 15)
    rmax = mh.regmax(g_image)
    labeled, nr_objects = mh.label(rmax)
    centers = mh.center_of_mass(my_image, labeled)[1:]
    print "The peppers.png file contains ", nr_objects, " objects."
    o = 1
    for center in centers:
        print "Object %s center: [ %s, %s ]" % (o, round(
            center[1], 0), round(center[0], 0))
        o = o + 1
Example #22
 def __init__(self, label_image, intensity_image,
         theta_range=4, frequencies={1, 5, 10}, radius={1, 5, 10},
         threshold=None, compute_haralick=False):
     '''
     Parameters
     ----------
     label_image: numpy.ndarray[numpy.int32]
         labeled image encoding objects (connected pixel components)
         for which features should be extracted
     intensity_image: numpy.ndarray[numpy.uint16 or numpy.uint8]
         grayscale image from which texture features should be extracted
     theta_range: int, optional
         number of angles to define the orientations of the Gabor
         filters (default: ``4``)
     frequencies: Set[int], optional
         frequencies of the Gabor filters (default: ``{1, 5, 10}``)
     threshold: int, optional
         threshold value for Threshold Adjacency Statistics (TAS)
         (defaults to value computed by Otsu's method)
     radius: Set[int], optional
         radius for defining pixel neighbourhood for Local Binary Patterns
         (LBP) (default: ``{1, 5, 10}``)
     compute_haralick: bool, optional
         whether Haralick features should be computed
         (the computation is computationally expensive) (default: ``False``)
     '''
     super(Texture, self).__init__(label_image, intensity_image)
     self.theta_range = theta_range
     self.frequencies = frequencies
     self.radius = radius
     if threshold is None:
         self._threshold = mh.otsu(intensity_image)
     else:
         if not isinstance(threshold, int):
             raise ValueError('Argument "threshold" must have type int.')
         self._threshold = threshold
     self._clip_value = np.percentile(intensity_image, 99.999)
     if not isinstance(theta_range, int):
         raise TypeError(
             'Argument "theta_range" must have type int.'
         )
     if not all([isinstance(f, int) for f in self.frequencies]):
         raise TypeError(
             'Elements of argument "frequencies" must have type int.'
         )
     self.compute_haralick = compute_haralick
    def convertGStoOtsu(self):
        global counter
        global fdr
        global now
        imageOtsu = mahotas.imread(GS_path+str(counter)+".jpg", as_grey=True)
        imageOtsu = imageOtsu.astype(np.uint8)
        ThresholdOtsu = mahotas.otsu(imageOtsu)
        ax = pylab.axes([0,0,1,1], frameon=False)
        ax.set_axis_off()
        im = pylab.imshow(imageOtsu > ThresholdOtsu)
        pylab.savefig(Otsu_path+str(counter)+".jpg")
        now = datetime.datetime.now()
        currentTime = str(now.hour)+":"+str(now.minute)+":"+str(now.second)
        camUpdateStatus = [currentTime+"\t\tCamera\t\t\t\tConverting.\t\t"+Otsu_path+str(counter)+".jpg"+"\n"]
        fdr.writeToFDR(camUpdateStatus)
        fdr.closeFDR()
        self.generatePixelData()
def watershedSegment(image, diskSize=20):
    gradmag = gradientMagnitudue(image)

    ## compute foreground markers

    # open image to create flat regions at cell centers
    se_disk = pymorph.sedisk(diskSize) 
    image_opened = mahotas.open(image, se_disk);

    # define foreground markers as regional maxes of cells
    # this step is slow!
    foreground_markers = mahotas.regmax(image_opened)

    ## compute background markers

    # Threshold the image, cast it to the right datatype, and then calculate the distance image
    image_black_white = image_opened > mahotas.otsu(image_opened)
    image_black_white = image_black_white.astype('uint16')

    # note the inversion here- a key difference from the matlab algorithm
    # matlab distance is to nearest non-zero pixel
    # python distance is to nearest 0 pixel
    image_distance = pymorph.to_uint16(nd.distance_transform_edt(np.logical_not(image_black_white)))
    eight_conn = pymorph.sebox()

    distance_markers = mahotas.label(mahotas.regmin(image_distance, eight_conn))[0]
    image_dist_wshed, image_dist_wshed_lines = mahotas.cwatershed(image_distance, distance_markers, eight_conn, return_lines=True)
    background_markers = image_dist_wshed_lines - image_black_white

    all_markers = np.logical_or(foreground_markers, background_markers)

    # impose a min on the gradient image.  assumes int64
    gradmag2 = imimposemin(gradmag.astype(int), all_markers, eight_conn)

    # call watershed
    segmented_cells, segmented_cell_lines = mahotas.cwatershed(gradmag2, mahotas.label(all_markers)[0], eight_conn, return_lines=True)

    # seperate watershed regions
    segmented_cells[gradientMagnitudue(segmented_cells) > 0] = 0
    return segmented_cells > 0, segmented_cells
def segment_tissue2d(input_file, output_file, voxel_xy):
    # load image
    img = io.imread(input_file)

    # normalize image
    img = ndimage.median_filter(img, 3)
    img = img * 255. / img.max()

    ##segment kidney tissue

    sizefactor = 10.
    small = ndimage.interpolation.zoom(
        img, 1. / sizefactor)  # scale the image to a smaller size

    imgf = ndimage.gaussian_filter(small, 3. / voxel_xy)  # Gaussian filter
    median = np.percentile(imgf, 40)  # 40-th percentile for thresholding

    kmask = imgf > median * 1.5  # thresholding
    kmask = mahotas.dilate(kmask, mahotas.disk(5))
    kmask = mahotas.close_holes(kmask)  # closing holes
    kmask = mahotas.erode(kmask, mahotas.disk(5)) * 255

    # remove objects that are darker than 2*percentile
    l, n = ndimage.label(kmask)
    llist = np.unique(l)
    if len(llist) > 2:
        means = ndimage.mean(imgf, l, llist)
        bv = llist[np.where(means < median * 2)]
        ix = np.in1d(l.ravel(), bv).reshape(l.shape)
        kmask[ix] = 0

    kmask = ndimage.interpolation.zoom(kmask,
                                       sizefactor)  # scale back to normal size
    kmask = normalize(kmask)
    kmask = (kmask > mahotas.otsu(kmask.astype(np.uint8))
             )  # remove artifacts of interpolation

    tifffile.imsave(output_file, img_as_ubyte(kmask), compress=5)
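A possible invocation of segment_tissue2d above; the file names are placeholders, voxel_xy is an assumed pixel size, and the function additionally expects io, normalize and img_as_ubyte to be imported in its module.
from skimage import io

segment_tissue2d('layer_0001.tif', 'tissue_0001.tif', voxel_xy=3.0)
mask = io.imread('tissue_0001.tif')
print('tissue pixels:', int((mask > 0).sum()))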
def CountObjects(img6d):

    obj = np.zeros(img6d.shape[0])

    print('Start Processing ...',)
    steps = img6d.shape[0]/10

    # count cells with individual thresholds per frame
    for i in range(0, img6d.shape[0], 1):

        img = img6d[i, 0, 0, 0, :, :]
        T = mahotas.otsu(img)
        img = (img > T)
        img = mahotas.gaussian_filter(img, 0.5)
        labeled, numobjects = mahotas.label(img)
        obj[i] = numobjects
        if i % steps == 0:
            print('\b.',)
            sys.stdout.flush()

    print('Done!',)

    return obj, labeled
Example #27
def main(image, correction_factor=1, min_threshold=None, max_threshold=None,
        plot=False):
    '''Thresholds an image by applying an automatically determined global
    threshold level using
    `Otsu's method <https://en.wikipedia.org/wiki/Otsu%27s_method>`_.

    Additional parameters allow correction of the calculated threshold
    level or restricting it to a defined range. This may be useful to prevent
    extreme levels in case the `image` contains artifacts. Setting
    `min_threshold` and `max_threshold` to the same value results in a
    manual thresholding.

    Parameters
    ----------
    image: numpy.ndarray[numpy.uint8 or numpy.unit16]
        grayscale image that should be thresholded
    correction_factor: int, optional
        value by which the calculated threshold level will be multiplied
        (default: ``1``)
    min_threshold: int, optional
        minimal threshold level (default: ``numpy.min(image)``)
    max_threshold: int, optional
        maximal threshold level (default: ``numpy.max(image)``)
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.threshold_otsu.Output[Union[numpy.ndarray, str]]
    '''
    if max_threshold is None:
        max_threshold = np.max(image)
    logger.debug('set maximal threshold: %d', max_threshold)

    if min_threshold is None:
        min_threshold = np.min(image)
    logger.debug('set minimal threshold: %d', min_threshold)
    logger.debug('set threshold correction factor: %.2f', correction_factor)

    threshold = mh.otsu(image)
    logger.info('calculated threshold level: %d', threshold)

    corr_threshold = threshold * correction_factor
    logger.info('corrected threshold level: %d', corr_threshold)

    if corr_threshold > max_threshold:
        logger.info('set threshold level to maximum: %d', max_threshold)
        corr_threshold = max_threshold
    elif corr_threshold < min_threshold:
        logger.info('set threshold level to minimum: %d', min_threshold)
        corr_threshold = min_threshold

    logger.info('threshold image at %d', corr_threshold)
    mask = image > corr_threshold

    if plot:
        logger.info('create plot')
        from jtlib import plotting
        outlines = mh.morph.dilate(mh.labeled.bwperim(mask))
        plots = [
            plotting.create_intensity_overlay_image_plot(
                image, outlines, 'ul'
            ),
            plotting.create_mask_image_plot(mask, 'ur')
        ]
        figure = plotting.create_figure(
            plots, title='thresholded at %s' % corr_threshold
        )
    else:
        figure = str()

    return Output(mask, figure)
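main above also depends on the jtmodules Output type, a module-level logger and jtlib.plotting. A minimal standalone sketch of just its correct-and-clamp thresholding logic (the helper name otsu_threshold_mask and the demo image are ours):
import numpy as np
import mahotas as mh
import mahotas.demos

def otsu_threshold_mask(image, correction_factor=1.0, min_threshold=None,
                        max_threshold=None):
    # Otsu level, scaled by correction_factor and clamped to the allowed range.
    if max_threshold is None:
        max_threshold = np.max(image)
    if min_threshold is None:
        min_threshold = np.min(image)
    corr_threshold = mh.otsu(image) * correction_factor
    corr_threshold = min(max(corr_threshold, min_threshold), max_threshold)
    return image > corr_threshold

img = mh.demos.load('luispedro', as_grey=True).astype(np.uint8)
print('foreground fraction:', otsu_threshold_mask(img, correction_factor=1.2).mean())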
Example #28
import mahotas
import cv2

from os import path
luispedro_image = path.join(
            path.dirname(mahotas.__file__),
            'demos',
            'data',
            'luispedro.jpg')
f = mahotas.imread(luispedro_image, as_grey=True)
markers = np.zeros_like(f)
markers[100,100] = 1
markers[200,200] = 2
f = f.astype(np.uint8)
markers = markers.astype(int)
otsu = mahotas.otsu(f.astype(np.uint8))
fbin = f > otsu
fbin8 = fbin.astype(np.uint8)
Bc = np.eye(3)
Bc = Bc.astype(bool)
Bc8 = Bc.astype(np.uint8)
f3 = np.dstack([f,f,f])
f3 = f3.astype(np.uint8)
f3 = f3.copy()
filt = np.array([
    [1,0,-1,0],
    [2,2,3,-2],
    [-1,0,0,1]
    ])
markers32 = markers.astype(np.int32)
import matplotlib.pyplot as plt
import numpy as np
import mahotas
photo = mahotas.demos.load('luispedro', as_grey=True)
photo = photo.astype(np.uint8)
T_otsu = mahotas.otsu(photo)
plt.imshow(photo > T_otsu, cmap='gray')
plt.axis('off')
plt.show()
Example #30
def segment_layer(filename, params):
    '''
	Segment one layer in a stack
	'''
    start = time.time()
    #extract pixel size in xy and z
    xsize, zsize = extract_zoom(params.folder)

    #load image
    img = tifffile.imread(params.inputfolder + params.folder + filename)

    #normalize image
    img = ndimage.median_filter(img, 3)
    img = img * 255. / img.max()

    ##segment kidney tissue

    sizefactor = 10.
    small = ndimage.interpolation.zoom(
        img, 1. / sizefactor)  #scale the image to a smaller size

    imgf = ndimage.gaussian_filter(small, 3. / xsize)  #Gaussian filter
    median = np.percentile(imgf, 40)  #40-th percentile for thresholding

    kmask = imgf > median * 1.5  #thresholding
    kmask = mahotas.dilate(kmask, mahotas.disk(5))
    kmask = mahotas.close_holes(kmask)  #closing holes
    kmask = mahotas.erode(kmask, mahotas.disk(5)) * 255

    #remove objects that are darker than 2*percentile
    l, n = ndimage.label(kmask)
    llist = np.unique(l)
    if len(llist) > 2:
        means = ndimage.mean(imgf, l, llist)
        bv = llist[np.where(means < median * 2)]
        ix = np.in1d(l.ravel(), bv).reshape(l.shape)
        kmask[ix] = 0

    kmask = ndimage.interpolation.zoom(kmask,
                                       sizefactor)  #scale back to normal size
    kmask = normalize(kmask)
    kmask = (kmask > mahotas.otsu(kmask.astype(
        np.uint8))) * 255.  #remove artifacts of interpolation

    #save indices of the kidney mask
    ind = np.where(kmask > 0)
    ind = np.array(ind)
    np.save(
        params.inputfolder + '../segmented/masks/kidney/' + params.folder +
        filename[:-4] + '.npy', ind)

    #segment glomeruli, if there is a kidney tissue
    if kmask.max() > 0:
        #remove all intensity variations larger than maximum radius of a glomerulus
        d = mahotas.disk(int(float(params.maxrad) / xsize))
        img = img - mahotas.open(img.astype(np.uint8), d)
        img = img * 255. / img.max()
        ch = img[np.where(kmask > 0)]

        #segment glomeruli by otsu thresholding	only if this threshold is higher than the 75-th percentile in the kidney mask
        t = mahotas.otsu(img.astype(np.uint8))

        if t > np.percentile(ch, 75) * 1.5:
            cells = img > t
            cells[np.where(kmask == 0)] = 0
            cells = mahotas.open(
                cells, mahotas.disk(int(float(params.minrad) / 2. / xsize)))

        else:
            cells = np.zeros_like(img)

    else:
        cells = np.zeros_like(img)

    #save indices of the glomeruli mask
    ind = np.where(cells > 0)
    ind = np.array(ind)
    np.save(
        params.inputfolder + '../segmented/masks/glomeruli/' + params.folder +
        filename[:-4] + '.npy', ind)
Example #31
def separate_clumped_objects(clumps_image, min_cut_area, min_area, max_area,
        max_circularity, max_convexity):
    '''Separates objects in `clumps_image` based on morphological criteria.

    Parameters
    ----------
    clumps_image: numpy.ndarray[Union[numpy.int32, numpy.bool]]
        objects that should be separated
    min_cut_area: int
        minimal area an object must have (prevents cuts that would result
        in too small objects)
    min_area: int
        minimal area an object must have to be considered a clump
    max_area: int
        maximal area an object can have to be considered a clump
    max_circularity: float
        maximal circularity an object must have to be considerd a clump
    max_convexity: float
        maximal convexity an object must have to be considerd a clump

    Returns
    -------
    numpy.ndarray[numpy.uint32]
        separated objects

    See also
    --------
    :class:`jtlib.features.Morphology`
    '''

    logger.info('separate clumped objects')
    label_image, n_objects = mh.label(clumps_image)
    if n_objects == 0:
        logger.debug('no objects')
        return label_image

    pad = 1
    cutting_pass = 1
    separated_image = label_image.copy()
    while True:
        logger.info('cutting pass #%d', cutting_pass)
        cutting_pass += 1
        label_image = mh.label(label_image > 0)[0]

        f = Morphology(label_image)
        values = f.extract()
        index = (
            (min_area < values['Morphology_Area']) &
            (values['Morphology_Area'] <= max_area) &
            (values['Morphology_Convexity'] <= max_convexity) &
            (values['Morphology_Circularity'] <= max_circularity)
        )
        clumped_ids = values[index].index.values
        not_clumped_ids = values[~index].index.values

        if len(clumped_ids) == 0:
            logger.debug('no more clumped objects')
            break

        mh.labeled.remove_regions(label_image, not_clumped_ids, inplace=True)
        mh.labeled.relabel(label_image, inplace=True)
        bboxes = mh.labeled.bbox(label_image)
        for oid in np.unique(label_image[label_image > 0]):
            bbox = bboxes[oid]
            logger.debug('process clumped object #%d', oid)
            obj_image = extract_bbox(label_image, bboxes[oid], pad=pad)
            obj_image = obj_image == oid

            # Rescale distance intensities to make them independent of clump size
            dist = mh.stretch(mh.distance(obj_image))

            # Find peaks that can be used as seeds for the watershed transform
            thresh = mh.otsu(dist)
            peaks = dist > thresh
            n = mh.label(peaks)[1]
            if n == 1:
                logger.debug(
                    'only one peak detected - perform iterative erosion'
                )
                # Iteratively shrink the peaks until we have two peaks that we
                # can use to separate the clump.
                while True:
                    tmp = mh.morph.open(mh.morph.erode(peaks))
                    n = mh.label(tmp)[1]
                    if n == 2 or n == 0:
                        if n == 2:
                            peaks = tmp
                        break
                    peaks = tmp

            # Select the two biggest peaks, since we want only two objects.
            peaks = mh.label(peaks)[0]
            sizes = mh.labeled.labeled_size(peaks)
            index = np.argsort(sizes)[::-1][1:3]
            for label in np.unique(peaks):
                if label not in index:
                    peaks[peaks == label] = 0
            peaks = mh.labeled.relabel(peaks)[0]
            regions = mh.cwatershed(np.invert(dist), peaks)

            # Use the line separating watershed regions to make the cut
            se = np.ones((3, 3), bool)  # np.bool was removed from NumPy; use the builtin bool
            line = mh.labeled.borders(regions, Bc=se)
            line[~obj_image] = 0
            line = mh.morph.dilate(line)

            # Ensure that cut is reasonable given user-defined criteria
            test_cut_image = obj_image.copy()
            test_cut_image[line] = False
            subobjects, n_subobjects = mh.label(test_cut_image)
            sizes = mh.labeled.labeled_size(subobjects)
            smaller_object_area = np.min(sizes)
            smaller_id = np.where(sizes == smaller_object_area)[0][0]
            smaller_object = subobjects == smaller_id

            do_cut = (
                (smaller_object_area > min_cut_area) &
                (np.sum(line) > 0)
            )
            if do_cut:
                logger.debug('cut object #%d', oid)
                y, x = np.where(line)
                y_offset, x_offset = bboxes[oid][[0, 2]] - pad - 1
                y += y_offset
                x += x_offset
                label_image[y, x] = 0
                separated_image[y, x] = 0
            else:
                logger.debug('don\'t cut object #%d', oid)
                mh.labeled.remove_regions(label_image, oid, inplace=True)

    return mh.label(separated_image)[0]
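A compact sketch of the peak-seeding idea used inside separate_clumped_objects: stretch the distance transform of a binary clump, Otsu-threshold it to obtain candidate peaks, and let the labeled peaks seed a watershed on the inverted distance map. The two overlapping discs are synthetic toy data; as the function above acknowledges, Otsu alone may leave a single peak region, which is why it falls back to iterative erosion.
import numpy as np
import mahotas as mh

# build two overlapping discs as a toy "clump"
yy, xx = np.mgrid[0:100, 0:160]
clump = (((yy - 50) ** 2 + (xx - 55) ** 2) < 30 ** 2) | \
        (((yy - 50) ** 2 + (xx - 105) ** 2) < 30 ** 2)

dist = mh.stretch(mh.distance(clump))   # rescaled distance transform (uint8)
peaks = dist > mh.otsu(dist)            # candidate seed regions
seeds, n_seeds = mh.label(peaks)
regions = mh.cwatershed(np.invert(dist), seeds)
regions[~clump] = 0
print('seed regions:', n_seeds, 'watershed labels:', len(np.unique(regions)) - 1)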
Example #32
def processFile(f):
   
   # Handle DICOM
   if isdicom(f):
      ds = dicom.read_file(f)
      fname = f.rsplit('.', 1)[0]+'.tif' # make a tiff file under the same name to read from
      pil_dcm = get_dicom_PIL(ds)
      pil_dcm.save(fname)
   else:
      fname = f

   # Set output file name
   fname_out = f.rsplit('.', 1)[0]+"-out.jpg"

   # Open the image file for processing
   print "File to process: "+fname      
   origimg = cv2.imread(fname, cv2.CV_LOAD_IMAGE_GRAYSCALE)
      
   # Chop off the top of the image b/c there is often noncontributory artifact & make numpy arrays
   img = origimg[25:,:]
   imarray = np.array(img)
   
   imarraymarkup = imarray
   maskarray = np.zeros_like(imarray)
   contoursarray = np.zeros_like(imarray)
   onesarray = np.ones_like(imarray)
   
    # Store dimensions for subsequent calculcations
   max_imheight = maskarray.shape[0]
   max_imwidth = maskarray.shape[1]
   
   if DEBUG: print max_imwidth, max_imheight
    
   # Choose the minimum in the entire array as the threshold value b/c some mammograms have > 0 background which screws up the contour finding if based on zero or some arbitrary number
   ret,thresh = cv2.threshold(imarray,np.amin(imarray),255,cv2.THRESH_BINARY)
   contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    
   biggest_contour = []
   for n, contour in enumerate(contours):
      if len(contour) > len(biggest_contour):
         biggest_contour = contour

    # Get the lower most extent of the contour (biggest y-value)
   max_vals = np.argmax(biggest_contour, axis = 0)
   min_vals = np.argmin(biggest_contour, axis = 0)
    #print max_vals[0,1]
   bc_max_y = biggest_contour[max_vals[0,1],0,1] # get the biggest contour max y
   bc_min_y = biggest_contour[min_vals[0,1],0,1] # get the biggest contour min y
    #print "Biggest Contour Max Y:"
    #print bc_max_y
    #print "Biggest Contour Min Y:"
    #print bc_min_y
   
   cv2.drawContours(contoursarray,biggest_contour,-1,(255,255,255),15)            

    # Calculate R/L sidedness using centroid
   M = cv2.moments(biggest_contour)
   cx = int(M['m10']/M['m00'])
   cy = int(M['m01']/M['m00'])
   right_side = cx > max_imwidth/2
    
    # Plot the center of mass
   cv2.circle(contoursarray,(cx,cy),100,[255,0,255],-1)            

    # Approximate the breast
   epsilon = 0.001*cv2.arcLength(biggest_contour,True)
   approx = cv2.approxPolyDP(biggest_contour,epsilon,True)
            
    # Calculate the hull and convexity defects
   drawhull = cv2.convexHull(approx)
    #cv2.drawContours(contoursarray,drawhull,-1,(0,255,0),60)
   hull = cv2.convexHull(approx, returnPoints = False)
   defects = cv2.convexityDefects(approx,hull)
   
    # Plot the defects and find the most superior. Note: I think the superior and inferior ones have to be kept separate
    # Also make sure that these are one beyond a reasonable distance from the centroid (arbitrarily cdist_factor = 80%) to make sure that nipple-related defects don't interfere
   supdef_y = maskarray.shape[0]
   supdef_tuple = []
   
   cdist_factor = 0.80

   if defects is not None:
      for i in range(defects.shape[0]):
         s,e,f,d = defects[i,0]
         far = tuple(approx[f][0])
         if far[1] < (cy*cdist_factor) and far[1] < supdef_y:
            supdef_y = far[1]
            supdef_tuple = far
            cv2.circle(contoursarray,far,50,[255,0,255],-1)

    # Find lower defect if there is one
    # Considering adding if a lower one is at least greater than 1/2 the distance between the centroid and the lower most border of the contour (see IMGS_MLO/IM4010.tif)
   infdef_y = 0
   infdef_tuple = []
   if defects is not None:
      for i in range(defects.shape[0]):
         s,e,f,d = defects[i,0]
         far = tuple(approx[f][0])
         if far[1] > infdef_y and supdef_tuple: # cy + 3/4*(bc_max_y - cy) = (bc_max_y + cy)/2
            if (right_side and far[0] > supdef_tuple[0]) or (not right_side and far[0] < supdef_tuple[0]):
               infdef_y = far[1]
               infdef_tuple = far
               cv2.circle(contoursarray,far,50,[255,0,255],-1)

    # Try cropping contour beyond certain index; get indices of supdef/infdef tuples, and truncate vector beyond those indices
   cropped_contour = biggest_contour[:,:,:]
               
   if supdef_tuple:
      sup_idx = [i for i, v in enumerate(biggest_contour[:,0,:]) if v[0] == supdef_tuple[0] and v[1] == supdef_tuple[1]]
      if sup_idx:
         if right_side:
            cropped_contour = cropped_contour[sup_idx[0]:,:,:]
         else:
            cropped_contour = cropped_contour[:sup_idx[0],:,:]
            
   if infdef_tuple:
      inf_idx = [i for i, v in enumerate(cropped_contour[:,0,:]) if v[0] == infdef_tuple[0] and v[1] == infdef_tuple[1]]
      if inf_idx:
         if right_side:
            cropped_contour = cropped_contour[:inf_idx[0],:,:]
         else:
            cropped_contour = cropped_contour[inf_idx[0]:,:,:]
         
   if right_side:
      cropped_contour = cropped_contour[cropped_contour[:,0,1] != 1]
   else:
      cropped_contour = cropped_contour[cropped_contour[:,0,0] != 1]

    # Draw the cropped contour
    #cv2.drawContours(imarraymarkup,cropped_contour,-1,(255,255,0),30)
    #cv2.drawContours(imarraymarkup,biggest_contour,-1,(255,0,0),30)

    # Fill in the cropped polygon to mask
    #cv2.fillPoly(maskarray, pts = [cropped_contour], color=(255,255,255))
   cv2.fillPoly(maskarray, pts = [cropped_contour], color=(255,255,255))
    #maskarray = ~np.all(maskarray == 0, axis=1)a
    #print maskarray
    #maskarray[~np.all(maskarray == 0, axis=2)]

    # Threshold the cropped version
    #ret_otsu,thresh_otsu = cv2.threshold(imarray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)

    # Multiply original image to the mask to get the cropped image
   imarray2 = imarray + onesarray
   imcrop_orig = cv2.bitwise_and(imarray2, maskarray)
    #cv2.drawContours(maskarray,biggest_contour,-1,(255,0,0),3)
    
   rankmin_out = rank.minimum(imcrop_orig, disk(20))
   thresh = mahotas.otsu(rankmin_out, ignore_zeros = True)

    # Draw thick black contour to eliminate the skin and nipple from the image
   cv2.drawContours(rankmin_out,cropped_contour,-1,(0,0,0),255) # 
    #cv2.drawContours(maskarray,cropped_contour,-1,(0,0,0),255) # 

    # Apply the thresholding to generate a new matrix and convert to int type
   otsubool_out = rankmin_out > thresh
   otsubinary_out = otsubool_out.astype('uint8')
   otsuint_out = otsubinary_out * 255

    # Crop out the fibroglandular tissue
    #print output2.shape, imarray2.shape, np.amax(output2), np.amax(imarray2), np.amax(maskarray)
   imcrop_fgt = cv2.bitwise_and(imarray2, otsuint_out) # both arrays are uint8 type

   segmented = maskarray > 0
   segmented = segmented.astype(int)
   segmented_sum = segmented.sum()
   otsubinary_sum = otsubinary_out.sum()
   
   density = (otsubinary_sum*100/segmented_sum).astype(int)
   
   if density < 25:
      dcat = 'Fatty'
   elif density < 50:
      dcat = 'Scattered'
   elif density < 75:
      dcat = 'Heterogenous'
   else:
      dcat = 'Extremely Dense'
        
   if right_side:
      side = 'Right'
   else:
      side = 'Left'

   if bc_min_y > 1:
      view = 'CC'
   else:
      view = 'MLO'
      
   avg = (imcrop_fgt.sum()/otsubinary_sum).astype(int)
   print side, view, otsubinary_sum, segmented_sum, density, dcat, avg, avg/np.amax(imcrop_fgt), np.amax(imcrop_fgt)
    
   # Create pil images
   pil1 = Image.fromarray(imarray2)
   pil2 = Image.fromarray(imcrop_fgt)
   pil3 = Image.fromarray(contoursarray)
   pil4 = Image.fromarray(maskarray)

   # Pasting images above to a pil background along with text. There's a lot of particular measurements sizing the fonts & pictures so that everything fits.  It's somewhat arbitrary with lots of trial and error, but basically everything is based off the resized width of the first image.  Images needed to be resized down b/c they were too high resolution for canvas.
   rf = 2 # rf = resize factor

   w1,h1 = pil1.size
   pil1_sm = pil1.resize((w1/rf,h1/rf))
   w1_sm,h1_sm = pil1_sm.size

   pil2_sm = pil2.resize((w1_sm,h1_sm))
   pil3_sm = pil3.resize((w1_sm,h1_sm))
   pil4_sm = pil4.resize((w1_sm,h1_sm))

   pil_backdrop = Image.new('L', (100+2*w1_sm,2*h1_sm+h1_sm/2), "white")

   pil_backdrop.paste(pil1_sm, (0,h1_sm/8))
   pil_backdrop.paste(pil2_sm, (100+w1_sm,h1_sm/8))
   pil_backdrop.paste(pil3_sm, (0,h1_sm/4+h1_sm))
   pil_backdrop.paste(pil4_sm, (100+w1_sm,h1_sm/4+h1_sm))

   font = ImageFont.truetype(FONT_PATH+"Arial.ttf",w1_sm/18)

   draw = ImageDraw.Draw(pil_backdrop)
   draw.text((0,0),"Original Image",0,font=font)
   draw.text((100+w1_sm,0),"Fibroglandular Tissue",0,font=font)
   draw.text((0,h1_sm+h1_sm/6),"Breast Contouring",0,font=font)
   draw.text((100+w1_sm,h1_sm+h1_sm/6),"Breast Segmentation",0,font=font)

   pil_backdrop.save(fname_out)
    
   return density, dcat, side, view
Example #33
from __future__ import print_function
import mahotas as mh
from pylab import gray, imshow, show
import numpy as np


luispedro = mh.demos.load('luispedro')
luispedro = luispedro.max(2)
T = mh.otsu(luispedro)
lpbin = (luispedro > T)
eye = lpbin[112:180,100:190]
gray()
imshow(eye)
show()
imshow(~mh.morph.close(~eye))
show()
imshow(~mh.morph.open(~eye))
show()
Example #34
def mySegmentation(img,s,method='adaptive',BoW='B',thr=0.75,l_th1=0,l_th2=550,seeds_thr1=50,seeds_thr2=500,block_size=7,offs=-0,visual=0):
    
    """the user can choose wether to use otsu for seeds (merkers) definition or get seeds from the standard deviation map"""
    img=Prepare_im(img);  
    sz=np.shape(img)
    seeds=np.zeros((sz[0],sz[1]))

    if method=='otsu':


        t=threshold_otsu(img.astype(uint16))*thr
        l=img<t

                #seeds=abs(seeds-1)      

        [l,N]=msr.label(l,return_num=True)

        [l,N]=remove_reg(l,l_th1,l_th2)   



    if visual:
        figure();imshow(img)
        figure();imshow(seeds)
        figure();imshow(l)
        
        
    if method=='adaptive':


        binary_adaptive = threshold_adaptive(-img, block_size, offset=offs)

        l=binary_adaptive

        [l,N]=msr.label(l,return_num=True)

        l,N=remove_reg(l,l_th1,l_th2)


        l=l!=0

        l=sgm.clear_border(l)

        l=mph.dilation(l)


        [l,n]=msr.label(l,return_num=True)


              

    if method=='std' :

#%compute otsu mask

        #s=std_image(img,0)
        t=mht.otsu(img.astype(uint16))*thr
        tempseeds=img<t
 

        s2=np.copy(s)
        s2=ndi.maximum_filter(s2,10)
        local_maxi = peak_local_max((s2 ).astype(np.double), indices=False,footprint=np.ones((10, 10)),min_distance=100000)
               

        #seeds=pymorph.regmin((-s2).astype(np.uint16)) #,array([[False,True,True,False],[False,True,True,False]]))
        seeds=local_maxi

        #seeds,n=mht.label(seeds)
        im=Prepare_im(img)
        t=threshold_otsu(im)
        mask=im<t*0.85

        seeds=msr.label(seeds)
        seeds,N=remove_reg(seeds,seeds_thr1,seeds_thr2)   
    
       # l = mht.cwatershed(img, seeds)
        l = mph.watershed(img, msr.label(local_maxi),mask=mph.binary_dilation(mask))
        #l=mph.watershed(img,seeds)
        l=l.astype(int32)        
        l,n=remove_reg(l,l_th1,l_th2)
        l=mht.labeled.remove_bordering(l)
        print 'label'
        print mht.labeled.labeled_size(l)
        [l,n]=msr.label(l,return_num=True)

    if visual:
        figure();imshow(img)
        figure();imshow(seeds)
        figure();imshow(l)
        
    return seeds,N,l


Example #35
import mahotas as mh 
import numpy as np
from matplotlib import pyplot as plt
from IPython.html.widgets import interact

plt.rcParams['figure.figsize'] = (10.0, 8.0) # 10 x 8inches 
plt.gray()
dna =mh.demos.load('Nuclear')
print(dna.shape) 
dna=dna.max(axis=2) 
print(dna.shape) 
plt.imshow(dna)
T_otsu = mh.otsu(dna) 
print(T_otsu) 
plt.imshow(dna > T_otsu)
T_mean = dna.mean() 
print(T_mean)
plt.imshow(dna > T_mean)
dnaf = mh.gaussian_filter(dna, 2.) 
T_mean = dnaf.mean()

bin_image = dnaf > T_mean 
plt.imshow(bin_image)
labeled, nr_objects = mh.label(bin_image) 
print(nr_objects)
plt.imshow(labeled) 
plt.jet()
@interact(sigma=(1.,16.)) 
def check_sigma(sigma):
    dnaf = mh.gaussian_filter(dna.astype(float), sigma) 
    maxima = mh.regmax(mh.stretch(dnaf))
Example #36
    print('No root found for leaf %s' % basename)

print('Resolution: ' + str(px_mm) + ' px/mm')
print('Root:       ' + str(root))
print()

print('Reading image %s' % filename)
img = mh.imread(filename)

# color image handling - works if the background is brighter than the object
if len(img.shape) == 3:  #color image
    #convert to grayscale
    img = mh.colors.rgb2gray(img, dtype=np.uint8)

# thresholding
T_otsu = mh.otsu(img)  # finds a numeric threshold
img = (img > T_otsu)  # make image binary

# invert the image (just because the test image is stored the other way)
img = ~img

# close single-pixel holes. Also makes the skeletonization much more well-behaved,
# with less tiny branches close to terminals.
#
# This can create loops if two branches are separated by < 3 px of background
img = mh.close(img)

print('Thinning...')
# skeletonization from scikit image.
# Zhang-Suen algorithm (apparently with staircase removal)
skel = morphology.skeletonize(img)
def watershedSegment(image, diskSize=20):

    def gradientMagnitudue(image):
        sobel_x = nd.sobel(image.astype('double'), 0)
        sobel_y = nd.sobel(image.astype('double'), 1)
        return np.sqrt((sobel_x * sobel_x) + (sobel_y * sobel_y))    

    def imimposemin(image, mask, connectivity):
        fm = image.copy()
        fm[mask] = -9223372036854775800
        fm[np.logical_not(mask)] = 9223372036854775800

        fp1 = image + 1
        
        g = np.minimum(fp1, fm)
        
        j = infrec(fm, g)
        return j

    def infrec(f, g, Bc=None):
        if Bc is None: Bc = pymorph.secross()
        n = f.size
        return fast_conditional_dilate(f, g, Bc, n);

    def fast_conditional_dilate(f, g, Bc=None, n=1):
        if Bc is None:
            Bc = pymorph.secross()
        f = pymorph.intersec(f,g)
        for i in xrange(n):
            prev = f
            f = pymorph.intersec(mahotas.dilate(f, Bc), g)
            if pymorph.isequal(f, prev):
                break
        return f

    gradmag = gradientMagnitudue(image)

    ## compute foreground markers

    # open image to create flat regions at cell centers
    se_disk = pymorph.sedisk(diskSize) 
    image_opened = mahotas.open(image, se_disk);

    # define foreground markers as regional maxes of cells
    # this step is slow!
    foreground_markers = mahotas.regmax(image_opened)

    ## compute background markers

    # Threshold the image, cast it to the right datatype, and then calculate the distance image
    image_black_white = image_opened > mahotas.otsu(image_opened)
    image_black_white = image_black_white.astype('uint16')

    # note the inversion here- a key difference from the matlab algorithm
    # matlab distance is to nearest non-zero pixel
    # python distance is to nearest 0 pixel
    image_distance = pymorph.to_uint16(nd.distance_transform_edt(np.logical_not(image_black_white)))
    eight_conn = pymorph.sebox()

    distance_markers = mahotas.label(mahotas.regmin(image_distance, eight_conn))[0]
    image_dist_wshed, image_dist_wshed_lines = mahotas.cwatershed(image_distance, distance_markers, eight_conn, return_lines=True)
    background_markers = image_dist_wshed_lines - image_black_white

    all_markers = np.logical_or(foreground_markers, background_markers)

    # impose a min on the gradient image.  assumes int64
    gradmag2 = imimposemin(gradmag.astype(int), all_markers, eight_conn)

    # call watershed
    segmented_cells, segmented_cell_lines = mahotas.cwatershed(gradmag2, mahotas.label(all_markers)[0], eight_conn, return_lines=True)
    segmented_cells -= 1
    
    # seperate watershed regions
    segmented_cells[gradientMagnitudue(segmented_cells) > 0] = 0
    return segmented_cells > 0, segmented_cells
Example #38
from __future__ import print_function
import mahotas
from pylab import gray, imshow, show
import numpy as np

luispedro = mahotas.imread('./data/luispedro.jpg')
luispedro = luispedro.max(2)
T = mahotas.otsu(luispedro)
lpbin = (luispedro > T)
eye = lpbin[112:180,100:190]
gray()
imshow(eye)
show()
imshow(~mahotas.morph.close(~eye))
show()
imshow(~mahotas.morph.open(~eye))
show()
Example #39
def double_otsu_larger(im):
    t = mahotas.otsu(im)
    t2 = mahotas.otsu(im[im>t])
    return t2
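A quick check of double_otsu_larger above on a demo image; the second Otsu pass only sees pixels above the first threshold, so it refines the split within the brighter population.
import numpy as np
import mahotas
import mahotas.demos

im = mahotas.demos.load('luispedro', as_grey=True).astype(np.uint8)
print('single Otsu:', mahotas.otsu(im), 'double Otsu (larger):', double_otsu_larger(im))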
import mahotas
import mahotas.demos
import numpy as np
from pylab import imshow, gray, show
from os import path

img = mahotas.demos.load('lean', as_grey = True)
img = img.astype(np.uint8)

#Thresholding with Otsu's method
seuillage_otsu = mahotas.otsu(img)
imshow(img > seuillage_otsu)
show()

#Thresholding with the Riddler-Calvard method
seuillage_rc = mahotas.rc(img)
imshow(img > seuillage_rc)
show()
Example #41
def motsu(im,iz):
    th = mahotas.otsu(im, ignore_zeros = iz)
    int_out, binary_sum, intinv_out, binaryinv_sum = bintoint(im, th)

    return binary_sum, int_out, binaryinv_sum, intinv_out
Example #42
import argparse
import cv2
from matplotlib import pyplot as plt
import mahotas
import pytesseract

ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())

image = cv2.imread(args["image"])
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(image, (5, 5), 0)
plt.imshow(blurred, cmap="gray")

T = mahotas.otsu(blurred)
print("Otsu`s threshold:{}".format(T))

thresh = image.copy()
thresh[thresh > T] = 255
thresh[thresh < T] = 0
thresh = cv2.bitwise_not(thresh)
plt.figure()
plt.imshow(thresh, cmap="gray")

T = mahotas.rc(blurred)
print("Riddler-Calvard:{}".format(T))

thresh = image.copy()
thresh[thresh > T] = 255
thresh[thresh < T] = 0
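The Riddler-Calvard branch is cut off here; presumably it finishes the same way as the Otsu branch above, e.g.:
thresh = cv2.bitwise_not(thresh)
plt.figure()
plt.imshow(thresh, cmap="gray")
plt.show()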
Example #43
    def doLocalNMF(self, x, y, roi, n_comp=7, diskSizeMultiplier=3):
        # do NMF decomposition
        n = NMF(n_components=n_comp, tol=1e-1)

        xmin_nmf = max(0, int(x - self.diskSize * diskSizeMultiplier))
        xmax_nmf = min(int(x + self.diskSize * diskSizeMultiplier), self.data.shape[0])
        ymin_nmf = max(0, int(y - self.diskSize * diskSizeMultiplier))
        ymax_nmf = min(int(y + self.diskSize * diskSizeMultiplier), self.data.shape[1])

        xcenter_nmf = (xmax_nmf - xmin_nmf) / 2
        ycenter_nmf = (ymax_nmf - ymin_nmf) / 2

        reshaped_sub_region_data = self.data_white[xmin_nmf:xmax_nmf, ymin_nmf:ymax_nmf, :].reshape(
            (xmax_nmf - xmin_nmf) * (ymax_nmf - ymin_nmf), self.data.shape[2]
        )
        n.fit(reshaped_sub_region_data - reshaped_sub_region_data.min())
        transformed_sub_region_data = n.transform(reshaped_sub_region_data - reshaped_sub_region_data.min())
        modes = transformed_sub_region_data.reshape(xmax_nmf - xmin_nmf, ymax_nmf - ymin_nmf, n_comp).copy()

        modes = [m for m in np.rollaxis(modes, 2, 0)]
        params = []
        this_cell = []
        is_cell = []
        thresh_modes = []
        fit_data = []
        for i, mode in enumerate(modes):
            # threshold mode
            uint16_mode = (mode / mode.max() * 2 ** 16).astype("uint16")
            uint16_mode = mahotas.dilate(mahotas.erode(uint16_mode))
            uint16_mode = nd.gaussian_filter(uint16_mode, 1)
            thresh_mode = uint16_mode > mahotas.otsu(uint16_mode)
            # exclude all pixels less than 75% of typical size
            smallest_roi = 0.75 * self.diskSize * self.diskSize * np.pi
            thresh_mode = self.excludePixels(thresh_mode, smallest_roi).astype(int)

            thresh_modes.append(thresh_mode)
            #            thresh_mode = (mode.astype('uint16') > mahotas.otsu(mode.astype('uint16'))).astype(int)

            # fit thresholded mode
            fit_parameters = self.fitgaussian(thresh_mode)
            fit_height, fit_xcenter, fit_ycenter, fit_xwidth, fit_ywidth = fit_parameters
            params.append(fit_parameters)

            # is cell-like?
            if 1 <= np.abs(fit_xwidth) <= 2 * self.diskSize and 1 <= np.abs(fit_ywidth) <= 2 * self.diskSize:
                if 0.02 <= thresh_mode.sum() / float(thresh_mode.size) <= 0.40:
                    is_cell.append(True)
                else:
                    is_cell.append(False)
            else:
                is_cell.append(False)

            # is this cell?
            if (
                np.linalg.norm(np.array([xcenter_nmf, ycenter_nmf]) - np.array([fit_xcenter, fit_ycenter]))
                < self.diskSize * 1.5
            ):
                this_cell.append(True)
            else:
                this_cell.append(False)

            fit_gaussian = self.gaussian(*fit_parameters)
            xcoords = np.mgrid[0 : xmax_nmf - xmin_nmf, 0 : ymax_nmf - ymin_nmf][0]
            ycoords = np.mgrid[0 : xmax_nmf - xmin_nmf, 0 : ymax_nmf - ymin_nmf][1]
            fit_data.append(fit_gaussian(xcoords, ycoords))

        #       print 'this cell', this_cell
        #       print 'is cell', is_cell
        #       print ' '

        return (
            modes,
            thresh_modes,
            fit_data,
            np.array(this_cell),
            np.array(is_cell),
            (xmin_nmf, xmax_nmf, ymin_nmf, ymax_nmf),
        )
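# A small standalone sketch of the decomposition pattern used in doLocalNMF:
# flatten a (height, width, frames) sub-region to one pixel trace per row,
# run NMF, then fold the component weights back into spatial modes.
# Shapes and values below are hypothetical, not the class's actual data.
import numpy as np
from sklearn.decomposition import NMF

movie = np.random.rand(32, 32, 100)                 # (height, width, frames)
flat = movie.reshape(32 * 32, 100)                  # one row per pixel trace
model = NMF(n_components=7, tol=1e-1, max_iter=500)
weights = model.fit_transform(flat - flat.min())    # (pixels, n_components)
modes = weights.reshape(32, 32, 7)                  # spatial mode per component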
Example #44
def watershedSegment(image, diskSize=20):
    """This routine implements the watershed example from 
    http://www.mathworks.com/help/images/examples/marker-controlled-watershed-segmentation.html, 
    but using pymorph and mahotas.

    :param image: an image (2d numpy array) to be segmented
    :param diskSize: an integer used as a size for a structuring element used 
                     for morphological preprocessing.
    :returns: tuple of binarized and labeled segmentation masks
    """
    def gradientMagnitudue(image):
        sobel_x = nd.sobel(image.astype('double'), 0)
        sobel_y = nd.sobel(image.astype('double'), 1)
        return np.sqrt((sobel_x * sobel_x) + (sobel_y * sobel_y))

    def imimposemin(image, mask, connectivity):
        fm = image.copy()
        fm[mask] = -9223372036854775800
        fm[np.logical_not(mask)] = 9223372036854775800

        fp1 = image + 1

        g = np.minimum(fp1, fm)

        j = infrec(fm, g)
        return j

    def infrec(f, g, Bc=None):
        if Bc is None: Bc = pymorph.secross()
        n = f.size
        return fast_conditional_dilate(f, g, Bc, n)

    def fast_conditional_dilate(f, g, Bc=None, n=1):
        if Bc is None:
            Bc = pymorph.secross()
        f = pymorph.intersec(f, g)
        for i in range(n):
            prev = f
            f = pymorph.intersec(mahotas.dilate(f, Bc), g)
            if pymorph.isequal(f, prev):
                break
        return f

    gradmag = gradientMagnitudue(image)

    ## compute foreground markers

    # open image to create flat regions at cell centers
    se_disk = pymorph.sedisk(diskSize)
    image_opened = mahotas.open(image, se_disk)

    # define foreground markers as regional maxes of cells
    # this step is slow!
    foreground_markers = mahotas.regmax(image_opened)

    ## compute background markers

    # Threshold the image, cast it to the right datatype, and then calculate the distance image
    image_black_white = image_opened > mahotas.otsu(image_opened)
    image_black_white = image_black_white.astype('uint16')

    # note the inversion here- a key difference from the matlab algorithm
    # matlab distance is to nearest non-zero pixel
    # python distance is to nearest 0 pixel
    image_distance = pymorph.to_uint16(
        nd.distance_transform_edt(np.logical_not(image_black_white)))
    eight_conn = pymorph.sebox()

    distance_markers = mahotas.label(mahotas.regmin(image_distance,
                                                    eight_conn))[0]
    image_dist_wshed, image_dist_wshed_lines = mahotas.cwatershed(
        image_distance, distance_markers, eight_conn, return_lines=True)
    background_markers = image_dist_wshed_lines - image_black_white

    all_markers = np.logical_or(foreground_markers, background_markers)

    # impose a min on the gradient image.  assumes int64
    gradmag2 = imimposemin(gradmag.astype(int), all_markers, eight_conn)

    # call watershed
    segmented_cells, segmented_cell_lines = mahotas.cwatershed(
        gradmag2, mahotas.label(all_markers)[0], eight_conn, return_lines=True)
    segmented_cells -= 1

    # separate watershed regions
    segmented_cells[gradientMagnitudue(segmented_cells) > 0] = 0
    return segmented_cells > 0, segmented_cells
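# A simplified, foreground-markers-only sketch of the same idea using just
# mahotas and scipy.ndimage (no pymorph, no background markers or minima
# imposition); 'img' would be a 2-D uint8 image.
import numpy as np
import mahotas
import scipy.ndimage as nd

def simple_marker_watershed(img, diskSize=20):
    # flatten small structures so that regional maxima mark object centers
    opened = mahotas.open(img, mahotas.disk(diskSize))
    markers = mahotas.label(mahotas.regmax(opened))[0]
    # flood the gradient magnitude surface starting from those markers
    sobel_x = nd.sobel(img.astype('double'), 0)
    sobel_y = nd.sobel(img.astype('double'), 1)
    gradmag = np.sqrt(sobel_x ** 2 + sobel_y ** 2)
    return mahotas.cwatershed(gradmag.astype(np.uint16), markers)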
Example #45
import numpy as np
import pylab
import mahotas as mh
dna = mh.imread('playingcards2.jpg')
dnaf = mh.gaussian_filter(dna, 1)
dnaf = (dnaf.astype(np.uint8))
T = mh.otsu(dnaf)
pylab.imshow(dnaf > T)
pylab.show()
Example #46
def separate_clumped_objects(clumps_image, min_cut_area, min_area, max_area,
        max_circularity, max_convexity):
    '''Separates objects in `clumps_image` based on morphological criteria.

    Parameters
    ----------
    clumps_image: numpy.ndarray[Union[numpy.int32, numpy.bool]]
        objects that should be separated
    min_cut_area: int
        minimal area each object resulting from a cut must have (prevents
        cuts that would produce objects that are too small)
    min_area: int
        minimal area an object must have to be considered a clump
    max_area: int
        maximal area an object can have to be considered a clump
    max_circularity: float
        maximal circularity an object can have to be considered a clump
    max_convexity: float
        maximal convexity an object can have to be considered a clump

    Returns
    -------
    numpy.ndarray[numpy.uint32]
        separated objects

    See also
    --------
    :class:`jtlib.features.Morphology`
    '''

    logger.info('separate clumped objects')
    label_image, n_objects = mh.label(clumps_image)
    if n_objects == 0:
        logger.debug('no objects')
        return label_image

    pad = 1
    cutting_pass = 1
    separated_image = label_image.copy()
    while True:
        logger.info('cutting pass #%d', cutting_pass)
        cutting_pass += 1
        label_image = mh.label(label_image > 0)[0]

        f = Morphology(label_image)
        values = f.extract()
        index = (
            (min_area < values['Morphology_Area']) &
            (values['Morphology_Area'] <= max_area) &
            (values['Morphology_Convexity'] <= max_convexity) &
            (values['Morphology_Circularity'] <= max_circularity)
        )
        clumped_ids = values[index].index.values
        not_clumped_ids = values[~index].index.values

        if len(clumped_ids) == 0:
            logger.debug('no more clumped objects')
            break

        mh.labeled.remove_regions(label_image, not_clumped_ids, inplace=True)
        mh.labeled.relabel(label_image, inplace=True)
        bboxes = mh.labeled.bbox(label_image)
        for oid in np.unique(label_image[label_image > 0]):
            bbox = bboxes[oid]
            logger.debug('process clumped object #%d', oid)
            obj_image = extract_bbox(label_image, bboxes[oid], pad=pad)
            obj_image = obj_image == oid

            # Rescale distance intensities to make them independent of clump size
            dist = mh.stretch(mh.distance(obj_image))

            # Find peaks that can be used as seeds for the watershed transform
            thresh = mh.otsu(dist)
            peaks = dist > thresh
            n = mh.label(peaks)[1]
            if n == 1:
                logger.debug(
                    'only one peak detected - perform iterative erosion'
                )
                # Iteratively shrink the peaks until we have two peaks that we
                # can use to separate the clump.
                while True:
                    tmp = mh.morph.open(mh.morph.erode(peaks))
                    n = mh.label(tmp)[1]
                    if n == 2 or n == 0:
                        if n == 2:
                            peaks = tmp
                        break
                    peaks = tmp

            # Select the two biggest peaks, since we want only two objects.
            peaks = mh.label(peaks)[0]
            sizes = mh.labeled.labeled_size(peaks)
            index = np.argsort(sizes)[::-1][1:3]
            for label in np.unique(peaks):
                if label not in index:
                    peaks[peaks == label] = 0
            peaks = mh.labeled.relabel(peaks)[0]
            regions = mh.cwatershed(np.invert(dist), peaks)

            # Use the line separating watershed regions to make the cut
            se = np.ones((3, 3), bool)
            line = mh.labeled.borders(regions, Bc=se)
            line[~obj_image] = 0
            line = mh.morph.dilate(line)

            # Ensure that cut is reasonable given user-defined criteria
            test_cut_image = obj_image.copy()
            test_cut_image[line] = False
            subobjects, n_subobjects = mh.label(test_cut_image)
            sizes = mh.labeled.labeled_size(subobjects)
            smaller_object_area = np.min(sizes)
            smaller_id = np.where(sizes == smaller_object_area)[0][0]
            smaller_object = subobjects == smaller_id

            do_cut = (
                (smaller_object_area > min_cut_area) &
                (np.sum(line) > 0)
            )
            if do_cut:
                logger.debug('cut object #%d', oid)
                y, x = np.where(line)
                y_offset, x_offset = bboxes[oid][[0, 2]] - pad - 1
                y += y_offset
                x += x_offset
                label_image[y, x] = 0
                separated_image[y, x] = 0
            else:
                logger.debug('don\'t cut object #%d', oid)
                mh.labeled.remove_regions(label_image, oid, inplace=True)

    return mh.label(separated_image)[0]
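# Hypothetical usage sketch (requires the jtlib helpers imported by this
# module; the two touching discs and all threshold values are illustrative):
import numpy as np
import mahotas as mh

yy, xx = np.mgrid[0:200, 0:200]
clumps = (((xx - 80) ** 2 + (yy - 100) ** 2) < 40 ** 2) | \
         (((xx - 130) ** 2 + (yy - 100) ** 2) < 40 ** 2)
separated = separate_clumped_objects(
    clumps, min_cut_area=200, min_area=2000, max_area=20000,
    max_circularity=0.95, max_convexity=0.95,
)
print(mh.label(separated > 0)[1])  # number of objects after cutting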
Example #47
import numpy as np
import mahotas as mh
image = mh.imread('../SimpleImageDataset/building05.jpg')
image = mh.colors.rgb2gray(image)

# Compute Gaussian filtered versions with increasing kernel widths
im8  = mh.gaussian_filter(image,  8)
im16 = mh.gaussian_filter(image, 16)
im32 = mh.gaussian_filter(image, 32)

# We now build a composite image with three panels:
#
# [ IM8 | | IM16 | | IM32 ]

h, w = im8.shape
canvas = np.ones((h, 3 * w + 256), np.uint8)
canvas *= 255
canvas[:, :w] = im8
canvas[:, w + 128:2 * w + 128] = im16
canvas[:, -w:] = im32
mh.imsave('../1400OS_10_05+.jpg', canvas[:, ::2])
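# An equivalent way to assemble the same three-panel layout, sketched with
# np.hstack and two explicit 128-pixel white spacer columns:
spacer = np.full((h, 128), 255, np.uint8)
canvas2 = np.hstack([im8.astype(np.uint8), spacer,
                     im16.astype(np.uint8), spacer,
                     im32.astype(np.uint8)])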

# Threshold the image
# We need to first stretch it to convert to an integer image
im32 = mh.stretch(im32)
ot32 = mh.otsu(im32)

# Convert to 255 np.uint8 to match the other images
im255 = 255 * (im32 > ot32).astype(np.uint8)
mh.imsave('../1400OS_10_06+.jpg', im255)
Example #48
import mahotas
import numpy as np
from pylab import imshow, gray, show, subplot
from os import path

luispedro_image = path.join(
                    path.dirname(path.abspath(__file__)),
                    'data',
                    'luispedro.jpg')

photo = mahotas.imread(luispedro_image, as_grey=True)
photo = photo.astype(np.uint8)

gray()
subplot(131)
imshow(photo)

T_otsu = mahotas.otsu(photo)
print(T_otsu)
subplot(132)
imshow(photo > T_otsu)

T_rc = mahotas.rc(photo)
print(T_rc)
subplot(133)
imshow(photo > T_rc)
show()
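# A small follow-up sketch: compare how much of the image each threshold
# keeps as foreground (uses the arrays defined above).
frac_otsu = (photo > T_otsu).mean()
frac_rc = (photo > T_rc).mean()
print('foreground fraction - Otsu: {:.2%}, Riddler-Calvard: {:.2%}'.format(
    frac_otsu, frac_rc))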

Example #49

files = listdir(dir_, include='.tif')
files.sort()
#files = files[20:]

file_ = files[0]
import gc

for file_ in files:
    try:
        f = fp.tiffload(file_)
        meta = f.metadata
        resolution = [meta['spacing'], 0.7568361, 0.7568361]
        data = f.data[:140, [0], 290:700, :400]
        data[data < mh.otsu(data, True) / 10] = 0
        data = autocrop(data, 2500, fct=np.max)
        data = data[:,0]
        del f
        gc.collect()
#        resolution = np.array((meta['spacing'], 0.3045961, 0.3045961))

        resolution[0] *= 2
        data = data[::2]
#        if resolution[0] < 2.:
#            downfac = np.floor(2. / resolution[0]).astype(np.int)
#            resolution[0] *= downfac
#            data = data[::downfac]
        data = median_filter(data, size=1, footprint=get_footprint(3, 2))
        data = median_filter(data, size=1, footprint=get_footprint(3, 2))
        data = gaussian_filter(data, sigma=[.25, .75,.75])
Example #50
def otsu(img, k=1.0):
    T_otsu = mh.otsu(img)
    return img > k * T_otsu
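# Hypothetical usage of the helper above: raising k above 1.0 gives a more
# conservative (smaller) foreground mask; the file name is illustrative.
import numpy as np
import mahotas as mh

img = mh.imread('cells.png', as_grey=True).astype(np.uint8)
mask = otsu(img, k=1.1)
print('foreground pixels:', mask.sum())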
Example #51
    print('No root found for leaf %s'%basename)
    
print('Resolution: ' + str(px_mm) + ' px/mm')
print('Root:       ' + str(root))
print()

print('Reading image %s'%filename)
img = mh.imread(filename)

# color image handling - works if the background is brighter than the object
if len(img.shape) == 3: #color image
    #convert to grayscale
    img = mh.colors.rgb2gray(img, dtype=np.uint8)

# thresholding
T_otsu = mh.otsu(img) # finds a numeric threshold
img = (img > T_otsu)  # make image binary

# invert the image (just because the test image is stored the other way)
img = ~img 

# close single-pixel holes. Also makes the skeletonization much more well-behaved,
# with less tiny branches close to terminals.
#
# This can create loops if two branches are separated by < 3 px of background
img = mh.close(img)

print('Thinning...')
# skeletonization from scikit image.
# Zhang-Suen algorithm (apparently with staircase removal)
skel = morphology.skeletonize(img)
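# A rough follow-up sketch: estimate total skeleton length by counting
# skeleton pixels and dividing by the resolution printed above; diagonal
# steps are ignored, so this slightly underestimates the true length.
skel_length_mm = skel.sum() / float(px_mm)
print('Approximate skeleton length: %.1f mm' % skel_length_mm)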
Beispiel #52
0
def double_otsu_smaller(im):
    t = mahotas.otsu(im)
    t2 = mahotas.otsu(im[im<t])
    return t2
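# Hypothetical usage: the second, lower Otsu threshold can recover dim
# objects when a few very bright ones dominate the histogram (the file
# name is illustrative).
import mahotas

im = mahotas.imread('nuclei.png', as_grey=True).astype('uint8')
t_low = double_otsu_smaller(im)
dim_and_bright = im > t_low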