Example #1
def test_histogram02():
    "histogram 2"
    labels = [1, 1, 1, 1, 2, 2, 2, 2]
    expected = [0, 2, 0, 1, 1]
    input = np.array([1, 1, 3, 4, 3, 3, 3, 3])
    output = ndimage.histogram(input, 0, 4, 5, labels, 1)
    assert_array_almost_equal(output, expected)
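Here the five bins are equal-width intervals over [0, 4] (width 0.8), and the labels/index arguments restrict counting to pixels whose label equals 1, i.e. the values [1, 1, 3, 4]. As a cross-check (not part of the original test), np.histogram over the same range should reproduce the expected counts, since its last bin is closed on the right, which matches the behaviour the test expects for the value 4:

import numpy as np

counts, edges = np.histogram([1, 1, 3, 4], bins=5, range=(0, 4))
print(counts)  # -> [0 2 0 1 1], matching `expected` above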
Example #2
def fuzzy_thresh(image):
  # Fuzzy thresholding: choose the T that optimises a Yager fuzziness
  # measure. `yager_entropy` is a helper defined elsewhere in the
  # original project.
  size = image.shape[0]*image.shape[1]
  hist = ndimage.histogram(image, 0, 255, 256)
  total = numpy.dot(numpy.arange(0, 256), hist)

  S = 0   # pixel count at or below T
  W = 0   # intensity mass at or below T

  fuzzinessmax = 0.0
  Tmax = 0

  for T in range(254):
    S += hist[T]
    nS = size - S
    W += T*hist[T]
    nW = total - W

    # guard against an empty class on either side of the threshold
    if S > 0 and nS > 0:
      u0 = int(float(W)/S)    # mean of the background class
      u1 = int(float(nW)/nS)  # mean of the foreground class

      fuzziness = yager_entropy(hist, T, u0, u1)
      if fuzziness > fuzzinessmax:
        (Tmax, fuzzinessmax) = (T, fuzziness)

  print(Tmax)
  return ((image > Tmax)*255).astype(numpy.uint8)
Example #3
def otsu(image):
  # Otsu's method: choose the threshold T that maximises the
  # between-class variance of background and foreground.
  size = image.shape[0]*image.shape[1]
  hist = ndimage.histogram(image, 0, 255, 256)
  total = numpy.dot(numpy.arange(0, 256), hist)

  sumB = 0.0

  wB = 0
  wF = 0
  varmax = 0.0
  Tmax = 0

  for T in range(255):
    wB += hist[T]              # weight of the background class
    if wB == 0:
      continue
    wF = size - wB             # weight of the foreground class
    if wF == 0:
      break

    sumB += float(T * hist[T])

    mB = sumB / wB             # mean of the background class
    mF = (total - sumB) / wF   # mean of the foreground class

    varBetween = float(wB) * float(wF) * (mB - mF) * (mB - mF)

    if varBetween > varmax:
      (varmax, Tmax) = (varBetween, T)

  print(Tmax)
  return ((image > Tmax)*255).astype(numpy.uint8)
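A minimal usage sketch for the otsu function above (not from the original source), assuming numpy and scipy.ndimage are imported as the snippet expects; the synthetic image is bimodal, so the printed threshold should fall between the two intensity clusters:

import numpy
from scipy import ndimage

# hypothetical bimodal test image
rng = numpy.random.default_rng(0)
dark = rng.integers(20, 60, size=(64, 64))
bright = rng.integers(180, 220, size=(64, 64))
image = numpy.concatenate([dark, bright], axis=0).astype(numpy.uint8)

binary = otsu(image)               # prints the chosen threshold
print(binary.min(), binary.max())  # -> 0 255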
Example #4
def filterObjects(labels, num=None, min_size=150, max_size=2000, in_place=False):
    """Remove too small or too big objects from array.

    labels: array labels given by ndimage.label function.
    num: int Total number of labels given by ndimage.label function.

    return: filtered_array, number_of_objects on this array
    """
    if num is None:
        num = labels.max()  # Not too safe
    if in_place:
        lbls = labels
        # Compute label sizes. Not as fast as np.bincount.
        comp_sizes = ndimage.histogram(
            input=labels, min=0, max=num, bins=num + 1)
    else:
        # We can't just copy, because numpy.bincount has a bug and fails with uint dtypes.
        # lbls = labels.copy()
        lbls = labels.astype(np.int32)
        # Compute labels sizes.
        comp_sizes = np.bincount(lbls.ravel())
    fmask = (max_size < comp_sizes) | (min_size > comp_sizes)
    # fmask[lbls] is a boolean array of labels' shape; True marks pixels of objects to remove.
    lbls[fmask[lbls]] = 0
    return lbls, num - fmask[1:].sum()
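A short usage sketch (assumed setup, not from the original project): label a small binary image with ndimage.label, then drop the component below min_size:

import numpy as np
from scipy import ndimage

binary = np.zeros((40, 40), dtype=np.uint8)
binary[2:5, 2:5] = 1        # 9-pixel object, below min_size
binary[10:30, 10:30] = 1    # 400-pixel object, kept

labels, num = ndimage.label(binary)
filtered, n_left = filterObjects(labels, num, min_size=150, max_size=2000)
print(num, n_left)  # -> 2 1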
Example #5
    def pare_on_volume(
        self,
        thresh=None,
        corners=False,
        timer=False,
        backup=False
        ):
        """
        Remove discrete regions that do not meet a pixel-volume
        threshold. Requires that a volume threshold be set.
        """

        # .............................................................
        # Error checking
        # .............................................................

        if thresh is None:
            print("Need a threshold.")
            return

        # .............................................................
        # Back up the mask first if requested
        # .............................................................

        if backup:
            # copy, so later in-place edits to self.data leave the backup intact
            self.backup = self.data.copy()

        # .............................................................
        # Label the mask
        # .............................................................
        
        structure = (Struct(
                "simple", 
                ndim=self.data.ndim,                
                corners=corners)).struct

        labels, nlabels = label(self.data,
                                structure=structure)

        # .............................................................
        # Histogram the labels
        # .............................................................

        # One bin per label: half-integer edges straddle each integer label
        hist = histogram(
            labels, 0.5, nlabels+0.5, nlabels)
        
        # .............................................................
        # Identify the low-volume regions
        # .............................................................
        
        if np.sum(hist < thresh) == 0:
            return
        
        loc = find_objects(labels)

        # Labels run from 1 to nlabels inclusive
        for reg in np.arange(1, nlabels+1):
            if hist[reg-1] > thresh:
                continue
            self.data[loc[reg-1]] *= (labels[loc[reg-1]] != reg)
Example #6
 def get_choice_histogram(self, min=None, max=None, bins=None):
     """Give an array that represents a histogram of choices.

     With the defaults, bins = max - min yields one unit-width bin
     per integer choice value.
     """
     if max is None:
         max = self.choices.max() + 1
     if min is None:
         min = self.choices.min()
     if bins is None:
         bins = max - min
     return histogram(self.get_choices(), min, max, bins)
Example #7
def test_histogram03():
    labels = [1, 0, 1, 1, 2, 2, 2, 2]
    expected1 = [0, 1, 0, 1, 1]
    expected2 = [0, 0, 0, 3, 0]
    input = np.array([1, 1, 3, 4, 3, 5, 3, 3])
    output = ndimage.histogram(input, 0, 4, 5, labels, (1,2))

    assert_array_almost_equal(output[0], expected1)
    assert_array_almost_equal(output[1], expected2)
Example #8
def entropy(x):
    '''The entropy of x, treating its 256-bin histogram as an empirical
    probability distribution (so H = log2(n) - sum(h*log2(h))/n)'''
    histogram = scind.histogram(x.astype(float), np.min(x), np.max(x), 256)
    n = np.sum(histogram)
    if n > 0 and np.max(histogram) > 0:
        # drop empty bins so log2 is well defined
        histogram = histogram[histogram != 0]
        return np.log2(n) - np.sum(histogram * np.log2(histogram)) / n
    else:
        return 0
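A quick sanity check (not from the original source), assuming the snippet's aliases np for numpy and scind for scipy.ndimage: a signal spread evenly over 256 distinct levels should score close to 8 bits.

import numpy as np
import scipy.ndimage as scind

x = np.repeat(np.arange(256), 4)  # each of the 256 levels occurs 4 times
print(entropy(x))                 # should print 8.0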
Example #9
def create_histogram(values, main="", xlabel="", bins=None):
    """Plot a histogram of values, which is a numpy array."""
    from matplotlib.pylab import text
    mini = values.min()
    maxi = values.max()
    if bins is None:
        bins = int(maxi - mini)
    # small offset so the maximum value falls inside the last bin
    hist = histogram(values, mini, maxi + 0.00001, bins)
    # create_barchart is a plotting helper defined elsewhere in the project
    create_barchart(hist, bins, mini, maxi, main)
    text(maxi/2.0, -(hist.max() - hist.min())/20.0, s=xlabel,
         horizontalalignment='center', verticalalignment='top')
Example #10
def labelmeanfilter_nd(y, x):
    # requires integer labels
    # from mailing list scipy-user 2009-02-11
    # adjusted for 2d x with column variables

    labelsunique = np.arange(np.max(y) + 1)
    labmeansdata = []
    labmeans = []

    for xx in x.T:
        labelmeans = np.array(ndimage.mean(xx, labels=y, index=labelsunique))
        labmeansdata.append(labelmeans[y])
        labmeans.append(labelmeans)

    # group count: a single full-range bin per label counts its members
    labelcount = np.array(ndimage.histogram(y, labelsunique[0],
                                            labelsunique[-1] + 1,
                                            1, labels=y, index=labelsunique))

    # returns array of label/group counts and of label/group means
    #         and label/group means for each original observation
    return labelcount, np.array(labmeans), np.array(labmeansdata).T
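A small worked example (assumed data, not from the original source), with numpy and scipy.ndimage imported as in the snippet:

import numpy as np
from scipy import ndimage

y = np.array([0, 0, 1, 1, 1])          # integer group labels
x = np.array([[1., 10.], [3., 10.],    # two column variables
              [5., 20.], [5., 30.], [5., 40.]])

counts, means, means_per_obs = labelmeanfilter_nd(y, x)
print(counts)         # -> [[2] [3]]  (one single-bin count per group)
print(means)          # -> [[ 2.  5.] [10. 30.]]  (rows: variables, cols: groups)
print(means_per_obs)  # the group means broadcast back to each observation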
Example #11
def test_histogram02():
    labels = [1, 1, 1, 1, 2, 2, 2, 2]
    expected = [0, 2, 0, 1, 1]
    input = np.array([1, 1, 3, 4, 3, 3, 3, 3])
    output = ndimage.histogram(input, 0, 4, 5, labels, 1)
    assert_array_almost_equal(output, expected)
Example #12
import pylab as pl
import numpy as np
from scipy import ndimage
from scipy.stats import multivariate_normal

import sys

#img2 = pl.imread("converse2.jpg")
#img2 = pl.imread("obraz.png")
img2 = pl.imread(sys.argv[1])
s = img2.shape
print ("Min: {}, max: {}".format(np.min(img2),np.max(img2)))

hist = ndimage.histogram(img2, 0, 1, 256)
colors = 16
dcolors = np.linspace(0.0, 100.0, colors)
for i in dcolors:
    print ("Percentile: {} equals: {}".format(i, np.percentile(img2, i)))


#    print("Kolor: {}".format(s))
Example #13
 def measure_TAS(self, pixels, labels, n, m):
     '''Measure threshold adjacency statistics (TAS) of the image.

     pixels - the intensity image to be measured
     labels - the labels matrix that labels each object with an integer
     n, m - offsets added to the mean foreground intensity; together
            they define the thresholding band [mean + n, mean + m]
     '''
     #
     # The strategy here is to operate on the whole array instead of
     # operating on one object at a time. The most important thing is
     # to avoid having to run the Python interpreter once per pixel in
     # the image, and the second most important is to avoid running it
     # per object in case there are hundreds of objects.
     #

     if len(labels) == 0:
         n_objects = 0
     else:
         n_objects = np.max(labels)

     if n_objects == 0:
         result = np.zeros((0,))
     else:
         indexes = np.arange(1, np.max(labels)+1, dtype=np.int32)

         # Calculate the mean of pixels above intensity 30 on a 0-255
         # scale (30/255 is approximately 0.118)
         mean = np.mean(pixels[pixels > 0.118])

         # Set the thresholding band around that mean
         rangelow = mean + n
         rangehigh = mean + m

         # Threshold the image, creating a mask of in-band pixels
         mask = np.logical_and(pixels < rangehigh, pixels > rangelow)
         thresholded_image = np.zeros(np.shape(pixels))
         thresholded_image[mask] = 1

         # Convolve with a ring of ones to count, for each pixel, how
         # many of its 8 surrounding pixels are in-band
         w = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
         sums = scind.convolve(thresholded_image, w, mode='constant')
         # Send pixels outside the mask to bin 9, which is discarded
         # below, so only in-band pixels contribute to the statistics
         sums[~mask] = 9

         # Histogram the neighbour counts per object (bins 0..8 plus
         # the discard bin 9); `fix` is a helper from the original project
         result = fix(scind.histogram(sums.astype(int), 0, 9, 10,
                                      labels=labels, index=indexes))
         result = np.vstack(result).T.astype(np.float64)
         # Normalise each object's histogram so its bins sum to 1
         result = result / np.sum(result, axis=0)
     return result[0:9]
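The core TAS computation can be reproduced standalone, as a rough sketch under assumed inputs (one object covering a random image; the project's fix helper is bypassed by indexing the returned list):

import numpy as np
import scipy.ndimage as scind

rng = np.random.default_rng(0)
pixels = rng.random((32, 32))
labels = np.ones((32, 32), dtype=np.int32)   # a single object

mean = np.mean(pixels[pixels > 0.118])
mask = np.logical_and(pixels < mean + 0.3, pixels > mean - 0.3)
thresholded = mask.astype(float)

w = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
sums = scind.convolve(thresholded, w, mode='constant')
sums[~mask] = 9                              # out-of-band pixels go to bin 9

hist = scind.histogram(sums.astype(int), 0, 9, 10, labels=labels, index=[1])[0]
tas = hist[0:9] / hist[0:9].sum()            # 9-bin TAS signature
print(tas)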
Example #14
def test_histogram01():
    expected = np.ones(10)
    input = np.arange(10)
    output = ndimage.histogram(input, 0, 10, 10)
    assert_array_almost_equal(output, expected)
Example #15
# A console session: blend two images with masks, then find the median
# gray level from an ndimage histogram. Assumes img, img2, mask and
# mask2 (plus the pl/ndimage aliases) were defined earlier in the session.
import numpy as np

img3 = mask*img + (1.0 - mask)*img2
pl.imshow(img3, cmap=pl.gray())
pl.show()
img3 = mask2*img + (1.0 - mask2)*img2
pl.imshow(img3, cmap=pl.gray())
pl.show()
img3 = (1.0 - mask2)*img + mask2*img2
pl.imshow(img3, cmap=pl.gray())
pl.show()

from skimage import data
hist = ndimage.histogram(img, 0, 255, 256)
a = sum(hist)  # total pixel count

# Walk the histogram until half the pixels are covered: the median level
s = 0
for i in range(0, 256):
    s += hist[i]
    if s >= (a/2.0):
        print(i)
        break
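The same median can be computed without the explicit loop; a sketch using the hist array from the session above:

import numpy as np

cum = np.cumsum(hist)
median_level = int(np.searchsorted(cum, cum[-1] / 2.0))
print(median_level)  # same index the loop prints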
Example #16
import pylab
from scipy import ndimage

print ('********************************************************************************')
print ('* Building a histogram using SciPy, NumPy and Pylab                            *')
print ('********************************************************************************')
import sys
sys.path.append('../utils')
import userinput
fpath = userinput.get_img_path()
img_array = userinput.get_gray_img(fpath)


# builds the histogram using ndimage.histogram()...
print ('building histogram...')
bins_count = 256
histogram = ndimage.histogram(img_array, 0, 255, bins_count)

# plots the histogram using pylab...
print ('plotting histogram...')
x_label = 'pixel'
y_label = 'number of pixels'
pylab.plot(histogram)
pylab.axis(xmax=bins_count)
pylab.xlabel(x_label)
pylab.ylabel(y_label)

hpath = fpath + '-histogram.png'
print ('saving histogram to file \'' + hpath + '\'...')
pylab.savefig(hpath, format='png')

print ('showing histogram...')
pylab.show()
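A quick sanity check on the binning (not part of the original script), assuming img_array is an 8-bit grayscale array so every pixel falls within [0, 255]:

import numpy as np
assert histogram.sum() == img_array.size  # every pixel landed in some bin
print(np.argmax(histogram))               # the most populated gray-level bin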
Example #17
def intensityCutImage(imageData, cutLevels):
    """Creates a matplotlib.pylab plot of an image array with the specified cuts in intensity
    applied. This routine is used by L{saveBitmap} and L{saveContourOverlayBitmap}, which both
    produce output as .png, .jpg, etc. images.
    
    @type imageData: numpy array
    @param imageData: image data array
    @type cutLevels: list
    @param cutLevels: sets the image scaling - available options:
        - pixel values: cutLevels=[low value, high value].
        - histogram equalisation: cutLevels=["histEq", number of bins ( e.g. 1024)]
        - relative: cutLevels=["relative", cut per cent level (e.g. 99.5)]
        - smart: cutLevels=["smart", cut per cent level (e.g. 99.5)]
    ["smart", 99.5] seems to provide good scaling over a range of different images.
    @rtype: dictionary
    @return: image section (numpy.array), matplotlib image normalisation (matplotlib.colors.Normalize), in the format {'image', 'norm'}.
    
    @note: If cutLevels[0] == "histEq", then only {'image'} is returned.
    
    """
    
    oImWidth=imageData.shape[1]
    oImHeight=imageData.shape[0]
                    
    # Optional histogram equalisation
    if cutLevels[0]=="histEq":
        
        imageData=histEq(imageData, cutLevels[1])
        anorm=pylab.normalize(imageData.min(), imageData.max())
        
    elif cutLevels[0]=="relative":
        
        # this turns image data into a 1D array then sorts
        sortedArray=numpy.sort(numpy.ravel(imageData))
        maxValue=sortedArray.max()
        minValue=sortedArray.min()
        
        # want to discard the top and bottom specified per cent
        topCutIndex=(len(sortedArray)-1) \
            -int(math.floor(float((100.0-cutLevels[1])/100.0)*(len(sortedArray)-1)))
        bottomCutIndex=int(math.ceil(float((100.0-cutLevels[1])/100.0)*(len(sortedArray)-1)))
        topCut=sortedArray[topCutIndex]
        bottomCut=sortedArray[bottomCutIndex]
        anorm=pylab.normalize(bottomCut, topCut)
        
    elif cutLevels[0]=="smart":
        
        # this turns image data into a 1D array then sorts
        sortedArray=numpy.sort(numpy.ravel(imageData))
        maxValue=sortedArray.max()
        minValue=sortedArray.min()
        numBins=10000   # 0.01 per cent accuracy
        binWidth=(maxValue-minValue)/float(numBins)
        histogram=ndimage.histogram(sortedArray, minValue, maxValue, numBins)
        
        # Find the bin with the most pixels in it, set that as our minimum
        # Then search through the bins until we get to a bin with more/or the same number of
        # pixels in it than the previous one.
        # We take that to be the maximum.
        # This means that we avoid the traps of big, bright, saturated stars that cause
        # problems for relative scaling
        backgroundValue=histogram.max()
        foundBackgroundBin=False
        foundTopBin=False
        lastBin=-10000					
        for i in range(len(histogram)):
            
            if histogram[i]>=lastBin and foundBackgroundBin==True:
                
                # Added a fudge here to stop us picking for top bin a bin within 
                # 10 percent of the background pixel value
                if (minValue+(binWidth*i))>bottomBinValue*1.1:
                    topBinValue=minValue+(binWidth*i)
                    foundTopBin=True
                    break
            
            if histogram[i]==backgroundValue and foundBackgroundBin==False:
                bottomBinValue=minValue+(binWidth*i)
                foundBackgroundBin=True

            lastBin=histogram[i]
        
        if foundTopBin==False:
            topBinValue=maxValue
         
        #Now we apply relative scaling to this
        smartClipped=numpy.clip(sortedArray, bottomBinValue, topBinValue)
        topCutIndex=(len(smartClipped)-1) \
            -int(math.floor(float((100.0-cutLevels[1])/100.0)*(len(smartClipped)-1)))
        bottomCutIndex=int(math.ceil(float((100.0-cutLevels[1])/100.0)*(len(smartClipped)-1)))
        topCut=smartClipped[topCutIndex]
        bottomCut=smartClipped[bottomCutIndex]
        anorm=pylab.normalize(bottomCut, topCut)
    else:
        
        # Normalise using given cut levels
        anorm=pylab.normalize(cutLevels[0], cutLevels[1])
    
    if cutLevels[0]=="histEq":
        return {'image': imageData.copy()}
    else:
        return {'image': imageData.copy(), 'norm': anorm}
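A usage sketch under assumed imports (numpy, math, pylab, scipy.ndimage at module level); note that pylab.normalize comes from older matplotlib releases, where it aliased matplotlib.colors.Normalize:

import numpy
import pylab

imageData = numpy.random.gamma(2.0, 50.0, size=(128, 128))  # synthetic sky-like image
cut = intensityCutImage(imageData, ["smart", 99.5])
pylab.imshow(cut['image'], norm=cut['norm'], cmap='gray', origin='lower')
pylab.savefig('cut-demo.png')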
Example #18
def test_histogram01():
    "histogram 1"
    expected = np.ones(10)
    input = np.arange(10)
    output = ndimage.histogram(input, 0, 10, 10)
    assert_array_almost_equal(output, expected)