import numpy as np

from skimage.filter import rank


def test_bilateral():
    image = np.zeros((21, 21), dtype=np.uint16)
    selem = np.ones((3, 3), dtype=np.uint8)

    image[10, 10] = 1000
    image[10, 11] = 1010
    image[10, 9] = 900

    # With s0 = s1 = 1 only the center gray-level falls inside the interval.
    assert rank.mean_bilateral(image, selem, s0=1, s1=1)[10, 10] == 1000
    assert rank.pop_bilateral(image, selem, s0=1, s1=1)[10, 10] == 1
    # With s0 = s1 = 11 the neighbor at 1010 is included as well.
    assert rank.mean_bilateral(image, selem, s0=11, s1=11)[10, 10] == 1005
    assert rank.pop_bilateral(image, selem, s0=11, s1=11)[10, 10] == 2
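# Why the numbers above come out as they do: a minimal pure-NumPy sketch of the
# bilateral neighborhood around the center pixel, assuming s0/s1 are the lower
# and upper gray-level deviations kept around the center value (this check is
# an illustration, not part of the original test).
import numpy as np

image = np.zeros((21, 21), dtype=np.uint16)
image[10, 10] = 1000
image[10, 11] = 1010
image[10, 9] = 900

center = int(image[10, 10])
window = image[9:12, 9:12].ravel()                  # the 3x3 selem footprint
kept = window[(window >= center - 11) & (window <= center + 11)]
print(kept.mean())   # 1005.0 -> rank.mean_bilateral(..., s0=11, s1=11)
print(kept.size)     # 2      -> rank.pop_bilateral(..., s0=11, s1=11)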
(median filters already achieved this), here we use the **bilateral** filter
that restricts the local neighborhood to pixels having a gray-level similar to
the central one.

.. note::

    A different implementation is available for color images in
    `skimage.filter.denoise_bilateral`.

"""

import numpy as np
import matplotlib.pyplot as plt

from skimage import data, img_as_ubyte
from skimage.morphology import disk
from skimage.filter.rank import mean_bilateral


noisy_image = img_as_ubyte(data.camera())

bilat = mean_bilateral(noisy_image.astype(np.uint16), disk(20), s0=10, s1=10)

fig, ax = plt.subplots(2, 2, figsize=(10, 7))
ax1, ax2, ax3, ax4 = ax.ravel()

ax1.imshow(noisy_image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')

ax2.imshow(bilat, cmap=plt.cm.gray)
ax2.set_title('Bilateral mean')
ax2.axis('off')

ax3.imshow(noisy_image[200:350, 350:450], cmap=plt.cm.gray)
ax3.axis('off')
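# A minimal sketch of the color-image counterpart mentioned in the note above.
# Assumptions: the parameter names (sigma_range, sigma_spatial) follow the
# skimage.filter API of this era; newer releases expose the function as
# skimage.restoration.denoise_bilateral and rename sigma_range to sigma_color.
import numpy as np
from skimage.filter import denoise_bilateral

rgb = np.random.rand(64, 64, 3)   # placeholder RGB image in [0, 1]
smoothed = denoise_bilateral(rgb, sigma_range=0.1, sigma_spatial=3)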
import numpy as np
from scipy import ndimage as ndi

from skimage import exposure
from skimage.filter import rank
from skimage.morphology import disk


def segmentation(sample):
    """Return a list of csize x csize core crops extracted from `sample`."""
    cores = []
    entropy_ratio = .5
    csize = 300   # side length of each extracted core window

    # `open_image` is defined elsewhere in this project and yields the
    # grayscale frames of one sample.
    try:
        gray_images = np.array([i for i in open_image(sample)])
    except Exception:
        print("can't open %s" % sample)
        return cores

    m, n = gray_images[0].shape
    g_min = np.min(gray_images[1:], axis=0)
    g_max = np.max(gray_images[1:], axis=0)
    g_mean = rank.mean_bilateral(g_max, disk(40))

    diff = g_max - g_min

    # Enhance contrast so that cores come out clearly darker or brighter.
    equalized = exposure.adjust_gamma(g_max, 2)

    # Local Otsu threshold separates the background from candidate cores.
    radius = 20
    selem = disk(radius)
    local_otsu = rank.otsu(equalized, selem)
    bg = diff <= local_otsu

    # Score core-like regions by combining local entropy and gradient.
    ent = rank.entropy(g_max * ~bg, disk(35))
    grad = rank.gradient(g_mean, disk(50))
    tmp = ent * grad
    core_mask = tmp > (np.min(tmp) + (np.max(tmp) - np.min(tmp)) * entropy_ratio)

    lbl, num_lbl = ndi.label(core_mask)
    for i in range(1, num_lbl + 1):   # lbl == 0 is the background
        c = np.where(np.max(lbl == i, axis=0))[0]
        left, right = c[0], c[-1]
        c = np.where(np.max(lbl == i, axis=1))[0]
        up, down = c[0], c[-1]

        # Clamp the window center so the crop stays inside the image.
        middle_x = min(max((up + down) // 2, csize // 2), m - csize // 2)
        middle_y = min(max((left + right) // 2, csize // 2), n - csize // 2)
        core = gray_images[0][middle_x - csize // 2:middle_x + csize // 2,
                              middle_y - csize // 2:middle_y + csize // 2]
        core = exposure.adjust_gamma(core, .5)
        cores.append(core)

    return cores
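# `open_image` above is defined elsewhere in the original project. A
# hypothetical stand-in, assuming each sample is a multi-frame grayscale file
# readable by skimage, could look like this:
from skimage import io

def open_image(path):
    """Yield the grayscale frames of a multi-frame image file (assumption)."""
    stack = io.imread(path)     # expected shape: (n_frames, m, n)
    for frame in stack:
        yield frame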
filtering rate for continuous areas (i.e. background) while higher image
frequencies remain untouched.

"""
import numpy as np
import matplotlib.pyplot as plt

from skimage import data
from skimage.morphology import disk
from skimage.filter import rank


image = data.coins().astype(np.uint16) * 16
selem = disk(20)

percentile_result = rank.mean_percentile(image, selem=selem, p0=.1, p1=.9)
bilateral_result = rank.mean_bilateral(image, selem=selem, s0=500, s1=500)
normal_result = rank.mean(image, selem=selem)

fig, axes = plt.subplots(nrows=3, figsize=(8, 10))
ax0, ax1, ax2 = axes

ax0.imshow(np.hstack((image, percentile_result)))
ax0.set_title('Percentile mean')
ax0.axis('off')

ax1.imshow(np.hstack((image, bilateral_result)))
ax1.set_title('Bilateral mean')
ax1.axis('off')

ax2.imshow(np.hstack((image, normal_result)))
ax2.set_title('Local mean')
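# A small hedged extension of the example above: quantify how strongly each
# filter smoothed the image. A larger mean absolute difference from the input
# indicates more aggressive smoothing; with a well-chosen (s0, s1) the
# bilateral mean should change edge pixels the least.
for name, result in [('percentile mean', percentile_result),
                     ('bilateral mean', bilateral_result),
                     ('local mean', normal_result)]:
    mad = np.mean(np.abs(result.astype(np.int32) - image.astype(np.int32)))
    print('%s: mean abs. change = %.1f' % (name, mad))

plt.show()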
import numpy as np
from math import sqrt

from skimage import io
from skimage.feature import blob_dog
from skimage.filter import rank
from skimage.filter.rank import median
from skimage.morphology import disk

disk10 = disk(10)    # structuring element; presumably precomputed once in the original file

loadedImage = True   # cleared below if the read fails
try:
    # Load the image!
    image_gray = io.imread(filePath + baseName +
                           str(i).zfill(6) + '-' + str(j).zfill(2) + '.pgm')
except IOError:
    loadedImage = False

if loadedImage:   # (Do nothing otherwise)
    # We run a median filter over the image to eliminate both isolated 'hot'
    # and 'cold' pixels, due to the camera itself or random noise, without
    # actually blurring the image. This adds slightly unpredictable distortion
    # to the image, but because we only consider a tiny local neighborhood of
    # each pixel, in practice it doesn't matter.
    image_gray = median(image_gray, disk(1))

    # The bilateral mean is effectively a selective Gaussian blur, smoothing
    # the image without mixing across edges of substantially different
    # structures.
    image_gray = rank.mean_bilateral(image_gray, selem=disk10, s0=5, s1=5)

    # Average and max brightness across the entire image...
    imAvBrightness = np.mean(image_gray)
    imMaxBrightness = np.max(image_gray)

    # Use the DoG method to find 'blobs' (hypothetical cells in the general
    # size range of cells) in the image, with slightly dynamic thresholding
    # based on overall image brightness.
    blobs_dog = blob_dog(image_gray, min_sigma=2, max_sigma=8,
                         threshold=0.02 + 0.02 * imMaxBrightness / 255.0,
                         overlap=0.9)

    if len(blobs_dog) > 0:   # Proceed with cell analysis if we see any blobs
        # Convert the sigma column to the approximate radius of each blob.
        blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2)
        blobdata = []        # Various data about blobs
        blobpos = []         # Positions of blobs
        sentBack = False     # Part of control code for labeling training sets; irrelevant here
        firstblob = True     # Because the first blob forms the first row in a NumPy array
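        # A hypothetical continuation (an illustration; only the variable names
        # above come from the original): record the position and brightness of
        # each detected blob for later analysis.
        for y, x, r in blobs_dog:
            blobpos.append((x, y))
            blobdata.append({'radius': r,
                             'brightness': image_gray[int(y), int(x)]})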