def refineMask(mask, imageSeries, numDilations=3, thresh=0.5, se=None): def corrMaskWithSourcePreConv(imageSeriesSmoothed, dilatedBinaryMask, sourceSmoothed): corrImage = np.zeros((imageSeries.shape[0], imageSeries.shape[1])) bounds = np.squeeze(pymorph.blob(dilatedMask, 'boundingbox', output='data')) for x in range(bounds[1], bounds[3]): for y in range(bounds[0], bounds[2]): if dilatedBinaryMask[x,y]>0: corr = stats.pearsonr(sourceSmoothed[1:-1], imageSeriesSmoothed[x,y,:])[0] corrImage[x,y] = corr return corrImage # calculate box for smoothing box = sig.boxcar(3) box = box / box.sum() imageSeriesSmoothed = nd.convolve1d(imageSeries, box, axis=2, mode='mirror') completeRefinedMask = np.zeros_like(mask) if se is None: se = np.array([[0,1,0],[1,1,1],[0,1,0]]) #se = np.array([[1,1,1],[1,1,1],[1,1,1]]) seedMask = mask.copy() > 0 for rep in range(numDilations): seedMask = pymorph.dilate(seedMask, se) for maskIndex in range(1,mask.max()+1): origMask = mask == maskIndex dilatedOrigMask = origMask.copy() > 0 for rep in range(numDilations): dilatedOrigMask = pymorph.dilate(dilatedOrigMask, se) forbiddenMask = np.logical_or(np.logical_and(seedMask, np.logical_not(dilatedOrigMask)), pymorph.dilate(completeRefinedMask)) # make smoothed source source = avgFromROIInSeries(imageSeries, origMask) sourceSmoothed = np.convolve(source, box) dilatedMask = (mask==maskIndex).copy() for rep in range(numDilations+1): dilatedMask = pymorph.dilate(dilatedMask) corrMask = corrMaskWithSourcePreConv(imageSeriesSmoothed, dilatedOrigMask, sourceSmoothed) threshMask = corrMask >= thresh newMask = np.logical_and(np.logical_not(forbiddenMask), np.logical_or(threshMask, origMask)) #completeRefinedMask = np.logical_xor(completeRefinedMask, newMask) completeRefinedMask += (newMask>0)*maskIndex #pdb.set_trace() completeRefinedMask[completeRefinedMask > maskIndex] = 0 return completeRefinedMask
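# refineMask() above depends on a helper, avgFromROIInSeries, that this snippet
# does not define.  The stand-in below is only a guess at its behaviour (the
# mean time course over the ROI pixels), and the synthetic call that follows is
# illustrative only, assuming refineMask's module imports numpy, pymorph and
# scipy (stats, signal as sig, ndimage as nd) under the aliases it uses.
import numpy as np

def avgFromROIInSeries(imageSeries, roiMask):
    # Average the (x, y, t) series over all pixels where roiMask is nonzero.
    return imageSeries[roiMask > 0].mean(axis=0)

np.random.seed(0)
series = np.random.rand(32, 32, 50)          # toy movie: 32x32 pixels, 50 frames
mask = np.zeros((32, 32), dtype=np.int32)
mask[10:14, 10:14] = 1                       # a single labelled ROI
refined = refineMask(mask, series, numDilations=2, thresh=0.3)
print(refined.max())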
def test_dilate(): f = np.zeros((8, 8), np.bool) Bs = [ np.reshape(B, (3, 3)) for B in ( [1, 1, 0, 1, 1, 0, 0, 0, 0], [1, 0, 0, 1, 1, 0, 0, 0, 0], [1, 0, 0, 0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0, 0, 0], ) ] for B in Bs: assert pymorph.dilate(f, B != 0).sum() == 0 assert pymorph.dilate(f, B).sum() == 0
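# Hedged companion check to the tests above (an addition, not from the original
# suite): dilating a single interior pixel should mark exactly as many pixels
# as the structuring element has set, assuming pymorph treats the centre of a
# plain 3x3 boolean array as the SE origin.  A symmetric SE is used so the
# reflection involved in dilation does not matter.
import numpy as np
import pymorph

def test_dilate_single_pixel():
    f = np.zeros((8, 8), bool)
    f[4, 4] = True
    B = np.array([[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]], bool)
    assert pymorph.dilate(f, B).sum() == B.sum()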
def run_one(fname, outname): N = 1 im = img.open(fname) #im = im.filter(ImageFilter.BLUR) im = im.resize((600, 600), img.ANTIALIAS) im = im.convert('L') im = invert(im) x = np.asarray(im) y = x size = 3 for i in range(N): y = morph.dilate(y, morph.sedisk(size)) y = morph.close(y, morph.sedisk(size)) jm = img.fromarray(y) jm = invert(jm) jm = jm.resize((400, 400), img.ANTIALIAS) jm.save(outname)
def create_shad(matte, target):
    """ Creates a shadowed image given target (to be shadowed) and matte.
    The matte can be smaller than the target, in which case it will be used
    at some random location.
    """
    # first get a bounding box of the shadow matte
    mask = np.array(matte < 1, dtype=int)
    mask = dilate(erode(mask, sedisk(3)), sedisk(3))
    left, upper, right, lower = PIL.Image.fromarray(mask).getbbox()
    # now cut it out
    mh, mw = matte.shape[:2]
    matte_bbox = matte[upper:lower, left:right]
    # get new dimensions
    mh, mw = matte_bbox.shape[:2]
    th, tw = target.shape[:2]
    new_matte = np.ones(target.shape)
    # get random position to insert the matte (cast to int for array indexing)
    matte_x = matte_y = 0
    if mh < th:
        matte_y = int((th - mh) * np.random.random())
    if mw < tw:
        matte_x = int((tw - mw) * np.random.random())
    new_matte[matte_y:matte_y + mh, matte_x:matte_x + mw] = matte_bbox
    return new_matte * target
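# Side note (hedged): the dilate(erode(...)) pair above is a morphological
# opening with the same structuring element, so it could equally be written as
# pymorph.open(mask, sedisk(3)).  The toy mask below only illustrates that
# equivalence.
import numpy as np
import pymorph

toy = np.zeros((40, 40), bool)
toy[10:30, 10:30] = True
toy[2, 2] = True          # small speck that the opening removes
two_step = pymorph.dilate(pymorph.erode(toy, pymorph.sedisk(3)), pymorph.sedisk(3))
direct = pymorph.open(toy, pymorph.sedisk(3))
print(np.array_equal(two_step, direct))   # expected: True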
def clean_mask(mask): # first erode to get rid of noise mask = erode(mask, sedisk(2)) # then dilate more to capture a slightly larger area mask = dilate(mask, sedisk(16)) return mask
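# Hedged illustration of clean_mask() on a synthetic mask (assumes the module
# already imports erode, dilate and sedisk from pymorph, as the function does;
# they are imported again here only to keep the example self-contained).
import numpy as np
from pymorph import erode, dilate, sedisk

mask = np.zeros((100, 100), bool)
mask[40:60, 40:60] = True      # real object
mask[5, 5] = True              # isolated noise pixel, removed by the erosion
cleaned = clean_mask(mask)
print(mask.sum(), cleaned.sum())   # the cleaned mask grows around the object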
def morph_sharp(im): img = numpy.zeros(im.shape, dtype=numpy.int32) # Choose a disk structuring element (3x3 disk) # Function could be modified to pass structuring element in se = pymorph.sedisk(r=1, dim=2) # Apply grayscale erosion Ie = pymorph.erode(im, se) # Apply grayscale dilation Id = pymorph.dilate(im, se) for i in range(0, im.shape[0]): for j in range(0, im.shape[1]): # Compute differences between original image and processed da = Id[i][j] - im[i][j] db = im[i][j] - Ie[i][j] if da < db: img[i][j] = Id[i][j] elif da > db: img[i][j] = Ie[i][j] else: img[i][j] = im[i][j] return img
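# Hedged, vectorized restatement of the toggle mapping in morph_sharp() above
# (not part of the original code): take the dilation where it is closer to the
# original value, the erosion where that is closer, and keep ties unchanged.
# It uses pymorph's default radius-1 disk rather than the original sedisk call.
import numpy
import pymorph

def morph_sharp_vectorized(im):
    se = pymorph.sedisk(1)
    Ie = pymorph.erode(im, se)
    Id = pymorph.dilate(im, se)
    da = Id.astype(numpy.int32) - im
    db = im.astype(numpy.int32) - Ie
    return numpy.where(da < db, Id,
                       numpy.where(da > db, Ie, im)).astype(numpy.int32)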
def find_ROI(In):
    Ain = cv2array(In)[:, :, 0]
    T = cv.CloneImage(In)
    cv.Threshold(In, T, 1, 255, cv.CV_THRESH_OTSU | cv.CV_THRESH_BINARY)
    I = cv2array(T)[:, :, 0]
    I = I > 0
    # White frame (ones along the image border)
    M = np.ones(np.shape(I))
    M[1: np.shape(I)[0] - 1, 1: np.shape(I)[1] - 1] = 0
    # Remove the border
    I = pymorph.erode(pymorph.erode(pymorph.erode(I)))
    M2 = M * I > 0
    M1 = M2 * 0
    # Dilate the border marker, constrained to I, until it stops changing
    while abs(np.sum(M1 - M2)) > 0.1:
        M1 = M2
        M2 = pymorph.dilate(M2)
        M2 = M2 * I
    M2 = pymorph.dilate(pymorph.dilate(pymorph.dilate(M2)))
    return M2 * Ain
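# Hedged sketch (not part of the original code): the while-loop in find_ROI()
# is conditional dilation until stability, i.e. morphological reconstruction of
# the marker M2 under the mask I.  The helper below re-expresses that loop on
# its own; pymorph's infrec() should give the same reconstruction directly.
import numpy as np
import pymorph

def reconstruct_by_dilation(marker, mask):
    prev = np.zeros_like(marker)
    cur = np.logical_and(marker, mask)
    while np.any(cur != prev):
        prev = cur
        cur = np.logical_and(pymorph.dilate(cur), mask)
    return cur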
def _getLineFilter(self, segmentSize, variation): smallDisk = pymorph.sedisk(1); bigDisk = pymorph.sedisk(2); horizontal_filter = numpy.zeros((variation*2+1,variation*2+1,segmentSize)) horizontal_surrounding = numpy.zeros((variation*2+1,variation*2+1,segmentSize)) index = -1 for i in range(-variation,variation+1): index = index + 1; # find the line between selected points points = bresenham(variation+i,0,variation-i,segmentSize-1) tmp = numpy.zeros((variation*2+1)*segmentSize).reshape((variation*2+1, segmentSize)) for l in range(0, len(points)): tup_point = points[l] tmp[tup_point[0], tup_point[1]] = 1 tmp_filter = pymorph.dilate(pymorph.binary(tmp), smallDisk) tmp_surrounding = pymorph.subm(pymorph.dilate(pymorph.binary(tmp), bigDisk) , pymorph.dilate(pymorph.binary(tmp), smallDisk)) horizontal_filter[index,:,:] = tmp_filter horizontal_surrounding[index,:,:] = tmp_surrounding return horizontal_filter, horizontal_surrounding
def Enleve_bord(im_lbl, mask=None): im_lbl = im_lbl * (im_lbl > 0) cadre = np.ones(im_lbl.shape) cadre[2: (im_lbl.shape[0] - 2), 2: (im_lbl.shape[1] - 2)] = 0 if mask is not None: import pymorph mask = pymorph.dilate(mask) cadre += mask > 0 print(cadre) lbl_bord = np.unique(cadre * im_lbl) for i in lbl_bord: im_lbl[im_lbl == i] = 0 return im_lbl
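# Hedged usage sketch for Enleve_bord() ("remove border"): labels touching the
# two-pixel image border are cleared, interior labels survive.  The toy
# labelled image below is illustrative only.
import numpy as np

im_lbl = np.zeros((12, 12), dtype=int)
im_lbl[0:3, 0:3] = 1      # touches the border, so it is removed
im_lbl[5:8, 5:8] = 2      # interior, so it is kept
cleaned = Enleve_bord(im_lbl)
print(np.unique(cleaned))  # expected: [0 2]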
def _get_line_filter(segment_size, variation):
    """Computes the filters that can be used to enhance vertical lines in an
    image.

    Args:
        segment_size: Size of the segment.
        variation: Variation along the horizontal axis, for lines that are not
            exactly vertical.

    Returns:
        Filters saved in 3D matrices; each 2D slice holds one filter.
    """
    smalldisk = pymorph.sedisk(1)
    bigdisk = pymorph.sedisk(2)
    horizontal_filter = numpy.zeros(
        (variation * 2 + 1, variation * 2 + 1, segment_size))
    horizontal_surrounding = numpy.zeros(
        (variation * 2 + 1, variation * 2 + 1, segment_size))
    index = -1
    # Generates the filters for each direction of lines
    for variation_index in range(-variation, variation + 1):
        index = index + 1
        points = bresenham(variation + variation_index, 0,
                           variation - variation_index, segment_size - 1)
        tmp = numpy.zeros((variation * 2 + 1) * segment_size).reshape(
            (variation * 2 + 1, segment_size))
        for point_ind in range(0, len(points)):
            tup_point = points[point_ind]
            tmp[tup_point[0], tup_point[1]] = 1
        tmp_filter = pymorph.dilate(pymorph.binary(tmp), smalldisk)
        tmp_surrounding = pymorph.subm(
            pymorph.dilate(pymorph.binary(tmp), bigdisk),
            pymorph.dilate(pymorph.binary(tmp), smalldisk))
        horizontal_filter[index, :, :] = tmp_filter
        horizontal_surrounding[index, :, :] = tmp_surrounding
    return horizontal_filter, horizontal_surrounding
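# Hedged sketch (not part of the snippet above) of one way the filter /
# surrounding pairs could be applied: correlate each line filter with the
# image, subtract the response of its surrounding band, and keep the strongest
# direction per pixel.  It assumes the bresenham() helper used by
# _get_line_filter() is available in the same module.
import numpy
from scipy import ndimage

def score_vertical_lines(image, segment_size=9, variation=2):
    line_filters, surroundings = _get_line_filter(segment_size, variation)
    best = numpy.full(image.shape, -numpy.inf)
    for k in range(line_filters.shape[0]):
        resp = (ndimage.correlate(image.astype(float), line_filters[k]) -
                ndimage.correlate(image.astype(float), surroundings[k]))
        best = numpy.maximum(best, resp)
    return best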
def bright_object_detection(image):
    """ Perform bright object detection on an array image."""
    # Store all intermediate steps in a dictionary. Useful for debugging.
    steps = dict()
    steps['input'] = image

    # Reduce noise using a median filter.
    med_filter_size = (MED_SIZE, MED_SIZE, MED_SIZE)
    steps['median'] = ndimg.median_filter(steps['input'], med_filter_size)

    # Convert median filtered image to grayscale.
    steps['luminance'] = scikits.image.color.rgb2gray(steps['median']) * 255.

    # Compute local pixel average.
    k_avg = np.ones((AVG_SIZE, AVG_SIZE)) / AVG_SIZE**2
    steps['average'] = ndimg.convolve(steps['luminance'], k_avg)

    # Compute local pixel variance.
    steps['diff_mean'] = steps['luminance'] - steps['average']
    steps['diff_mean_sq'] = steps['diff_mean'] * steps['diff_mean']
    steps['variance'] = ndimg.convolve(steps['diff_mean_sq'], k_avg)

    # Compute binary threshold image using the Mahalanobis distance. Use the
    # sign of the difference between the pixel and its local mean to ignore
    # dark pixels.
    steps['maha_sq'] = (steps['diff_mean'] > 0) * steps['diff_mean_sq'] / \
        steps['variance']
    steps['thresh_maha'] = (steps['maha_sq'] > (NUM_STDDEV * NUM_STDDEV))

    # Integrate global illumination effects by taking a top percentage of
    # intensities from the detected light regions.
    steps['masked_regions_lum'] = steps['thresh_maha'] * steps['luminance']
    steps['masked_regions_hist'] = pymorph.histogram(steps['masked_regions_lum'])
    steps['global_bright_thresh'] = int((len(steps['masked_regions_hist']) * \
        (1.0 - GLOBAL_BRIGHT_PCT)) + 0.5)
    steps['thresh_global'] = steps['masked_regions_lum'] >= \
        steps['global_bright_thresh']

    # Morphological operations on detected blobs.
    steps['detect_erode'] = pymorph.erode(steps['thresh_global'])
    steps['detect_dilate'] = pymorph.dilate(steps['detect_erode'])

    # Count bright objects. Connected components and raw pixels.
    steps['detect_labels'] = pymorph.label(steps['detect_dilate'])
    steps['bright_blob_count'] = steps['detect_labels'].max()
    steps['bright_pixel_count'] = sum(steps['masked_regions_hist']
                                      [steps['global_bright_thresh']:])
    return steps
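# Hedged, self-contained illustration of the thresholding idea used above:
# compare each pixel's positive deviation from its local mean against the local
# standard deviation (a one-sided Mahalanobis-style test).  The data and the
# lowercase constants are illustrative stand-ins for AVG_SIZE / NUM_STDDEV,
# which the snippet above references but does not define.
import numpy as np
import scipy.ndimage as ndimg

avg_size = 15
num_stddev = 2.5

lum = np.random.rand(120, 160) * 255.0                    # stand-in luminance
k_avg = np.ones((avg_size, avg_size)) / avg_size ** 2
local_mean = ndimg.convolve(lum, k_avg)
diff = lum - local_mean
local_var = ndimg.convolve(diff * diff, k_avg) + 1e-12    # avoid divide-by-zero
bright = (diff > 0) & (diff * diff / local_var > num_stddev ** 2)
print(bright.sum())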
def b(image, w=2, sigma=25, k=15): mask = -sigma * np.array([[1,1,1], [1,0,1], [1,1,1]]) erosion = np.copy(image) dilation = np.copy(image) for i in range(k): erosion = erode(erosion, mask) dilation = dilate(dilation, mask) filtered = np.copy(image) for i in range(w, image.shape[0]-w): for j in range(w, image.shape[1]-w): slice = np.array(image)[i-w:i+w+1, j-w:j+w+1] if (dilation[i][j] - image[i][j] > image[i][j] - erosion[i][j]): filtered[i][j] = 0 alpha = dominant_direction(slice, np.std) if alpha != None: alpha = math.radians(alpha + 90) filtered[i][j] = cv2.dilate(np.array(slice, np.uint8), np.array(direction(slice, alpha), np.uint8), iterations = k)[w][0] return filtered
def morph_toggleCE(im): img = numpy.zeros(im.shape, dtype=numpy.int32) se = pymorph.sedisk(r=1, dim=2) Ie = pymorph.erode(im, se) Id = pymorph.dilate(im, se) for i in range(0, im.shape[0]): for j in range(0, im.shape[1]): da = Id[i][j] - im[i][j] db = im[i][j] - Ie[i][j] if da < db: img[i][j] = Id[i][j] else: img[i][j] = Ie[i][j] return img
def region_prop(fig, subfig): # Inspired by: # http://stackoverflow.com/a/9059648/621449 c = subfig # set up the 'FilledImage' bit of regionprops. FilledImage = np.zeros(fig.shape[0:2]).astype('uint8') # set up the 'ConvexImage' bit of regionprops. ConvexImage = np.zeros(fig.shape[0:2]).astype('uint8') # calculate some things useful later: m = cv2.moments(c) # ** regionprops ** Area = m['m00'] Perimeter = cv2.arcLength(c,True) # bounding box: x,y,width,height BoundingBox = cv2.boundingRect(c) # centroid = m10/m00, m01/m00 (x,y) Centroid = ( m['m10']/m['m00'],m['m01']/m['m00'] ) # EquivDiameter: diameter of circle with same area as region EquivDiameter = np.sqrt(4*Area/np.pi) # Extent: ratio of area of region to area of bounding box Extent = Area/(BoundingBox[2]*BoundingBox[3]) # FilledImage: draw the region on in white cv2.drawContours( FilledImage, [c], 0, color=255, thickness=-1 ) # calculate indices of that region.. regionMask = (FilledImage==255) # FilledArea: number of pixels filled in FilledImage FilledArea = np.sum(regionMask) # PixelIdxList : indices of region. # (np.array of xvals, np.array of yvals) PixelIdxList = regionMask.nonzero() # CONVEX HULL stuff # convex hull vertices ConvexHull = cv2.convexHull(c) ConvexArea = cv2.contourArea(ConvexHull) # Solidity := Area/ConvexArea Solidity = Area/ConvexArea # convexImage -- draw on ConvexImage cv2.drawContours( ConvexImage, [ConvexHull], -1, color=255, thickness=-1 ) # ELLIPSE - determine best-fitting ellipse. centre,axes,angle = cv2.fitEllipse(c) MAJ = np.argmax(axes) # this is MAJor axis, 1 or 0 MIN = 1-MAJ # 0 or 1, minor axis # Note: axes length is 2*radius in that dimension MajorAxisLength = axes[MAJ] MinorAxisLength = axes[MIN] Eccentricity = np.sqrt(1-(axes[MIN]/axes[MAJ])**2) Orientation = angle EllipseCentre = centre # x,y Test = FilledImage.astype('uint8') mf = cv2.moments(Test) CentroidFilled = ( mf['m10']/mf['m00'],mf['m01']/mf['m00'] ) # # ** if an image is supplied with the fig: # # Max/Min Intensity (only meaningful for a one-channel img..) # MaxIntensity = np.max(img[regionMask]) # MinIntensity = np.min(img[regionMask]) # # Mean Intensity # MeanIntensity = np.mean(img[regionMask],axis=0) # # pixel value # PixelValues = img[regionMask] x0, y0, dx, dy = BoundingBox x1, y1 = x0 + dx, y0 + dy Image = fig[y0:y1, x0:x1] FilledImageFit = FilledImage[y0:y1, x0:x1] OImage = fig[y0-1:y1+1, x0-1:x1+1] NumPixels = Image.sum() Fillity = (NumPixels+0.0)/FilledArea crx, cry = (CentroidFilled[0]-x0, CentroidFilled[1]-y0) dxc = crx-(x1-x0)/2.0 dyc = cry-(y1-y0)/2.0 CentLength = math.sqrt(dxc*dxc + dyc*dyc) e = lambda fig: pymorph.erode(fig) d = lambda fig: pymorph.dilate(fig) o = lambda fig: pymorph.open(fig) c = lambda fig: pymorph.close(fig) a = lambda fun, n: reduce(lambda f1, f2: lambda x: f1(f2(x)), [fun]*n, lambda x: x) Thin = pymorph.thin(OImage) if num_holes(Image) >= 2: Inner = removeOuter(Thin) Inner = (a(d,7))(Inner>0) Outer = OImage > Inner ret = dict((k,v) for k, v in locals().iteritems() if k[0].isupper()) return ret
import cv2 import numpy import numpy as np import scipy import pylab as pl import pylab import pymorph from scipy import misc def s(fig): pl.imshow(fig); pl.gray(); pl.show() e = lambda fig: pymorph.erode(fig) d = lambda fig: pymorph.dilate(fig) o = lambda fig: pymorph.open(fig) c = lambda fig: pymorph.close(fig) a = lambda fun, n: reduce(lambda f1, f2: lambda x: f1(f2(x)), [fun]*n, lambda x: x) img= 255-cv2.imread('reps/2/2.png', cv2.CV_LOAD_IMAGE_GRAYSCALE) imgb = img > 128 BW=imgb # grab contours cs,_ = cv2.findContours( BW.astype('uint8'), mode=cv2.RETR_LIST, method=cv2.CHAIN_APPROX_SIMPLE ) # set up the 'FilledImage' bit of regionprops. filledI = np.zeros(BW.shape[0:2]).astype('uint8') # set up the 'ConvexImage' bit of regionprops. convexI = np.zeros(BW.shape[0:2]).astype('uint8') # for each contour c in cs: # will demonstrate with cs[0] but you could use a loop.
steps['global_bright_thresh'] = int((len(steps['masked_regions_hist']) * \
    (1.0 - GLOBAL_BRIGHT_PCT)) + 0.5)
steps['thresh_global'] = steps['masked_regions_lum'] >= \
    steps['global_bright_thresh']

print "Global filtered mask:"
plab.imshow(pymorph.overlay(steps['luminance'].astype('uint8'),
                            steps['thresh_global']))

###############################################################################
# Morphological operations on detected blobs.
# <demo> stop
# <demo> auto

steps['detect_erode'] = pymorph.erode(steps['thresh_global'])
steps['detect_dilate'] = pymorph.dilate(steps['detect_erode'])

print "Morphed mask (erode, dilate):"
plab.imshow(pymorph.overlay(steps['luminance'].astype('uint8'),
                            steps['detect_dilate']))

# <demo> stop
# <demo> auto

# Count bright objects. Connected components and raw pixels.
steps['detect_labels'] = pymorph.label(steps['detect_dilate'])
steps['bright_blob_count'] = steps['detect_labels'].max()
print "Bright blob count:", steps['bright_blob_count']

steps['bright_pixel_count'] = sum(steps['masked_regions_hist']
                                  [steps['global_bright_thresh']:])
print "Bright pixel count:", steps['bright_pixel_count']
import cv2
import numpy as np
import scipy
import pylab as pl
import pylab
import pymorph
from scipy import misc


def s(fig):
    pl.imshow(fig)
    pl.gray()
    pl.show()

e = lambda fig: pymorph.erode(fig)
d = lambda fig: pymorph.dilate(fig)
o = lambda fig: pymorph.open(fig)
c = lambda fig: pymorph.close(fig)
a = lambda fun, n: reduce(lambda f1, f2: lambda x: f1(f2(x)),
                          [fun] * n, lambda x: x)

img = 255 - cv2.imread('reps/2/2.png', cv2.CV_LOAD_IMAGE_GRAYSCALE)
imgb = img > 128
BW = imgb

# grab contours
cs, _ = cv2.findContours(BW.astype('uint8'),
                         mode=cv2.RETR_LIST,
                         method=cv2.CHAIN_APPROX_SIMPLE)

# set up the 'FilledImage' bit of regionprops.
filledI = np.zeros(BW.shape[0:2]).astype('uint8')
import pymorph as m import mahotas from numpy import where, reshape image = mahotas.imread('B.png') # Load image b1 = image[:,:,0] < 100 # Make a binary image from the thresholded red channel b2 = m.erode(b1, m.sedisk(4)) # Erode to enhance contrast of the bridge b3 = m.open(b2,m.sedisk(4)) # Remove the bridge b4 = b2-b3 # Bridge plus small noise b5 = m.areaopen(b4,1000) # Remove small areas leaving only a thinned bridge b6 = m.dilate(b3)*b5 # Extend the non-bridge area slightly and get intersection with the bridge. #b6 is image of end of bridge, now find single points b7 = m.thin(b6, m.endpoints('homotopic')) # Narrow regions to single points. labelled = m.label(b7) # Label endpoints. x1, y1 = reshape(where(labelled == 1),(1,2))[0] x2, y2 = reshape(where(labelled == 2),(1,2))[0] outputimage = m.overlay(b1, m.dilate(b7,m.sedisk(5))) mahotas.imsave('output.png', outputimage)
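# Hedged follow-up (not part of the original script): with the two bridge
# endpoints labelled above, the pixel distance between them is a natural
# measurement of the gap the bridge spans.
from math import hypot

bridge_length = hypot(x2 - x1, y2 - y1)
print(bridge_length)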
mask = filter_org > thres_ratio * peak

# Otsu threshold
T = mahotas.thresholding.otsu(im16)
thres_list.append(T)
mmask = im16 > thres_ratio * T
im_ma = im16 * mmask

# pymorph
labeled, nr_obj = ndimage.label(mask)
max_label = labeled[max_idx / w][max_idx % w]
max_mask = labeled == max_label
top_hat_mask = ndimage.morphology.white_tophat(mask, (4, 4))
dmask = pm.dilate(max_mask)
ndmask = ~dmask
peak_template = dmask * im16_org
removed_peak = ndmask * im16_org
ratio_peak = removed_peak.max() / (peak * 1.)
second_peak_list.append(ratio_peak)

if False:  # ratio_peak > 0.9:
    plt.imshow(im16_org)
    output_path = output_dir + '/peak_' + str("%0.3f" % ratio_peak) + \
        f[index].rsplit('.', 2)[0] + '.png'
    plt.savefig(output_path)