import cv2
import numpy as np
from scipy import ndimage


def remove_space(img, threshold=40, kernel_size=3, keepprop=6, vborder=3,
                 resize=False):
    # Filter horizontal
    imgfilt = ndimage.minimum_filter(img[6:-6].max(2), size=kernel_size)
    rows = np.max(imgfilt, 0) > threshold
    from_, to_ = np.where(rows)[0][[0, -1]]
    # Keep a random subset of the blank columns so spacing is not removed
    # entirely
    keeprand = np.random.choice(range(from_, to_), (to_ - from_) // keepprop,
                                replace=False)
    rows[keeprand] = True
    imgout = img[:, rows]
    # Filter vertical
    h, w, _ = imgout.shape
    imgfilt = ndimage.minimum_filter(imgout[:, vborder:-vborder].max(2),
                                     size=kernel_size)
    cols = np.where(np.max(imgfilt, 1) > threshold)[0]
    from_ = 0 if cols[0] < 2 else cols[0] + vborder
    to_ = h if cols[-2] == h - 2 * vborder - 1 else cols[-1]
    imgout = imgout[from_:to_]
    if resize:
        # Rescale to the input image height
        scale = h / imgout.shape[0]
        imgout = cv2.resize(imgout, (int(round(scale * imgout.shape[1])), h))
    return imgout

import numpy
from scipy import ndimage as nd


def fill_gaps(data, mask):
    """Interpolate over the gaps in the data.

    Parameters
    ----------
    data : numpy.ndarray
        Data to have values filled in.
    mask : float or numpy.ndarray
        If a numpy.ndarray, it is assumed to be a mask with values equal to 1
        where they should be interpolated over. If a float, pixels with that
        value will be replaced.
    """
    ys, xs = data.shape
    if isinstance(mask, numpy.ndarray):
        mask = (mask == 0)  # True where the data are good
    else:
        mask = (data != mask)
    for i in range(ys):
        x = numpy.arange(xs)
        rdata = data[i, :]
        rmask = mask[i, :]
        # shrink the good regions so the edges of each gap are also replaced
        rmask = nd.minimum_filter(rmask, size=3)
        rdata = numpy.interp(x, x[rmask], rdata[rmask])
        data[i, rmask == 0] = rdata[rmask == 0]
    return data

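# Minimal usage sketch for fill_gaps (my own demo values, not from the
# original source): one pixel is marked with a sentinel, and that pixel plus
# its immediate row neighbours are replaced by linear interpolation.
def demo_fill_gaps():
    import numpy
    demo = numpy.arange(25, dtype=float).reshape(5, 5)
    demo[2, 2] = -999.0  # sentinel marking a bad pixel
    return fill_gaps(demo, -999.0)  # row 2 becomes [10, 11, 12, 13, 14]
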
import numpy as np
import scipy.ndimage as sndi
from numpy.testing import assert_equal


def test_multiple_modes():
    # Test that the filters with multiple mode capabilities for different
    # dimensions give the same result as applying a single mode.
    arr = np.array([[1., 0., 0.],
                    [1., 1., 0.],
                    [0., 0., 0.]])

    mode1 = 'reflect'
    mode2 = ['reflect', 'reflect']

    assert_equal(sndi.gaussian_filter(arr, 1, mode=mode1),
                 sndi.gaussian_filter(arr, 1, mode=mode2))
    assert_equal(sndi.prewitt(arr, mode=mode1),
                 sndi.prewitt(arr, mode=mode2))
    assert_equal(sndi.sobel(arr, mode=mode1),
                 sndi.sobel(arr, mode=mode2))
    assert_equal(sndi.laplace(arr, mode=mode1),
                 sndi.laplace(arr, mode=mode2))
    assert_equal(sndi.gaussian_laplace(arr, 1, mode=mode1),
                 sndi.gaussian_laplace(arr, 1, mode=mode2))
    assert_equal(sndi.maximum_filter(arr, size=5, mode=mode1),
                 sndi.maximum_filter(arr, size=5, mode=mode2))
    assert_equal(sndi.minimum_filter(arr, size=5, mode=mode1),
                 sndi.minimum_filter(arr, size=5, mode=mode2))
    assert_equal(sndi.gaussian_gradient_magnitude(arr, 1, mode=mode1),
                 sndi.gaussian_gradient_magnitude(arr, 1, mode=mode2))
    assert_equal(sndi.uniform_filter(arr, 5, mode=mode1),
                 sndi.uniform_filter(arr, 5, mode=mode2))

import numpy as np
from numpy.testing import assert_array_equal
from scipy import ndimage


def test_locmin(self):
    arr = np.ones((9, 12))
    arr[4, 3] = -2
    arr[4, 7] = -5

    # Window of 8: only the global minimum survives
    filt = ndimage.minimum_filter(arr, 8, mode='constant',
                                  cval=np.min(arr) - 1)
    filt[filt == np.max(arr)] = -999
    minout = (arr == filt)
    yy, xx = np.where(minout == 1)
    assert (yy, xx) == (4, 7)

    # Window of 3: both local minima are found
    filt = ndimage.minimum_filter(arr, 3, mode='constant',
                                  cval=np.min(arr) - 1)
    filt[filt == np.max(arr)] = -999
    minout = (arr == filt)
    yy, xx = np.where(minout == 1)
    assert_array_equal(np.array([4, 4]), yy)
    assert_array_equal(np.array([3, 7]), xx)

import matplotlib.pyplot as plt
from scipy.ndimage import correlate, maximum_filter, minimum_filter


def bf_fluo_correlate(embryo_idx, bf_image, fluo_image, normalized=False,
                      smoothed=None, dim=3):
    """Plot correlation between bf/fluo images, pixel-by-pixel or smoothed
    in tiles."""
    norm_str = ('N' if normalized else 'Unn') + 'ormalized'
    smooth_str = 'unsmoothed'
    if normalized:
        bf_image = normalize_img(bf_image)  # project-level helper
        fluo_image = normalize_img(fluo_image)
    if smoothed:
        if smoothed == 'avg':
            weights = [[1 / (dim ** 2)] * dim] * dim
            bf_image = correlate(bf_image, weights)
            fluo_image = correlate(fluo_image, weights)
        elif smoothed == 'max':
            bf_image = maximum_filter(bf_image, size=dim)
            fluo_image = maximum_filter(fluo_image, size=dim)
        elif smoothed == 'min':
            bf_image = minimum_filter(bf_image, size=dim)
            fluo_image = minimum_filter(fluo_image, size=dim)
        else:
            raise ValueError('smoothed can only be avg, max, or min')
        smooth_str = 'smoothed by ' + smoothed
    plt.scatter(bf_image.flatten(), fluo_image.flatten())
    plt.xlabel(f"{norm_str} bf frame val")
    plt.ylabel(f"{norm_str} fluo frame val")
    plt.title(f"Correlation plot for {smooth_str} embryo {embryo_idx}")
    plt.show()

import numpy as np
from scipy import ndimage


def quick_bg_fix(raw_data, npix=4112):
    # integer division so the values can be used as indices (Python 3)
    half = npix // 2
    left_xx = np.arange(half)
    right_xx = np.arange(half, npix)
    left_bg = ndimage.minimum_filter(
        ndimage.gaussian_filter(raw_data[:half], 3), size=100)
    right_bg = ndimage.minimum_filter(
        ndimage.gaussian_filter(raw_data[half:], 3), size=100)
    data = raw_data.copy()
    data[left_xx] = raw_data[left_xx] - left_bg
    data[right_xx] = raw_data[right_xx] - right_bg
    return data

from scipy import ndimage


def filtroMin(I):
    # Apply a 3x3 minimum filter to each colour channel separately
    If = I.copy()
    Ir = If[:, :, 0]
    Iv = If[:, :, 1]
    Ia = If[:, :, 2]
    Ir = ndimage.minimum_filter(Ir, 3)
    Iv = ndimage.minimum_filter(Iv, 3)
    Ia = ndimage.minimum_filter(Ia, 3)
    If[:, :, 0] = Ir
    If[:, :, 1] = Iv
    If[:, :, 2] = Ia
    return If

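# Note (my own addition, not in the original source): the three channels are
# filtered independently above, so a single call with a size spanning only
# the spatial axes should give the same result in one pass.
def filtroMin_vectorized(I):
    from scipy import ndimage
    # size=(3, 3, 1) applies the 3x3 minimum to each channel separately
    return ndimage.minimum_filter(I, size=(3, 3, 1))
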
import numpy as np
import xarray as xr
from scipy import ndimage


def file_loop(f):
    # ua is a project-level utilities module (ua.closest_point)
    print('Doing file: ' + f)
    dic = xr.open_dataset(f)
    res = []
    outt = dic['t_lag0'].values
    outp = dic['p'].values

    for nb in range(5):
        # temporarily replace NaNs so the gradient can be computed, then
        # mask out pixels with implausibly large gradients
        boole = np.isnan(outp)
        outp[boole] = -1000
        gg = np.gradient(outp)
        outp[boole] = np.nan
        outp[abs(gg[1]) > 300] = np.nan
        outp[abs(gg[0]) > 300] = np.nan

    gradi = np.gradient(outt)
    grad = gradi[1]

    # local temperature minima colder than -50
    maxoutt = (outt == ndimage.minimum_filter(outt, 20, mode='constant',
                                              cval=np.amin(outt) - 1))
    maxoutt = maxoutt.astype(int)
    yt, xt = np.where((maxoutt == 1) & (outt < -50))
    tstr = ['t'] * len(yt)

    # local precipitation maxima above 10
    maxoutp = (outp == ndimage.maximum_filter(outp, 20, mode='constant',
                                              cval=np.amax(outp) + 1))
    maxoutp = maxoutp.astype(int)
    yp, xp = np.where((maxoutp == 1) & (outp > 10))

    # local gradient minima below -10
    maxoutg = (grad == ndimage.minimum_filter(grad, 20, mode='constant',
                                              cval=np.amin(grad) - 1))
    maxoutg = maxoutg.astype(int)
    yg, xg = np.where((maxoutg == 1) & (grad < -10))
    gstr = ['g'] * len(yg)

    tstr.extend(gstr)
    tglist = tstr
    tgx = [item for sublist in [xt, xg] for item in sublist]
    tgy = [item for sublist in [yt, yg] for item in sublist]
    points = np.array(list(zip(tgy, tgx)))

    for point in zip(yp, xp):
        try:
            pos = ua.closest_point(point, points)
        except ValueError:
            continue
        if tglist[pos] == 't':
            res.append((tglist[pos], outt[tuple(points[pos])], outp[point]))
        if tglist[pos] == 'g':
            res.append((tglist[pos], grad[tuple(points[pos])], outp[point]))

    dic.close()
    return res

import numpy as np
import scipy.ndimage as ndim


def difference_of_gaussians(image, octaves_num=4, σ=1.6, scales_num=3):
    differences = []
    # computing all differences
    for octave in range(octaves_num):
        image_zoomed = ndim.zoom(image, 0.5 ** octave)
        differences.append(
            difference_of_gaussians_one_octave(image_zoomed, σ, scales_num))

    # find keypoints
    # computing extremal values in 3x3x3 windows from every difference,
    # excluding the centre voxel itself
    footprint = np.ones((3, 3, 3))
    footprint[1, 1, 1] = 0
    keypoints_for_octaves = []
    for octave in range(octaves_num):
        diff = differences[octave]
        # computing indices of the extrema
        maxima_around = ndim.maximum_filter(diff, footprint=footprint)[1:4]
        minima_around = ndim.minimum_filter(diff, footprint=footprint)[1:4]
        maxima_mask = diff[1:4] > maxima_around
        minima_mask = diff[1:4] < minima_around
        keypoints_candidates = np.argwhere(maxima_mask | minima_mask)
        draw_image_with_points('data/Episcopal_Gaudi/EG_2.jpg',
                               np.fliplr(keypoints_candidates[:, 1:]),
                               (255, 0, 0), 'temp.jpg')  # project-level helper

        # filter keypoints with low contrast
        keypoints = []
        for s in range(scales_num):
            keypoints_after_filtering = filter_low_contrast_keypoints(
                keypoints_candidates[keypoints_candidates[:, 0] == s][:, 1:],
                differences[octave][s:s + 3, :, :])
            keypoints.append(keypoints_after_filtering)
        keypoints_for_octaves.append(np.concatenate(keypoints, axis=0))
    return keypoints_for_octaves

import numpy as np
from operator import itemgetter
from scipy.ndimage import maximum_filter, minimum_filter


def extract_spectral_peaks_2(self, spectrogram):
    """Extract spectral peaks from the spectrogram of an audio signal.

    :param spectrogram: time-frequency representation of an audio signal
    :return: list of spectral peaks plus their time and frequency indices
    """
    # computing local maximum points with the specified maximum filter dimension
    local_max_values = maximum_filter(input=spectrogram,
                                      size=(self.maximum_filter_height,
                                            self.maximum_filter_width))
    # extracting time and frequency information for local maximum points
    j, i = np.where(spectrogram == local_max_values)
    peaks = list(zip(i, j))
    # computing local minimum points with specified minimum filter dimension
    local_min_values = minimum_filter(input=spectrogram,
                                      size=(self.minimum_filter_height,
                                            self.minimum_filter_width))
    # extracting time and frequency information for local minimums
    k, m = np.where(spectrogram == local_min_values)
    lows = list(zip(m, k))
    # discarding spectral points which are both a local maximum and a local
    # minimum (flat regions)
    spectral_peaks = list(set(peaks) - set(lows))
    # time and frequency information for extracted spectral peaks
    time_indices = [i[0] for i in spectral_peaks]
    freq_indices = [i[1] for i in spectral_peaks]
    spectral_peaks.sort(key=itemgetter(0))
    return spectral_peaks, time_indices, freq_indices

import pickle
import sys

import numpy
from matplotlib import pyplot
from scipy import ndimage
from skimage import feature, filters, restoration


def processImages():
    # Originally Python 2 / old skimage (cPickle, filter.tv_denoise,
    # filter.threshold_adaptive); updated to the current module names
    sims = pickle.load(open('AmuInstSimMats.pkl', 'rb'))
    for i, sim in enumerate(sims):
        pyplot.figure(0, (16, 9))
        pyplot.imshow(sim, vmin=0, vmax=1, cmap=pyplot.get_cmap('gray'),
                      aspect='auto', origin='lower')
        pyplot.title('Unfiltered Sim Matrix ' + str(i))
        pyplot.savefig('Unfiltered Sim Matrix ' + str(i) + '.jpg')

        sim64 = numpy.array(sim, numpy.float64)

        pyplot.figure(1, (16, 9))
        pyplot.imshow(restoration.denoise_tv_chambolle(sim64, weight=1),
                      vmin=0, vmax=1, cmap=pyplot.get_cmap('gray'),
                      aspect='auto', origin='lower')
        pyplot.title('TV_Denoise ' + str(i))
        pyplot.savefig('TV_Denoise ' + str(i) + '.jpg')

        pyplot.figure(2, (16, 9))
        # threshold_local replaces the old threshold_adaptive; comparing
        # against it recovers the binary image the old call returned
        pyplot.imshow(sim64 > filters.threshold_local(sim64, 21),
                      vmin=0, vmax=1, cmap=pyplot.get_cmap('gray'),
                      aspect='auto', origin='lower')
        pyplot.title('Threshold_Adaptive ' + str(i))
        pyplot.savefig('Threshold_Adaptive ' + str(i) + '.jpg')

        pyplot.figure(3, (16, 9))
        pyplot.imshow(ndimage.minimum_filter(sim64, size=2),
                      vmin=0, vmax=1, cmap=pyplot.get_cmap('gray'),
                      aspect='auto', origin='lower')
        pyplot.title('Local Minimum_Filter ' + str(i))
        pyplot.savefig('Local Minimum_Filter ' + str(i) + '.jpg')

        pyplot.figure(4, (16, 9))
        # 8x8 "beat diagonal" template: all ones with zeros on the diagonal
        template = numpy.ones((8, 8)) - numpy.eye(8)
        pyplot.imshow(feature.match_template(sim64, template),
                      vmin=0, vmax=1, cmap=pyplot.get_cmap('gray'),
                      aspect='auto', origin='lower')
        pyplot.title('Match_Template with my own 8x8 beat diagonal template '
                     + str(i))
        pyplot.savefig('Match_Template with my own 8x8 beat diagonal template '
                       + str(i) + '.jpg')
        sys.exit()

from scipy.ndimage import minimum_filter


def get_minimums(self, floor):
    # True where the cell is strictly lower than all four edge-adjacent
    # neighbours; cval=10 makes out-of-grid cells count as high ground
    return minimum_filter(
        floor,
        footprint=[[0, 1, 0], [1, 0, 1], [0, 1, 0]],
        mode='constant',
        cval=10,
    ) > floor

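# Standalone sketch of the same local-minimum test (my own demo values;
# get_minimums above is a method, so the call is reproduced here with an
# explicit footprint). cval=10 treats cells outside the grid as high ground,
# so border cells can still qualify as minima.
def demo_get_minimums():
    import numpy as np
    from scipy.ndimage import minimum_filter
    floor = np.array([[2, 1, 9],
                      [9, 9, 9],
                      [9, 9, 0]])
    return minimum_filter(
        floor,
        footprint=[[0, 1, 0], [1, 0, 1], [0, 1, 0]],
        mode='constant',
        cval=10,
    ) > floor  # True at (0, 1) and (2, 2)
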
import random

import numpy as np
from scipy import ndimage


def split_on_dash(img, label, threshold=40, kernel_size=2):
    # Filter horizontal
    imgfilt = ndimage.minimum_filter(img[6:-6].max(2), size=kernel_size)
    rows = np.where(np.max(imgfilt, 0) > threshold)[0]
    diff = rows[1:] - rows[:-1]
    try:
        nbigcuts = len(np.where(diff >= 15)[0])
        minbigcut = min(diff[diff >= 15])
        nmdlcuts = len(
            np.where((diff < minbigcut) & (diff > (minbigcut - 12)))[0])
        if not ((nbigcuts == 4) & (nmdlcuts == 0)):
            return None
    except ValueError:
        # min() over an empty selection: no big cuts found
        return None
    cuts = ((rows[np.where(diff > 15)[0] + 1] +
             rows[np.where(diff > 15)[0]]) / 2).astype(np.int16)
    if (cuts[0] < 80) or (img.shape[1] - cuts[-1] < 80):
        return None
    imgl = [img[:, :cuts[0]], img[:, cuts[1]:cuts[2]], img[:, cuts[3]:]]
    labl = label.split('-')
    if random.choice([True, False]):
        # concatenate pairs of segments to build new samples
        for p1, p2 in [(1, 0), (2, 0), (2, 1)]:
            imgl.append(
                remove_space(np.concatenate((imgl[p1], imgl[p2]), 1),
                             keepprop=random.randrange(3, 20)))
            labl.append(labl[p1] + labl[p2])
        outls = [(i, l) for i, l in zip(imgl[3:], labl[3:])]
    else:
        outls = [(remove_space(i, keepprop=random.randrange(3, 20)), l)
                 for i, l in zip(imgl, labl)]
    return outls

import numpy as np
from operator import itemgetter
from scipy.ndimage import maximum_filter, minimum_filter


def extract_spectral_peaks(self, spectrogram):
    """A method to extract spectral peaks given the spectrogram of an audio.

    Parameters:
        spectrogram (numpy.ndarray): Time-frequency representation of an audio.

    Returns:
        List: list of spectral peaks.
    """
    # computing local maximum points with the specified maximum filter dimension
    local_max_values = maximum_filter(input=spectrogram,
                                      size=(self.maximum_filter_height,
                                            self.maximum_filter_width))
    # extracting time and frequency information for local maximum points
    j, i = np.where(spectrogram == local_max_values)
    peaks = list(zip(i, j))
    # computing local minimum points with specified minimum filter dimension
    local_min_values = minimum_filter(input=spectrogram,
                                      size=(self.minimum_filter_height,
                                            self.minimum_filter_width))
    # extracting time and frequency information for local minimums
    k, m = np.where(spectrogram == local_min_values)
    lows = list(zip(m, k))
    # discarding spectral points which are both a local maximum and a local
    # minimum (flat regions)
    spectral_peaks = list(set(peaks) - set(lows))
    # time and frequency information for extracted spectral peaks
    time_indices = [i[0] for i in spectral_peaks]
    freq_indices = [i[1] for i in spectral_peaks]
    spectral_peaks.sort(key=itemgetter(0))
    return spectral_peaks, time_indices, freq_indices

from scipy import ndimage
from scipy.ndimage import gaussian_filter


def clip(a):
    # Estimate a smooth background: blur, then take the local minimum over a
    # large window
    result = ndimage.minimum_filter(gaussian_filter(a, 4), size=130)
    # earlier percentile-clipping approach, kept for reference:
    # bg = np.percentile(a, 45)
    # return np.clip(a, -1e9, bg)
    return result

from scipy import ndimage
from scipy.signal import wiener


def denoise(img):
    tmp = ndimage.minimum_filter(img, 3)
    tmp = wiener(tmp, 2)
    tmp = ndimage.maximum_filter(tmp, 2)
    tmp = ndimage.rank_filter(tmp, 2, 2)
    tmp = ndimage.gaussian_filter(tmp, 0.5)
    return tmp

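# Quick smoke test for denoise (synthetic noisy image, my own, not from the
# original source).
def demo_denoise():
    import numpy as np
    rng = np.random.default_rng(0)
    noisy = rng.normal(loc=100.0, scale=10.0, size=(64, 64))
    return denoise(noisy)
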
import numpy as np
import scipy.ndimage as sndi
from numpy.testing import assert_equal


def test_multiple_modes_sequentially():
    # Test that the filters with multiple mode capabilities for different
    # dimensions give the same result as applying the filters with
    # different modes sequentially
    arr = np.array([[1., 0., 0.],
                    [1., 1., 0.],
                    [0., 0., 0.]])

    modes = ['reflect', 'wrap']

    expected = sndi.gaussian_filter1d(arr, 1, axis=0, mode=modes[0])
    expected = sndi.gaussian_filter1d(expected, 1, axis=1, mode=modes[1])
    assert_equal(expected, sndi.gaussian_filter(arr, 1, mode=modes))

    expected = sndi.uniform_filter1d(arr, 5, axis=0, mode=modes[0])
    expected = sndi.uniform_filter1d(expected, 5, axis=1, mode=modes[1])
    assert_equal(expected, sndi.uniform_filter(arr, 5, mode=modes))

    expected = sndi.maximum_filter1d(arr, size=5, axis=0, mode=modes[0])
    expected = sndi.maximum_filter1d(expected, size=5, axis=1, mode=modes[1])
    assert_equal(expected, sndi.maximum_filter(arr, size=5, mode=modes))

    expected = sndi.minimum_filter1d(arr, size=5, axis=0, mode=modes[0])
    expected = sndi.minimum_filter1d(expected, size=5, axis=1, mode=modes[1])
    assert_equal(expected, sndi.minimum_filter(arr, size=5, mode=modes))

import cv2
import numpy as np
import pylab
import scipy.ndimage as nd

import structure  # project-level module


def find_corners(im, name):
    # Originally Python 2 (print statement, nd.filters namespace); updated
    coherence, _, _ = structure.orientation_field(im, 11)
    pylab.imshow(im, cmap=pylab.cm.gray)
    pylab.figure()
    pylab.imshow(coherence)
    pylab.title('coherence')
    pylab.colorbar()
    local_mean = nd.gaussian_filter(im.astype(float), 20)
    local_variance = (nd.gaussian_filter(im.astype(float) ** 2.0, 20)
                      - local_mean ** 2)
    pylab.figure()
    pylab.imshow(np.sqrt(local_variance) / local_mean, cmap=pylab.cm.gray)
    pylab.title('std / mean')
    pylab.colorbar()
    potential_corners = coherence / np.sqrt(local_variance)
    pylab.figure()
    pylab.imshow(potential_corners, cmap=pylab.cm.gray)
    pylab.colorbar()
    # find local minima with a 20x20 suppression window
    pc_max = nd.minimum_filter(potential_corners, (20, 20))
    corners = potential_corners == pc_max
    print(np.sum(corners))
    corners = nd.maximum_filter(corners, (5, 5))  # dilate for display
    pylab.show()
    imtmp = np.dstack((im, im, im))
    imtmp[:, :, 2][corners] = 255
    cv2.imshow(name, imtmp[::2, ::2])

from scipy import ndimage


def _roll_ball(file, DAPI_roi, size=20):
    blurred = _gaussian_blur(file, DAPI_roi)  # project-level helper
    # Background subtraction
    background = ndimage.minimum_filter(blurred, size=8)
    result = blurred - background
    return result

from itertools import combinations

from scipy.ndimage import minimum_filter


def apply_min_filter(cells, start_pos, min_dist):
    # farray / transpose / logical_and come from the surrounding project
    # (quippy-style array helpers)
    ncells = cells.copy()
    ## ncells[ncells == 0] = 2*ncells.max()
    ncells = farray(minimum_filter(ncells, size=min_dist))
    minima = [tuple(x) for x in transpose(
        (logical_and(ncells == cells, ncells != ncells.max())).nonzero())]

    class Duplicate(Exception):
        pass

    # Repeatedly drop, from each too-close pair, the minimum further from
    # start_pos, until no pair of minima is closer than min_dist
    while True:
        try:
            for min1, min2 in combinations(minima, 2):
                if (farray(min1) - farray(min2)).norm() < min_dist:
                    print('duplicate', min1, min2)
                    raise Duplicate
            else:
                break
        except Duplicate:
            d1 = (farray(min1) - farray(start_pos)).norm()
            d2 = (farray(min2) - farray(start_pos)).norm()
            print(d1, d2)
            if d1 < d2:
                print('removing', min2)
                minima.remove(min2)
            else:
                print('removing', min1)
                minima.remove(min1)
    return minima

import scipy.ndimage as nd


def adjacent_labels(labels):
    # For each pixel on a region boundary, return the label of the adjacent
    # region; interior and far-background pixels become 0
    m1 = nd.maximum_filter(labels, size=3)
    m1[m1 == labels] = 0
    m2 = nd.minimum_filter(labels, size=3)
    m2[m2 == labels] = 0
    m1[m2 > 0] = m2[m2 > 0]
    return m1

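# Minimal usage sketch for adjacent_labels (toy label image, my own): pixels
# near a region boundary pick up the id of the nearby region, everything
# else stays 0.
def demo_adjacent_labels():
    import numpy as np
    labels = np.zeros((5, 5), int)
    labels[1:3, 1:3] = 1
    labels[3:5, 3:5] = 2
    return adjacent_labels(labels)
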
import numpy as np
import scipy.ndimage as ndi

import buzzard as buzz


def get_slopes(self, reso, n, fp, normalize=True, intersect=False):
    ds = buzz.DataSource()
    with ds.open_araster(self.dsm_path(reso, n)).close as r:
        if intersect:
            fp = r.fp.dilate(fp.rlength // 2) & fp
        arr = r.get_data(fp=fp.dilate(1))
        nodata_mask = arr == r.nodata
    nodata_mask = ndi.binary_dilation(nodata_mask)
    # 4-connected neighbourhood
    kernel = [
        [0, 1, 0],
        [1, 1, 1],
        [0, 1, 0],
    ]
    # up-slope: steepest rise to a neighbour, in degrees
    arru = ndi.maximum_filter(arr, None, kernel) - arr
    arru = np.arctan(arru / fp.pxsizex)
    arru = arru / np.pi * 180.
    arru[nodata_mask] = 0
    arru = arru[1:-1, 1:-1]
    # down-slope: steepest drop to a neighbour, in degrees
    arrd = arr - ndi.minimum_filter(arr, None, kernel)
    arrd = np.arctan(arrd / fp.pxsizex)
    arrd = arrd / np.pi * 180.
    arrd[nodata_mask] = 0
    arrd = arrd[1:-1, 1:-1]
    arr = np.dstack([arrd, arru])
    if normalize:
        arr = arr / 45 - 1
    return arr

import scipy.ndimage as nd


def sliding_minima(img=None, weight=1., window_size=30,
                   boundary_condition='reflect'):
    '''
    Subtracts the minimum calculated within a sliding window from the centre
    of the window.

    Parameters
    ----------
    img : array_like
        Single image as numpy array or multiple images as array-like object
    weight : scalar
        Fraction of minima to be subtracted from each pixel.
        Value of `weight` should be in the interval (0.0, 1.0).
    window_size : scalar or tuple
        Sets the size of the sliding window.
        Specifying `window_size=3` is equivalent to `window_size=(3,3)`.
    boundary_condition : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}
        Mode of handling array borders.
    '''
    img_out = img - weight * nd.minimum_filter(img,
                                               size=window_size,
                                               mode=boundary_condition)
    return img_out

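# Minimal usage sketch for sliding_minima (synthetic background, my own): the
# window minimum tracks the slowly varying floor, which is then subtracted.
def demo_sliding_minima():
    import numpy as np
    x = np.linspace(0, 1, 100)
    img = np.outer(x, x) + 0.05  # smooth ramp standing in for background
    return sliding_minima(img=img, weight=0.9, window_size=15)
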
import numpy as np
from scipy import ndimage


def find_v_cut(cut):
    # Dynamic programming: find the minimum-cost vertical seam through the
    # overlap region
    overlap_x = cut.shape[1]
    if overlap_x < 3:
        return None  # overlap too small
    w1height = cut.shape[0]
    min_vals = np.empty_like(cut)
    min_vals[0] = cut[0]
    for i in range(1, w1height, 1):
        # cheapest of the three predecessors in the row above
        min_vals[i] = cut[i] + ndimage.minimum_filter(
            min_vals[i - 1:i + 1],
            footprint=[[1, 1, 1], [0, 0, 0]],
            mode='constant')[1]
        # boundary columns, where the constant padding would leak in
        min_vals[i, 0] = cut[i, 0] + min(min_vals[i - 1, 0],
                                         min_vals[i - 1, 1])
        # fixed from min_vals[-i - 1, -1], which mixed up the row index
        min_vals[i, -1] = cut[i, -1] + min(min_vals[i - 1, -1],
                                           min_vals[i - 1, -2])
    # backtrack from the cheapest end point; seam positions must be ints
    res = np.empty(w1height, dtype=int)
    res[-1] = np.argmin(min_vals[-1])
    for i in range(w1height - 2, -1, -1):
        res[i] = res[i + 1] + np.argmin(
            min_vals[i, max(res[i + 1] - 1, 0):
                     min(overlap_x, res[i + 1] + 2)]) \
            - 1 + int(res[i + 1] - 1 < 0)
    return res

from scipy.ndimage import find_objects, label, maximum_filter, minimum_filter


def pointFinder(image, seeing_pix, threshold):
    """Take an image file and identify where point sources are. This is done
    by utilizing scipy maximum and minimum filters.

    Inputs:
        image: an array of the image
        seeing_pix: a guess of the seeing size in pixels, used to define the
            size of the max/min filters
        threshold: a threshold (in counts) used to distinguish a detection
            from noise fluctuations
    Output:
        cutouts: list of cutouts of sources found in this image."""
    # First, find maxima in the image.
    # Filter sizes must be integers, hence the int() casts.
    peaks = maximum_filter(image, size=int(0.1 * seeing_pix))
    maxima = (peaks == image)  # boolean array marking locations of the maxima
    # Now compute the minimum for background subtraction. This gets rid of
    # "maxima" that are really just noise troughs.
    troughs = minimum_filter(image, size=int(4 * seeing_pix))
    # Make sure that the real maxima rise far enough above the background
    diff = (peaks - troughs) > threshold
    # Label this such that every clump of 1s is labelled. Scipy magic!
    labeled, num_obj = label(diff)
    cutouts = find_objects(labeled)  # cutouts of areas with objects
    return cutouts

import numpy as np
from scipy import ndimage


def computeLocalMaxima(self, harrisImage):
    '''
    Input:
        harrisImage -- numpy array containing the Harris score at each pixel.
    Output:
        destImage -- numpy array containing True/False at each pixel,
                     depending on whether the pixel value is the local
                     maximum in its 7x7 neighborhood.
    '''
    destImage = np.zeros_like(harrisImage, bool)  # np.bool was removed in
                                                  # recent numpy

    # TODO 2: Compute the local maxima image
    # TODO-BLOCK-BEGIN
    maximg = ndimage.maximum_filter(harrisImage, [7, 7])
    minimg = ndimage.minimum_filter(harrisImage, [7, 7])
    height, width = harrisImage.shape[:2]
    for i in range(0, height):
        for j in range(0, width):
            # keep a pixel if it equals the window maximum and the window is
            # not flat (max != min)
            destImage[i, j] = (maximg[i, j] == harrisImage[i, j]) and (
                maximg[i, j] != minimg[i, j])
    # TODO-BLOCK-END

    return destImage

import numpy as np
from scipy import ndimage as ndi


def find_peaks_minmax(z, distance=5., threshold=10.):
    """Method to locate the positive peaks in an image by comparing maximum
    and minimum filtered images.

    Parameters
    ----------
    z : numpy.ndarray
        Matrix of image intensities.
    distance : float
        Expected distance between peaks.
    threshold : float
        Minimum difference between maximum and minimum filtered images.

    Returns
    -------
    peaks : :py:class:`numpy.ndarray` of shape (n_peaks, 2)
        Peak pixel coordinates.
    """
    data_max = ndi.maximum_filter(z, distance)
    maxima = (z == data_max)
    data_min = ndi.minimum_filter(z, distance)
    diff = ((data_max - data_min) > threshold)
    maxima[diff == 0] = 0

    labeled, num_objects = ndi.label(maxima)
    peaks = np.array(
        ndi.center_of_mass(z, labeled, range(1, num_objects + 1)))

    return clean_peaks(np.round(peaks).astype(int))  # project-level helper

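# Standalone sketch of the max/min comparison used above (my own demo values;
# clean_peaks is project-specific, so only the filtering logic is reproduced).
def demo_minmax_peak_mask():
    import numpy as np
    from scipy import ndimage as ndi
    z = np.zeros((20, 20))
    z[5, 5] = 100.0
    z[14, 8] = 80.0
    data_max = ndi.maximum_filter(z, 5)
    data_min = ndi.minimum_filter(z, 5)
    # a true peak equals the window maximum and rises above the window minimum
    maxima = (z == data_max) & ((data_max - data_min) > 10.0)
    return np.argwhere(maxima)  # [[5, 5], [14, 8]]
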
import numpy as np
from scipy import ndimage, signal


def new_id_slits(flat):
    # Originally Python 2 / old scipy (scipy.where, scipy.float64,
    # stats.stats.std); updated to numpy equivalents
    ncols = flat.shape[1]
    midtrace = flat[:, ncols // 2].astype(np.float64)
    midtrace[midtrace <= 0.] = 1.
    laplace = np.array([-1., -1., -1., 1., 1., 1.])
    deriv = signal.convolve(midtrace, laplace, mode='same') / midtrace
    d2 = abs(signal.convolve(midtrace, [-1., 1.], mode='same'))
    deriv = ndimage.gaussian_filter1d(deriv, 1)
    deriv[:5] = 0.
    deriv[-5:] = 0.
    tmp = deriv.copy()
    tmp.sort()
    # robust scatter estimate: clip the top and bottom percent first
    std = tmp[int(tmp.size * 0.01):int(tmp.size * 0.99)].std()
    peaks = ndimage.maximum_filter(deriv, 9)
    right = np.where((peaks == deriv) & (peaks > std))[0]
    peaks = ndimage.minimum_filter(deriv, 9)
    left = np.where((peaks == deriv) & (peaks < -1. * std))[0]
    orders = []
    stars = []
    for i in range(len(left)):
        # sample std (ddof=1) matches the removed scipy.stats.std
        thresh = midtrace[left[i] + 3:right[i] - 3].std(ddof=1)
        good = np.where(d2[left[i]:right[i]] < thresh)[0] + left[i]
        orders.append([good[0], good[-1]])
        if deriv[right[i]] > 3. * std:
            stars.append([good[0], good[-1]])
    print(orders)
    print(stars)
    return orders

import matplotlib.pyplot as plt
from scipy import ndimage

import lib  # project-level module; wind4csf, Len_x, Len_y are module globals


def get_csf(image, x, y, params):
    res_image, mean_intensity, mean_x, mean_y, abs_steps, angle_steps, \
        xs_AB, ys_AB, cols_AB = lib.make_preobr(image, x, y, params)

    maxes = ndimage.maximum_filter(res_image, size=wind4csf, mode="wrap")
    mines = ndimage.minimum_filter(res_image, size=wind4csf, mode="wrap")
    # Michelson contrast: (Imax - Imin) / (Imax + Imin)
    michelson_contrast = (maxes - mines) / (maxes + mines)

    fig, ax = plt.subplots(nrows=1, ncols=4)  # , figsize=(15, 15)
    ax[0].imshow(image, cmap='gray', vmin=0, vmax=255)
    ax[1].imshow(res_image, cmap='gray', vmin=0, vmax=255)
    ax[2].imshow(michelson_contrast, cmap='gray', vmin=0, vmax=1)
    center_line = michelson_contrast[Len_x // 2, Len_y // 2:]
    # wind = signal.parzen(35)
    # center_line = np.convolve(center_line, wind, "same")
    ax[3].plot(center_line)
    fig.savefig(params["outfile"] + ".png", dpi=200)
    plt.close('all')
    return center_line

import numpy as np
import scipy.ndimage as ndi


def compute_data(compute_fp, input_data, input_fps, selfraster):
    """Computes up and down slopes. `nodata` and `full_fp` are globals of the
    surrounding module."""
    arr = input_data[0]
    assert arr.shape == tuple(compute_fp.dilate(1).shape)
    nodata_mask = arr == nodata
    nodata_mask = ndi.binary_dilation(nodata_mask)
    # 4-connected neighbourhood
    kernel = [
        [0, 1, 0],
        [1, 1, 1],
        [0, 1, 0],
    ]
    # up-slope: steepest rise to a neighbour, in degrees
    arru = ndi.maximum_filter(arr, None, kernel) - arr
    arru = np.arctan(arru / full_fp.pxsizex)
    arru = arru / np.pi * 180.
    arru[nodata_mask] = 0
    arru = arru[1:-1, 1:-1]
    # down-slope: steepest drop to a neighbour, in degrees
    arrd = arr - ndi.minimum_filter(arr, None, kernel)
    arrd = np.arctan(arrd / full_fp.pxsizex)
    arrd = arrd / np.pi * 180.
    arrd[nodata_mask] = 0
    arrd = arrd[1:-1, 1:-1]
    arr = np.dstack([arrd, arru])
    return arr

from numpy import ones, unique
from scipy.ndimage import minimum_filter


def regional_minima(a, connectivity=1):
    """Find the regional minima in an ndarray."""
    # complement and morphological_reconstruction are project-level helpers
    values = unique(a)
    delta = (values - minimum_filter(values, footprint=ones(3)))[1:].min()
    marker = complement(a)
    mask = marker + delta
    return marker == morphological_reconstruction(marker, mask, connectivity)

import numpy as np
import scipy.ndimage as ndimage


def depth_discont_sobel(di, tol=0.5):
    # Sobel gradient magnitude of the depth image
    dx = ndimage.convolve(di, [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
    dy = ndimage.convolve(di, [[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
    diff_im = np.sqrt(dx ** 2 + dy ** 2)
    diff_im[diff_im < tol] = 0
    # keep the depth values only at the discontinuities
    di_diff = np.zeros(di.shape)
    di_diff[diff_im > 0] = di[diff_im > 0]
    # of adjacent discontinuity pixels, keep the nearer (smaller) depth
    di_diff = ndimage.minimum_filter(di_diff, size=(2, 2))
    return di_diff

import numpy as np
from scipy import ndimage


def find_h_cut(cut):
    # Dynamic programming: find the minimum-cost horizontal seam through the
    # overlap region
    overlap_y = cut.shape[0]
    if overlap_y < 3:
        return None  # overlap too small
    w1width = cut.shape[1]
    min_vals = np.empty_like(cut)
    min_vals[:, 0] = cut[:, 0]
    for i in range(1, w1width, 1):
        # cheapest of the three predecessors in the column to the left
        min_vals[:, i] = cut[:, i] + ndimage.minimum_filter(
            min_vals[:, i - 1:i + 1],
            footprint=[[1, 0], [1, 0], [1, 0]],
            mode='constant')[:, 1]
        # boundary rows, where the constant padding would leak in
        min_vals[0, i] = cut[0, i] + min(min_vals[0, i - 1],
                                         min_vals[1, i - 1])
        # fixed from min(min_vals[-1, i - 1], min_vals[-1, i - 1]), which
        # compared the same element twice
        min_vals[-1, i] = cut[-1, i] + min(min_vals[-1, i - 1],
                                           min_vals[-2, i - 1])
    # backtrack from the cheapest end point; seam positions must be ints
    res = np.empty(w1width, dtype=int)
    res[-1] = np.argmin(min_vals[:, -1])
    for i in range(w1width - 2, -1, -1):
        res[i] = res[i + 1] + np.argmin(
            min_vals[max(0, res[i + 1] - 1):
                     min(overlap_y, res[i + 1] + 2), i]) \
            - 1 + int(res[i + 1] - 1 < 0)
    return res

import numpy as np
import scipy.ndimage as ndi


def argmax_translation(array, filter_pcorr, constraints=None, reports=None):
    if constraints is None:
        constraints = dict(tx=(0, None), ty=(0, None))

    # We want to keep the original, and here it is obvious that
    # it won't get changed inadvertently
    array_orig = array.copy()
    if filter_pcorr > 0:
        array = ndi.minimum_filter(array, filter_pcorr)

    ashape = np.array(array.shape, int)
    mask = np.ones(ashape, float)
    # first goes Y, then X
    for dim, key in enumerate(("ty", "tx")):
        if constraints.get(key, (0, None))[1] is None:
            continue
        pos, sigma = constraints[key]
        alen = ashape[dim]
        dom = np.linspace(-alen // 2, -alen // 2 + alen, alen, False)
        if sigma == 0:
            # generate a binary array closest to the position
            idx = np.argmin(np.abs(dom - pos))
            vals = np.zeros(dom.size)
            vals[idx] = 1.0
        else:
            vals = np.exp(-(dom - pos) ** 2 / sigma ** 2)
        if dim == 0:
            mask *= vals[:, np.newaxis]
        else:
            mask *= vals[np.newaxis, :]
    array *= mask

    # WE ARE FFTSHIFTED already.
    # ban translations that are too big
    thresh = ashape // 6
    mask2 = np.zeros(ashape, int)
    mask2[thresh[0]:-thresh[0], thresh[1]:-thresh[1]] = 1
    array *= mask2
    # Find what we look for
    tvec = _argmax_ext(array, 'inf')
    tvec = _interpolate(array_orig, tvec)

    if 0:  # debugging block, disabled
        print("tvec: %s" % tvec)
        import pylab as pyl
        pyl.figure()
        pyl.imshow(array, cmap=pyl.cm.gray, interpolation='nearest')
        pyl.colorbar()

    # If we use constraints or min filter,
    # array_orig[tvec] may not be the maximum
    success = _get_success(array_orig, tuple(tvec), 2)

    if reports is not None:
        reports["amt-orig"] = array_orig.copy()
        reports["amt-postproc"] = array.copy()

    return tvec, success

import numpy as np
from scipy import ndimage


def fill_simple_depressions(values):
    """Fill simple depressions in-place."""
    footprint = np.array([(1, 1, 1),
                          (1, 0, 1),
                          (1, 1, 1)], dtype='b1')
    # lowest neighbouring value, excluding the cell itself
    edge = ndimage.minimum_filter(values, footprint=footprint)
    locs = edge > values
    values[locs] = edge[locs]

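# Minimal usage sketch for fill_simple_depressions (toy elevation grid, my
# own): a single-cell pit is raised to its lowest 8-connected neighbour.
def demo_fill_simple_depressions():
    import numpy as np
    values = np.full((5, 5), 10.0)
    values[2, 2] = 1.0  # one-cell depression
    fill_simple_depressions(values)  # modifies in place
    return values[2, 2]  # now 10.0
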
from scipy import ndimage


def on_image_image(self, image_id):
    # def on_image_image(self, request, image_id):
    result = self.work.get_image(image_id).copy()
    seg = self.work.get_segmentation(image_id)
    # a pixel lies on a border when the 3x3 max and min of the label image
    # disagree
    border = (ndimage.maximum_filter(seg.labels, size=(3, 3)) !=
              ndimage.minimum_filter(seg.labels, size=(3, 3)))
    result[border] = 0.0
    return image_response(result)  # project-level helper

from numpy import where
import scipy.ndimage as ndi


def minfilt_thresh(array, t, rad):
    """Applies a minimum filter of the given radius to the input array and
    returns a binary array thresholded at the specified value."""
    marray = ndi.minimum_filter(array, footprint=circlemask(rad))  # project helper
    thrarr = where(marray < t, 1, 0)
    return thrarr

import numpy as np


def complex_flux(spectrogram, diff_frames=None, diff_max_bins=3,
                 temporal_filter=3, temporal_origin=0):
    """
    Complex flux with a local group delay based tremolo suppression.

    :param spectrogram:     Spectrogram instance
    :param diff_frames:     number of frames to calculate the diff to [int]
    :param diff_max_bins:   number of bins used for maximum filter [int]
    :param temporal_filter: temporal maximum filtering of the local group delay
    :param temporal_origin: origin of the temporal maximum filter
    :return:                complex flux onset detection function

    "Local group delay based vibrato and tremolo suppression for onset
     detection"
    Sebastian Böck and Gerhard Widmer.
    Proceedings of the 13th International Society for Music Information
    Retrieval Conference (ISMIR), 2013.
    """
    # create a mask based on the local group delay information
    from scipy.ndimage import maximum_filter, minimum_filter
    # take only absolute values of the local group delay and normalize them
    lgd = np.abs(spectrogram.stft.phase().lgd()) / np.pi

    # maximum filter along the temporal axis
    # TODO: use HPSS instead of simple temporal filtering
    if temporal_filter > 0:
        lgd = maximum_filter(lgd, size=[temporal_filter, 1],
                             origin=temporal_origin)
    # lgd = uniform_filter(lgd, size=[1, 3])  # better for percussive onsets

    # create the weighting mask
    try:
        # if the magnitude spectrogram was filtered, use the minimum local
        # group delay value of each filterbank (expanded by one frequency
        # bin in both directions) as the mask
        mask = np.zeros_like(spectrogram)
        num_bins = lgd.shape[1]
        for b in range(mask.shape[1]):
            # determine the corner bins for the mask
            corner_bins = np.nonzero(spectrogram.filterbank[:, b])[0]
            # always expand to the next neighbour
            start_bin = corner_bins[0] - 1
            stop_bin = corner_bins[-1] + 2
            # constrain the range
            if start_bin < 0:
                start_bin = 0
            if stop_bin > num_bins:
                stop_bin = num_bins
            # set mask
            mask[:, b] = np.amin(lgd[:, start_bin:stop_bin], axis=1)
    except AttributeError:
        # if the spectrogram is not filtered, use a simple minimum filter
        # covering only the current bin and its neighbours
        mask = minimum_filter(lgd, size=[1, 3])
    # sum all positive 1st order max. filtered and weighted differences
    return np.sum(spectrogram.diff(diff_frames=diff_frames,
                                   diff_max_bins=diff_max_bins,
                                   positive_diffs=True) * mask, axis=1)

def __local_minima_fancy__(self, fits, window=50):
    import numpy
    from scipy.ndimage import minimum_filter
    fits = numpy.asarray(fits)
    # mode="wrap" treats the sequence as circular
    minfits = minimum_filter(fits, size=window, mode="wrap")
    minima_mask = fits == minfits
    good_indices = numpy.arange(len(fits))[minima_mask]
    good_fits = fits[minima_mask]
    order = good_fits.argsort()
    return good_indices[order], good_fits[order]

import scipy.ndimage as _nd


def apply(array, **kwargs):
    """
    Apply a set of standard filters to array data:

    Call: apply(array-data, <list of key=value arguments>)

    The list of key-value pairs defines the filtering to be done and should
    be given in the order to be processed. Possible key-values are:

      * smooth:     gaussian filtering, value is the sigma parameter
                    (scalar or tuple)
      * uniform:    uniform filtering (2)
      * max:        maximum filtering (1)
      * min:        minimum filtering (1)
      * median:     median filtering (1)
      * dilate:     grey dilatation (1)
      * erode:      grey erosion (1)
      * close:      grey closing (1)
      * open:       grey opening (1)
      * linear_map: call linear_map(), value is the tuple (min, max) (3)
      * normalize:  call normalize(), value is the method (3)
      * adaptive:   call adaptive(), value is the sigma (3)
      * adaptive_:  call adaptive(), with uniform kernel (3)

    The filtering is done using standard scipy.ndimage functions.

    (1) The value given (to the key) is the width of the filter: the distance
        from the center pixel (the size of the filter is thus 2*value+1).
        The neighborhood is an (approximated) boolean circle
        (up to discretization)
    (2) Same as (1) but the neighborhood is a complete square
    (3) See doc of respective function
    """
    for key in kwargs:
        value = kwargs[key]
        if key not in ('smooth', 'uniform'):
            # circular footprint
            fp = _kernel.distance(array.ndim * (2 * value + 1,)) <= value
        if key == 'smooth':
            array = _nd.gaussian_filter(array, sigma=value)
        elif key == 'uniform':
            array = _nd.uniform_filter(array, size=2 * value + 1)
        elif key == 'max':
            array = _nd.maximum_filter(array, footprint=fp)
        elif key == 'min':
            array = _nd.minimum_filter(array, footprint=fp)
        elif key == 'median':
            array = _nd.median_filter(array, footprint=fp)
        elif key == 'dilate':
            array = _nd.grey_dilation(array, footprint=fp)
        elif key == 'erode':
            array = _nd.grey_erosion(array, footprint=fp)
        elif key == 'open':
            array = _nd.grey_opening(array, footprint=fp)
        elif key == 'close':
            array = _nd.grey_closing(array, footprint=fp)
        elif key == 'linear_map':
            array = linear_map(array, min=value[0], max=value[1])
        elif key == 'normalize':
            array = normalize(array, method=value)
        elif key == 'adaptive':
            array = adaptive(array, sigma=value, kernel='gaussian')
        elif key == 'adaptive_':
            array = adaptive(array, sigma=value, kernel='uniform')
        else:
            print('\033[031mUnrecognized filter :', key)
    return array

import numpy as np


def _lgd_mask(spec, lgd, filterbank=None, temporal_filter=0,
              temporal_origin=0):
    """
    Calculates a weighting mask for the magnitude spectrogram based on the
    local group delay.

    :param spec:            the magnitude spectrogram
    :param lgd:             local group delay of the spectrogram
    :param filterbank:      filterbank used for dimensionality reduction of
                            the magnitude spectrogram
    :param temporal_filter: temporal maximum filtering of the local group delay
    :param temporal_origin: origin of the temporal maximum filter

    "Local group delay based vibrato and tremolo suppression for onset
     detection"
    Sebastian Böck and Gerhard Widmer.
    Proceedings of the 13th International Society for Music Information
    Retrieval Conference (ISMIR), 2013.
    """
    from scipy.ndimage import maximum_filter, minimum_filter
    # take only absolute values of the local group delay
    lgd = np.abs(lgd)

    # maximum filter along the temporal axis
    if temporal_filter > 0:
        lgd = maximum_filter(lgd, size=[temporal_filter, 1],
                             origin=temporal_origin)
    # lgd = uniform_filter(lgd, size=[1, 3])  # better for percussive onsets

    # create the weighting mask
    if filterbank is not None:
        # if the magnitude spectrogram was filtered, use the minimum local
        # group delay value of each filterbank (expanded by one frequency
        # bin in both directions) as the mask
        mask = np.zeros_like(spec)
        num_bins = lgd.shape[1]
        for b in range(mask.shape[1]):
            # determine the corner bins for the mask
            corner_bins = np.nonzero(filterbank[:, b])[0]
            # always expand to the next neighbour
            start_bin = corner_bins[0] - 1
            stop_bin = corner_bins[-1] + 2
            # constrain the range
            if start_bin < 0:
                start_bin = 0
            if stop_bin > num_bins:
                stop_bin = num_bins
            # set mask
            mask[:, b] = np.amin(lgd[:, start_bin:stop_bin], axis=1)
    else:
        # if the spectrogram is not filtered, use a simple minimum filter
        # covering only the current bin and its neighbours
        mask = minimum_filter(lgd, size=[1, 3])
    # return the normalized mask
    return mask / np.pi

import numpy
from scipy import ndimage


def balanceimage(img, r, R):
    """Balance the brightness of an image by leveling. This is achieved here
    by applying a minimum filter over radius r and a uniform filter over
    radius R, and subtracting the minimum of the two from the original
    image."""
    img_min = ndimage.minimum_filter(img, r)
    img_uni = ndimage.uniform_filter(img, R)
    return img - numpy.minimum(img_min, img_uni)

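# Minimal usage sketch for balanceimage (synthetic brightness ramp, my own,
# not from the original source).
def demo_balanceimage():
    import numpy
    yy, xx = numpy.mgrid[0:64, 0:64]
    img = xx.astype(float)  # ramp standing in for uneven illumination
    return balanceimage(img, r=5, R=15)
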
import numpy as np
from scipy.ndimage import minimum_filter


def local_minima(fits, window=15):
    """from Zachary Pinkus"""
    fits = np.asarray(fits)
    minfits = minimum_filter(fits, size=window, mode="wrap")
    minima_mask = fits == minfits
    good_indices = np.arange(len(fits))[minima_mask]
    good_fits = fits[minima_mask]
    order = good_fits.argsort()
    return good_indices[order], good_fits[order]

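# Minimal usage sketch for local_minima (toy fitness values, my own). Note
# that mode="wrap" treats the sequence as circular, so the last element sees
# the first.
def demo_local_minima():
    import numpy as np
    fits = np.array([3.0, 1.0, 4.0, 1.5, 5.0, 0.5, 9.0, 2.0])
    # minima at indices 5, 1, 3, 7, returned ordered by value
    return local_minima(fits, window=3)
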
from warnings import warn

import numpy as np
import scipy.ndimage as ndimg


def select_region_slices(sci_data, badpix_mask, box_size=256, num_boxes=10):
    """
    Find the optimal regions for calculating the noise autocorrelation
    (areas with the fewest objects/least signal)

    :param sci_data: Science image data array
    :param badpix_mask: Boolean array that is True for bad pixels
    :param box_size: Size of the (square) boxes to select
    :param num_boxes: Number of boxes to select. If insufficient good pixels
        can be found, will return as many boxes as possible
    :return: List of 2D slices, tuples of (slice_y, slice_x)
    """
    # TODO: For large images this is pretty slow, due to 3 full-size filters
    img_boxcar = ndimg.uniform_filter(sci_data, size=box_size)

    # Smooth over mask with min filter, to ignore small areas of bad pixels.
    # One tenth of box size in each dimension means ignoring bad pixel
    # regions comprising <1% of total box pixels
    smooth_size = box_size // 10
    badpix_mask = ndimg.minimum_filter(badpix_mask, size=smooth_size,
                                       mode='constant', cval=False)
    # Expand zone of avoidance of bad pixels, so we don't pick boxes that
    # contain them. mode=constant, cval=True means treat all borders as
    # if they were masked-out pixels
    badpix_mask = ndimg.maximum_filter(badpix_mask,
                                       size=smooth_size + box_size,
                                       mode='constant', cval=True)
    img_boxcar = np.ma.array(img_boxcar, mask=badpix_mask)

    box_slices = []
    for box in range(num_boxes):
        # Find the location of the minimum value of the boxcar image,
        # excluding masked areas. This will be a pixel with few nearby
        # sources within one box width
        min_loc = img_boxcar.argmin()
        min_loc = np.unravel_index(min_loc, img_boxcar.shape)
        lower_left = tuple(int(x - box_size / 2) for x in min_loc)
        # Negative values of lower_left mean argmin ran out of unmasked pixels
        if lower_left[0] < 0 or lower_left[1] < 0:
            warn('Ran out of good pixels when placing RMS calculation regions '
                 'for file {}. Only {:d} boxes selected.'.format(sci_data, box))
            break
        min_slice = tuple(slice(x, x + box_size) for x in lower_left)
        box_slices += [min_slice]

        # Zone of avoidance (for center) is twice as big, since we are
        # picking box centers. Use clip to ensure avoidance slice stays
        # within array bounds
        lower_left = tuple(int(x - box_size) for x in min_loc)
        avoid_slice = tuple(slice(np.clip(x, 0, extent),
                                  np.clip(x + 2 * box_size, 0, extent))
                            for x, extent in zip(lower_left, img_boxcar.shape))
        # Add this box to the mask
        img_boxcar[avoid_slice] = np.ma.masked
    return box_slices

from scipy import ndimage


def getCutoffCriteria(self, errorArray):
    # do a small minimum filter to get rid of outliers
    size = int(len(errorArray) ** 0.3) + 1
    errorArray2 = ndimage.minimum_filter(errorArray, size=size, mode='wrap')
    mean = ndimage.mean(errorArray2)
    stdev = ndimage.standard_deviation(errorArray2)
    ### this is so arbitrary
    cut = mean + 5.0 * stdev + 2.0
    ### anything bigger than the particle diameter is too big
    if cut > self.data['pixdiam']:
        cut = self.data['pixdiam']
    return cut

import numpy as np
import scipy.ndimage as ndi


def argmax_translation(array, filter_pcorr, constraints=None, reports=None):
    if constraints is None:
        constraints = dict(tx=(0, None), ty=(0, None))

    # We want to keep the original, and here it is obvious that
    # it won't get changed inadvertently
    array_orig = array.copy()
    if filter_pcorr > 0:
        array = ndi.minimum_filter(array, filter_pcorr)

    ashape = np.array(array.shape, int)
    mask = np.ones(ashape, float)
    # first goes Y, then X
    for dim, key in enumerate(("ty", "tx")):
        if constraints.get(key, (0, None))[1] is None:
            continue
        pos, sigma = constraints[key]
        alen = ashape[dim]
        dom = np.linspace(-alen // 2, -alen // 2 + alen, alen, False)
        if sigma == 0:
            # generate a binary array closest to the position
            idx = np.argmin(np.abs(dom - pos))
            vals = np.zeros(dom.size)
            vals[idx] = 1.0
        else:
            vals = np.exp(-(dom - pos) ** 2 / sigma ** 2)
        if dim == 0:
            mask *= vals[:, np.newaxis]
        else:
            mask *= vals[np.newaxis, :]
    array *= mask

    # WE ARE FFTSHIFTED already.
    # ban translations that are too big
    aporad = (ashape // 6).min()
    mask2 = get_apofield(ashape, aporad)  # project-level helper
    array *= mask2
    # Find what we look for
    tvec = _argmax_ext(array, 'inf')
    tvec = _interpolate(array_orig, tvec)

    # If we use constraints or min filter,
    # array_orig[tvec] may not be the maximum
    success = _get_success(array_orig, tuple(tvec), 2)

    if reports is not None and reports.show("translation"):
        reports["amt-orig"] = array_orig.copy()
        reports["amt-postproc"] = array.copy()

    return tvec, success

import scipy.ndimage as nd


def cleanspectra(w, f, e=None, dw=2, grow=6, neg=False):
    """Remove possible bad pixels"""
    if e is None:
        e = f * 0.0 + f.std()

    m = (w > 0)
    # set a few unreliable sky lines to zero
    for l in [5577, 6300, 6364]:
        m[abs(w - l) < dw] = 0

    if neg:
        m = m * (f > 0)

    # remove and grow the bad areas
    m = nd.minimum_filter(m, size=grow)

    return w[m], f[m], e[m]

import numpy
from scipy.ndimage import maximum_filter, minimum_filter


def local_maxima(data, span=10, sign=1):
    data = numpy.asarray(data)
    print('data size: ', data.shape)
    if sign <= 0:  # look for minima
        maxfits = minimum_filter(data, size=span, mode="wrap")
    else:
        maxfits = maximum_filter(data, size=span, mode="wrap")
    print('maxfits shape: ', maxfits.shape)
    maxima_mask = numpy.where(data == maxfits)
    good_indices = numpy.arange(len(data))[maxima_mask]
    print('len good index: ', len(good_indices))
    good_fits = data[maxima_mask]
    order = good_fits.argsort()
    return good_indices[order], good_fits[order]

import numpy as np
from scipy.ndimage import minimum_filter, percentile_filter


def rolling_minimum_background(img, size=(31, 51), kernel=None,
                               geometry='rectangular', topography='flat',
                               percentile=0):
    """
    Instead of calculating the resulting image, just calculate the background
    and apply it with:

        img -= rolling_minimum_background(img)

    That way you can apply any amount of pre-filters without complex logic:

        img -= rolling_minimum_background(gaussian_filter(img, sigma=2))

    Notes:
        This doesn't work well with images with sharp boundaries, e.g. the
        edge of a gel. For best results, apply AFTER opening() and col+row
        leveling.

    Args:
        img:
        size: int or 2-tuple of (height, width); should be higher than the
            thickest band and wider than the widest smear. Square is
            usually OK.
        kernel: specify kernel / footprint / structuring element manually.
        geometry:
        topography:
        percentile: use percentile_filter with this percentile instead of
            minimum_filter (equivalent to percentile=0)

    Returns:
        background image
    """
    if kernel is None:
        if isinstance(size, int):
            size = (size, size)
        if geometry in ('round', 'disk', 'ellipse'):
            # round/elliptical binary kernel
            kernel = ellipse_binary(size)  # project-level helper
        elif geometry == 'rectangular' or geometry is None:
            kernel = np.ones(size)
    if topography == 'ball':
        # topography generally doesn't work because ndimage filters take
        # boolean footprints. It could probably be done with a generic
        # filter, or by some other means.
        pass
    if percentile:
        # percentile_filter; footprint must be a boolean array; size=(n, m)
        # is equivalent to footprint=np.ones((n, m))
        background = percentile_filter(img, percentile=percentile,
                                       footprint=kernel)
    else:
        background = minimum_filter(img, footprint=kernel)
    return background

import numpy as np
from scipy import ndimage


def non_extrema_suppression(features, size=None, output=None):
    """Set non-extrema to nan"""
    # Find maxima
    maxima = ndimage.maximum_filter(features, size) == features
    # Find minima
    minima = ndimage.minimum_filter(features, size) == features
    extrema = np.logical_xor(maxima, minima)
    if output is None:
        output = np.zeros_like(features)
    output[extrema] = features[extrema]
    output[np.logical_not(extrema)] = np.nan
    return output

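# Minimal usage sketch for non_extrema_suppression (1-D toy signal, my own):
# values that are neither a windowed max nor a windowed min become NaN.
def demo_non_extrema_suppression():
    import numpy as np
    features = np.array([0.0, 3.0, 1.0, -2.0, 2.0])
    # result: [0., 3., nan, -2., 2.]
    return non_extrema_suppression(features, size=3)
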
import numpy as np
from scipy import ndimage


def find_all_nthres_fast(data, thres, msep, find_segs=False):
    """
    Fast version of find_all_nthres. See :py:func:`find_all_thres`.
    """
    wsize = tuple([2 * i + 1 for i in msep])  # window size is 2*separation+1

    # find local minima mask
    mn = ndimage.minimum_filter(data, size=wsize, mode='constant') == data

    # find negative threshold mask
    nthres = np.ma.less(data, thres)

    # peaks are bitwise and of the minimum mask and threshold mask
    locations = np.transpose(np.nonzero(np.bitwise_and(nthres, mn)))
    locations = [tuple(i) for i in locations]

    if find_segs:
        seg_slices = [find_pseg_slice(data, l, thres) for l in locations]
        return locations, seg_slices
    else:
        return locations

import numpy
from scipy import ndimage


def binaryContour(A):
    # 6-connected (3-D plus-shaped) structuring element
    x3d = numpy.zeros([3, 3, 3])
    x3d[1, 1, :] = 1
    x3d[:, 1, 1] = 1
    x3d[1, :, 1] = 1
    # subtracting the erosion leaves only the one-voxel-thick shell
    return A - ndimage.minimum_filter(A, footprint=x3d).astype(numpy.uint8)

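# Minimal usage sketch for binaryContour (small solid cube, my own): only the
# fully interior voxel survives the 6-connected erosion, so just the shell
# remains.
def demo_binaryContour():
    import numpy
    A = numpy.zeros((5, 5, 5), numpy.uint8)
    A[1:4, 1:4, 1:4] = 1
    shell = binaryContour(A)
    return shell.sum()  # 26: the surface voxels of the 3x3x3 cube
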