def prewitt_ndimage(arr):
    UINT8 = np.iinfo(np.uint8)
    x = ndimage.prewitt(arr, axis=0)
    y = ndimage.prewitt(arr)
    temp = np.hypot(x, y).clip(UINT8.min, UINT8.max).astype(np.uint8)
    set_border(temp, 255)
    return temp
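`set_border` above is an external helper that is not shown in this snippet. The sketch below is a purely hypothetical stand-in (fill the outermost pixels of a 2-D array with a constant) included only for illustration; the real implementation may differ.

# Hypothetical stand-in for the external set_border helper used above;
# the real implementation may differ.
def set_border(arr2d, value):
    arr2d[0, :] = value     # top row
    arr2d[-1, :] = value    # bottom row
    arr2d[:, 0] = value     # left column
    arr2d[:, -1] = value    # right column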
def test_multiple_modes():
    # Test that the filters with multiple mode capabilities for different
    # dimensions give the same result as applying a single mode.
    arr = np.array([[1., 0., 0.],
                    [1., 1., 0.],
                    [0., 0., 0.]])

    mode1 = 'reflect'
    mode2 = ['reflect', 'reflect']

    assert_equal(sndi.gaussian_filter(arr, 1, mode=mode1),
                 sndi.gaussian_filter(arr, 1, mode=mode2))
    assert_equal(sndi.prewitt(arr, mode=mode1),
                 sndi.prewitt(arr, mode=mode2))
    assert_equal(sndi.sobel(arr, mode=mode1),
                 sndi.sobel(arr, mode=mode2))
    assert_equal(sndi.laplace(arr, mode=mode1),
                 sndi.laplace(arr, mode=mode2))
    assert_equal(sndi.gaussian_laplace(arr, 1, mode=mode1),
                 sndi.gaussian_laplace(arr, 1, mode=mode2))
    assert_equal(sndi.maximum_filter(arr, size=5, mode=mode1),
                 sndi.maximum_filter(arr, size=5, mode=mode2))
    assert_equal(sndi.minimum_filter(arr, size=5, mode=mode1),
                 sndi.minimum_filter(arr, size=5, mode=mode2))
    assert_equal(sndi.gaussian_gradient_magnitude(arr, 1, mode=mode1),
                 sndi.gaussian_gradient_magnitude(arr, 1, mode=mode2))
    assert_equal(sndi.uniform_filter(arr, 5, mode=mode1),
                 sndi.uniform_filter(arr, 5, mode=mode2))
def compute_gradients(img):
    """
    Computes the gradients of the input image in the x and y direction
    using a differentiation filter.

    ##########################################################################
    # TODO: Design a differentiation filter and update the docstring. Stick  #
    # to a pure differentiation filter for this assignment.                  #
    # Hint: Look at Slide 14 from Lecture 3: Gradients.                      #
    ##########################################################################

    Input: Grayscale image of shape (H x W)
    Outputs: gx, gy gradients in x and y directions respectively
    """
    gray = np.copy(img)
    if gray.ndim == 3:  # convert RGB input to grayscale
        gray = rgb2gray(gray)

    # gx = gy = np.zeros_like(img)
    gx = -ndimage.prewitt(gray, axis=1, mode='constant')
    gy = -ndimage.prewitt(gray, axis=0, mode='constant')
    ##########################################################################
    # TODO: Design a pure differentiation filter and use correlation to      #
    # compute the gradients gx and gy. You might have to try multiple        #
    # filters till the test below passes. All the tests after will fail if   #
    # this one does not pass.                                                #
    ##########################################################################
    return gx, gy
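The TODO above asks for a pure differentiation filter applied by correlation rather than the Prewitt placeholder. Below is a minimal sketch of one such filter; the central-difference kernel, the 'constant' boundary mode, and the sign convention (chosen to match the negated Prewitt calls above) are assumptions, not the assignment's required answer.

# Minimal sketch (assumptions noted above): central-difference kernels
# applied by correlation; signs chosen to match -prewitt(..., axis=1/0).
import numpy as np
from scipy import ndimage

def compute_gradients_central(gray):
    kx = np.array([[1., 0., -1.]])   # pure d/dx kernel, no smoothing rows
    ky = kx.T                        # pure d/dy kernel
    gx = ndimage.correlate(gray, kx, mode='constant')
    gy = ndimage.correlate(gray, ky, mode='constant')
    return gx, gy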
def findedges(image, image_file, issave):
    data = image[y0:y1, x0:x1, :]
    x, y = np.meshgrid(np.arange(data.shape[1]), np.arange(data.shape[0]))

    gaus = scimg.fourier_gaussian(data[:, :, 0], sigma=0.01)
    can_x = scimg.prewitt(gaus, axis=0)
    can_y = scimg.prewitt(gaus, axis=1)
    can = np.hypot(can_x, can_y)

    # pulling out object edges
    fig3, ax3 = plt.subplots(2, 1, figsize=(10, 7))
    ax3[0].pcolormesh(x, y, can, cmap='gist_ncar')

    bin_size = 30          # total bins to show
    percent_cutoff = 0.05  # cutoff once main peak tapers to 5% of max
    hist_vec = np.histogram(can.ravel(), bins=bin_size)
    hist_x, hist_y = hist_vec[0], hist_vec[1]
    for ii in range(np.argmax(hist_x), bin_size):
        hist_max = hist_y[ii]
        if hist_x[ii] < percent_cutoff * np.max(hist_x):
            break

    # scatter points where objects exist
    ax3[1].plot(x[can > hist_max], y[can > hist_max], marker='.',
                linestyle='', label='Scatter Above 5% Dropoff')
    ax3[1].set_xlim(np.min(x), np.max(x))
    ax3[1].set_ylim(np.min(y), np.max(y))
    ax3[1].legend()
    if issave:
        plt.savefig(image_file.split('.')[0] + '_edges.png')
    else:
        plt.show()
    print(hist_vec[1])
    return stat_dsp(hist_vec[1], threshold)
def run(self, ips, snap, img, para=None):
    nimg.prewitt(snap,
                 axis={'horizontal': 0, 'vertical': 1}[para['axis']],
                 output=img)
def run(self, ips, snap, img, para=None):
    if para['axis'] == 'both':
        img[:] = np.abs(nimg.prewitt(snap, axis=0, output=img.dtype))
        img += np.abs(nimg.prewitt(snap, axis=1, output=img.dtype))
    else:
        nimg.prewitt(snap,
                     axis={'horizontal': 0, 'vertical': 1}[para['axis']],
                     output=img)
        img[:] = np.abs(img)
    img //= 3
def prewitt(img):
    """Function to apply a prewitt filter on a given input image.

    :param img: {numpy.array} image as numpy array
    :return: {numpy.array} filtered image
    """
    sx = ndimage.prewitt(img, axis=0, mode='constant')
    sy = ndimage.prewitt(img, axis=1, mode='constant')
    prew = np.hypot(sx, sy)
    return prew
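A small usage sketch for the helper above; the synthetic test image and the print statement are illustrative only and assume the prewitt() function just defined is in scope.

# Illustrative usage of the prewitt() helper defined above (assumed in scope).
import numpy as np

demo = np.zeros((64, 64))
demo[16:48, 16:48] = 1.0          # a bright square with sharp edges
edges = prewitt(demo)             # Prewitt gradient magnitude
print(edges.shape, edges.max())   # same shape as the input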
def prewitt(im):
    """
    Edge detector using a Prewitt filter
    LONG RUN!

    Arguments:
    im - input image

    For classifier - recommended multiple runs after applying gaussian
    filter w sigma 1.0-16.0
    """
    im = im.astype('int32')
    sx = ndimage.prewitt(im, axis=(im.ndim - 2), mode='constant')
    sy = ndimage.prewitt(im, axis=(im.ndim - 1), mode='constant')
    return np.hypot(sx, sy)
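The docstring above recommends multiple runs after Gaussian smoothing with sigma in 1.0-16.0 when building classifier features. The following is a minimal sketch of that idea; the specific sigma list and the stacking into one feature array are assumptions, and it assumes the prewitt() function above is in scope and that im is an intensity image in roughly 0-255 range (it is cast to int32 inside prewitt()).

# Minimal multiscale sketch of the docstring's recommendation above;
# sigma values and stacking layout are assumptions.
import numpy as np
from scipy import ndimage

def prewitt_multiscale(im, sigmas=(1.0, 2.0, 4.0, 8.0, 16.0)):
    feats = [prewitt(ndimage.gaussian_filter(im, sigma=s)) for s in sigmas]
    return np.stack(feats, axis=0)   # one edge map per smoothing scale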
def doCompute():
    inlpos = xa.SI['nrinl'] // 2
    crlpos = xa.SI['nrcrl'] // 2
    while True:
        xa.doInput()
        indata = xa.Input['Input']
        xa.Output['In-line gradient'] = prewitt(indata, axis=0)[inlpos, crlpos, :]
        xa.Output['Cross-line gradient'] = prewitt(indata, axis=1)[inlpos, crlpos, :]
        xa.Output['Z gradient'] = prewitt(indata, axis=2)[inlpos, crlpos, :]
        xa.Output['Average Gradient'] = (xa.Output['In-line gradient']
                                         + xa.Output['Cross-line gradient']
                                         + xa.Output['Z gradient']) / 3
        xa.doOutput()
def doCompute():
    #
    # index of current trace position in Input numpy array
    #
    ilndx = xa.SI["nrinl"] // 2
    crldx = xa.SI["nrcrl"] // 2
    while True:
        xa.doInput()
        xa.Output["In-line gradient"] = prewitt(xa.Input, axis=0)[ilndx, crldx, :]
        xa.Output["Cross-line gradient"] = prewitt(xa.Input, axis=1)[ilndx, crldx, :]
        xa.Output["Z gradient"] = prewitt(xa.Input, axis=2)[ilndx, crldx, :]
        xa.Output["Average gradient"] = (
            xa.Output["In-line gradient"]
            + xa.Output["Cross-line gradient"]
            + xa.Output["Z gradient"]
        ) / 3
        xa.doOutput()
def genSimpleFeatures(volume):
    return [ndimage.prewitt(volume), ndimage.sobel(volume)] + \
        genBlurSharpen(volume, 2.0) + \
        genBlurSharpen(volume, 5.0)
def edge_filters(self):
    '''
    Plot five edge-filters (kernels) in grayscale
    '''
    self.gray = rgb2gray(self.im)
    self.edges = {
        'Original': self.im,
        'Grayscale': self.gray,
        'Sobel': ndimage.sobel(self.gray),
        'Prewitt': ndimage.prewitt(self.gray),
        'Laplacian': ndimage.laplace(self.gray, mode='reflect'),
        'LoG': ndimage.gaussian_laplace(self.gray, sigma=1, mode='reflect')
    }
    fig, axes = plt.subplots(2, 3, figsize=(18, 10))
    axs = iter(axes.ravel())
    for name, edge in self.edges.items():
        ax = next(axs)
        ax.imshow(edge, cmap='gray')
        ax.set_title(name)
    fig.tight_layout()
    plt.savefig('.'.join(FNAME.split('.')[:-1]) + '_processed.png')
def doCompute():
    inlpos = xa.SI['nrinl'] // 2
    crlpos = xa.SI['nrcrl'] // 2
    while True:
        xa.doInput()
        indata = xa.Input['Input']
        xa.Output['In-line gradient'] = prewitt(indata, axis=0)[inlpos, crlpos, :]
        xa.Output['Cross-line gradient'] = prewitt(indata, axis=1)[inlpos, crlpos, :]
        xa.Output['Z gradient'] = prewitt(indata, axis=2)[inlpos, crlpos, :]
        xa.Output['Average Gradient'] = (xa.Output['In-line gradient']
                                         + xa.Output['Cross-line gradient']
                                         + xa.Output['Z gradient']) / 3
        xa.doOutput()
def doCompute():
    #
    # index of current trace position in Input numpy array
    #
    ilndx = xa.SI['nrinl'] // 2
    crldx = xa.SI['nrcrl'] // 2
    while True:
        xa.doInput()
        xa.Output['In-line gradient'] = prewitt(xa.Input, axis=0)[ilndx, crldx, :]
        xa.Output['Cross-line gradient'] = prewitt(xa.Input, axis=1)[ilndx, crldx, :]
        xa.Output['Z gradient'] = prewitt(xa.Input, axis=2)[ilndx, crldx, :]
        xa.Output['Average gradient'] = (xa.Output['In-line gradient']
                                         + xa.Output['Cross-line gradient']
                                         + xa.Output['Z gradient']) / 3
        xa.doOutput()
def prewitt_operator(input, threshold=3, axis=2):
    input_y = np.ndarray(shape=(np.shape(input)[1:]), dtype=np.float32)
    input_x = np.ndarray(shape=(np.shape(input)[1:]), dtype=np.float32)
    output = np.ndarray(shape=(np.shape(input)), dtype=np.float32)
    for i in range(len(input)):
        input_y[:, :] = ndimage.prewitt(input[i, :, :], 0)
        input_x[:, :] = ndimage.prewitt(input[i, :, :], 1)
        if axis == 0:
            output[i, :, :] = input_y[:, :]
        elif axis == 1:
            output[i, :, :] = input_x[:, :]
        elif axis == 2:
            output[i, :, :] = np.sqrt(np.square(input_x[:, :]) +
                                      np.square(input_y[:, :]))
        output[i, :, :] = Relu_threshold(output[i, :, :], threshold=threshold)
    return output
def prewit(self):
    from scipy.ndimage import prewitt
    img = cv2.imread(self.fileName2, 0)
    prewit = prewitt(img)
    cv2.imwrite('./sonuclar/prewitt.png', prewit)
    w, h = self.gvFiltreIslem.width() - 5, self.gvFiltreIslem.height() - 5
    self.gvFiltreIslem.setScene(
        self.show_image('./sonuclar/prewitt.png', w, h))
def hpfPrewitt(fileName, num):
    workData = getData(fileName, num)
    preFilter = ndimage.prewitt(workData)
    mfSave = Image.fromarray(preFilter)
    mfSave = mfSave.convert('1')
    mfSave.save('Prewitt Filter.png')
    imageGUI.imdisplay('Prewitt Filter.png', 'Prewitt', 1)
def test_multiple_modes_prewitt():
    # Test prewitt filter for multiple extrapolation modes
    arr = np.array([[1.0, 0.0, 0.0],
                    [1.0, 1.0, 0.0],
                    [0.0, 0.0, 0.0]])
    expected = np.array([[1.0, -3.0, 2.0],
                         [1.0, -2.0, 1.0],
                         [1.0, -1.0, 0.0]])
    modes = ["reflect", "wrap"]
    assert_equal(expected, sndi.prewitt(arr, mode=modes))
def test_multiple_modes_prewitt():
    # Test prewitt filter for multiple extrapolation modes
    arr = np.array([[1., 0., 0.],
                    [1., 1., 0.],
                    [0., 0., 0.]])
    expected = np.array([[1., -3., 2.],
                         [1., -2., 1.],
                         [1., -1., 0.]])
    modes = ['reflect', 'wrap']
    assert_equal(expected, sndi.prewitt(arr, mode=modes))
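As a cross-check of the expected values in the two tests above, the multi-mode Prewitt result can be reproduced by composing the filter's two 1-D correlations with one boundary mode per axis. This sketch is not part of the test suite; it assumes the standard decomposition of prewitt into a [-1, 0, 1] derivative along the filtered (last) axis and a [1, 1, 1] smoothing along the other axis.

# Standalone cross-check (assumptions in the note above).
import numpy as np
from scipy import ndimage as sndi

arr = np.array([[1., 0., 0.],
                [1., 1., 0.],
                [0., 0., 0.]])
modes = ['reflect', 'wrap']

deriv = sndi.correlate1d(arr, [-1, 0, 1], axis=1, mode=modes[1])    # derivative axis
manual = sndi.correlate1d(deriv, [1, 1, 1], axis=0, mode=modes[0])  # smoothing axis
np.testing.assert_array_equal(manual, sndi.prewitt(arr, mode=modes))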
def file_controller(in_path, out_path):
    """ ---------- pre-process image ---------- """
    image = cv2.imread(in_path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (3, 3), 0)

    """ ---------- create cv2 canny detections ---------- """
    start = time.time()
    cannywide = cv2.Canny(blurred, 10, 200)
    end = time.time()
    print("canny_wide: " + str(end - start))

    start = time.time()
    cannytight = cv2.Canny(blurred, 225, 250)
    end = time.time()
    print("canny_tight: " + str(end - start))

    start = time.time()
    cannyauto = auto_canny(blurred)  # uses auto_canny function with sigma
    end = time.time()
    print("canny_auto: " + str(end - start))

    """ ---------- create laplacian detection ---------- """
    start = time.time()
    lap = cv2.Laplacian(blurred, cv2.CV_64F)
    end = time.time()
    print("laplacian: " + str(end - start))

    """ ---------- create sobel operations ---------- """
    start = time.time()
    sobelx = cv2.Sobel(blurred, cv2.CV_64F, 1, 0, ksize=5)  # x
    end = time.time()
    print("sobel_x: " + str(end - start))

    start = time.time()
    sobely = cv2.Sobel(blurred, cv2.CV_64F, 0, 1, ksize=5)  # y
    end = time.time()
    print("sobel_y: " + str(end - start))

    """ ---------- create prewitt operator ---------- """
    start = time.time()
    prewitt = ndi.prewitt(blurred)
    end = time.time()
    print("prewitt: " + str(end - start))

    """ ---------- write detections to files ---------- """
    start = time.time()
    cv2.imwrite(out_path + '_canny_auto.jpg', cannyauto)
    cv2.imwrite(out_path + '_canny_tight.jpg', cannytight)
    cv2.imwrite(out_path + '_canny_wide.jpg', cannywide)
    cv2.imwrite(out_path + '_laplacian.jpg', lap)
    cv2.imwrite(out_path + '_sobel_x.jpg', sobelx)
    cv2.imwrite(out_path + '_sobel_y.jpg', sobely)
    cv2.imwrite(out_path + '_prewitt.jpg', prewitt)
    end = time.time()
    print("Writing images out: " + str(end - start))
def on_prewitt_btn_filter_clicked(self):
    from scipy.ndimage import prewitt
    img = cv2.imread(self.fileName)
    prewit = prewitt(img)
    cv2.imwrite('./islenen/prewitt.png', prewit)
    w, h = (self.operationsGV_filterTab.width() - 5,
            self.operationsGV_filterTab.height() - 5)
    self.operationsGV_filterTab.setScene(
        self.show_image('./islenen/prewitt.png', w, h))
def prewitt(in_path, out_path):
    """ Take a directory path, writes result to another path """
    print("Creating prewitt...")
    start = time.time()  # timer start
    img = image_preprocess(in_path)
    p = ndi.prewitt(img)
    cv2.imwrite(out_path + '_prewitt.jpg', p)
    end = time.time()  # timer end
    print("total time: " + str(round(end - start, 2)))
def convolute(self, image, filtr):
    from scipy import ndimage
    if filtr == 'SOBEL':
        return ndimage.sobel(image)
    if filtr == 'GAUSSIAN':
        return ndimage.gaussian_filter(image, sigma=20)
    if filtr == 'LAPLACE':
        return ndimage.laplace(image)
    if filtr == 'UNIFORM':
        return ndimage.uniform_filter(image)
    if filtr == 'PREWITT':
        return ndimage.prewitt(image)
def prewittFilterRGB(inputData):
    r = np.zeros(inputData.shape)
    g = np.zeros(inputData.shape)
    b = np.zeros(inputData.shape)
    ndimage.prewitt(inputData, axis=1, output=r)
    ndimage.prewitt(inputData, axis=0, output=g)
    ndimage.prewitt(inputData, axis=2, output=b)
    return GradientCalculation.normalize(
        np.concatenate((r[..., np.newaxis],
                        g[..., np.newaxis],
                        b[..., np.newaxis]), axis=3))
def filter(img):
    # Takes a greyscale image of 256 * 192
    result = img
    result = nd.sobel(result)
    result = nd.prewitt(result)
    result = nd.median_filter(result, size=20)
    # Eliminate grey values above 190; these are white areas
    mask1 = result > 190
    mask1 = (mask1 != True)
    mask1 = mask1 * 1
    result = result * mask1
    # Return a greyscale image of 256 * 192
    img = Image.fromarray(result)
    return img.convert('L')
def crFind(img, var, nsig=10., sigfrac=0.3):
    simg = img / var**0.5
    deriv = ndimage.prewitt(abs(simg))
    deriv = ndimage.sobel(deriv)
    mean, std = clip(deriv[deriv != 0.])
    crmask = numpy.where(abs(deriv) > 15 * std, 1, 0)
    crmask = ndimage.maximum_filter(crmask, 3)
    #crmask = ndimage.minimum_filter(crmask,3)
    crmask = numpy.where((crmask == 1) & (simg > 5), 1, 0)
    return crmask

    # Note: everything below the return above is unreachable dead code
    # kept from an earlier approach.
    thresh = nsig * var**0.5
    n = 5
    blkimg = img.repeat(n, 0).repeat(n, 1)
    deriv = ndimage.laplace(blkimg) * -1.
    deriv[deriv < 0] = 0.
    d = iT.resamp(deriv, n)
    m = numpy.where((d > thresh) & (img > thresh), 1, 0)
    bmap = ndimage.maximum_filter(m, 5)
    cond = (bmap == 1) & (d > thresh * sigfrac) & (img > thresh * sigfrac)
    m[cond] = 1
    return m
import numpy as np
import scipy.misc as sm
import scipy.ndimage as sn
import matplotlib.pyplot as mp

# Load image
image1 = sm.ascent().astype(np.float32)
# Median filter
image2 = sn.median_filter(image1, (42, 42))
# Rotate image
image3 = sn.rotate(image1, angle=45)
# Prewitt edge filter (emboss-like effect)
image4 = sn.prewitt(image1)

mp.gcf().set_facecolor(np.ones(3) * 240 / 255)
mp.subplot(221)
mp.title('Original', fontsize=16)
mp.xlabel('Width', fontsize=12)
mp.ylabel('Height', fontsize=12)
ax = mp.gca()
ax.xaxis.set_major_locator(mp.MultipleLocator(128))
ax.xaxis.set_minor_locator(mp.MultipleLocator(32))
ax.yaxis.set_major_locator(mp.MultipleLocator(128))
ax.yaxis.set_minor_locator(mp.MultipleLocator(32))
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.imshow(image1, cmap='gray')
mp.subplot(222)
mp.title('Median', fontsize=16)
mp.xlabel('Width', fontsize=12)
mp.ylabel('Height', fontsize=12)
ax = mp.gca()
ax.xaxis.set_major_locator(mp.MultipleLocator(128))
mp.axis('off')
mp.imshow(original, cmap='gray')

# Median filter
median = sn.median_filter(original, 10)
mp.subplot(2, 2, 2)
mp.axis('off')
mp.imshow(median, cmap='gray')
print(median)

# Rotate
median1 = sn.rotate(original, 45)
mp.subplot(2, 2, 3)
mp.axis('off')
mp.imshow(median1, cmap='gray')
print(median1)

# Edge detection
prewitt = sn.prewitt(original)
mp.subplot(2, 2, 4)
mp.axis('off')
mp.imshow(prewitt, cmap='gray')
print(prewitt)

# # Rotate by an angle
# rotate = sn.rotate(original, 45)
# # Edge detection
# prewitt = sn.prewitt(original)
# mp.figure('Image', facecolor='lightgray')
# mp.subplot(221)
# mp.title('Original', fontsize=16)
# mp.axis('off')
# mp.imshow(original, cmap='gray')
# mp.subplot(222)
def __init__(self, image_matrix, sigma=1.0, thresHigh=40, thresLow=6,
             thresHighLimit=2**18):
    self.imin = image_matrix
    self.thresHigh = thresHigh
    self.thresLow = thresLow

    mask = numpy.ones(self.imin.shape, dtype=bool)
    fsmooth = lambda x: gaussian_filter(x, sigma, mode='constant')
    imout = smooth_with_function_and_mask(self.imin, fsmooth, mask)

    grady = ndi.prewitt(imout, axis=1, mode='constant') * -1.0
    gradx = ndi.prewitt(imout, axis=0, mode='constant')
    grad = numpy.hypot(gradx, grady)
    # Net gradient is the square root of sum of square of the horizontal
    # and vertical gradients
    # grad = numpy.hypot(gradx, grady)
    theta = numpy.arctan2(grady, gradx)
    theta = 180 + (180 / pi) * theta
    # Only significant magnitudes are considered. All others are removed
    x, y = where(grad < 10)
    theta[x, y] = 0
    grad[x, y] = 0

    # The angles are quantized. This is the first step in non-maximum
    # suppression, since any pixel will have only 4 approach directions.
    x0, y0 = where(((theta < 22.5) + (theta >= 157.5) * (theta < 202.5)
                    + (theta >= 337.5)) == True)
    x45, y45 = where(((theta >= 22.5) * (theta < 67.5)
                      + (theta >= 202.5) * (theta < 247.5)) == True)
    x90, y90 = where(((theta >= 67.5) * (theta < 112.5)
                      + (theta >= 247.5) * (theta < 292.5)) == True)
    x135, y135 = where(((theta >= 112.5) * (theta < 157.5)
                        + (theta >= 292.5) * (theta < 337.5)) == True)

    # self.theta = theta
    # Image.fromarray(self.theta).convert('L').save('Angle map.jpg')
    theta[x0, y0] = 0.
    theta[x45, y45] = 45.
    theta[x90, y90] = 90.
    theta[x135, y135] = 135.

    self.grad = grad[1:-1, 1:-1]
    self.theta = theta[1:-1, 1:-1]
    x, y = self.grad.shape
    grad2 = self.grad.copy()

    for i in range(x):
        for j in range(y):
            if self.theta[i, j] == 0.:
                test = self.nms_check(grad2, i, j, 1, 0, -1, 0)
                if not test:
                    self.grad[i, j] = 0
            elif self.theta[i, j] == 45.:
                test = self.nms_check(grad2, i, j, 1, -1, -1, 1)
                if not test:
                    self.grad[i, j] = 0
            elif self.theta[i, j] == 90.:
                test = self.nms_check(grad2, i, j, 0, 1, 0, -1)
                if not test:
                    self.grad[i, j] = 0
            elif self.theta[i, j] == 135.:
                test = self.nms_check(grad2, i, j, 1, 1, -1, -1)
                if not test:
                    self.grad[i, j] = 0

    init_point = self.initPt(thresHighLimit)
    # Hysteresis tracking. Since we know that significant edges are
    # continuous contours, we will exploit the same.
    # thresHigh is used to track the starting point of edges and
    # thresLow is used to track the whole edge till end of the edge.
    self.segments = Segments.Segments()
    segment = [init_point]
    while init_point != -1:
        # print 'next segment at', init_point
        self.grad[init_point[0], init_point[1]] = -1
        p2 = init_point
        p1 = init_point
        p0 = init_point
        p0 = self.nextNbd(p0, p1, p2)

        while p0 != -1:
            segment.append(p0)
            p2 = p1
            p1 = p0
            self.grad[p0[0], p0[1]] = -1
            p0 = self.nextNbd(p0, p1, p2)

        if len(segment) >= 2:
            self.segments.append(segment)
        init_point = self.nextPt(self.grad)
        segment = [init_point]

    self.stippleSegmentList = []
# different edge detection methods
cam.capture(data, 'rgb')  # capture image

# gradient-of-Gaussian magnitude
t0 = time.time()
grad_xy = scimg.gaussian_gradient_magnitude(data[:, :, 0], sigma=1.5)
##grad_xy = np.mean(grad_xy,2)
t_grad_xy = time.time() - t0

# laplacian of gaussian
t0 = time.time()
lap = scimg.gaussian_laplace(data[:, :, 0], sigma=0.7)
t_lap = time.time() - t0

# Canny-like method without angle (Gaussian smoothing + Prewitt gradients)
t0 = time.time()
gaus = scimg.fourier_gaussian(data[:, :, 0], sigma=0.05)
can_x = scimg.prewitt(gaus, axis=0)
can_y = scimg.prewitt(gaus, axis=1)
can = np.hypot(can_x, can_y)
##can = np.mean(can,2)
t_can = time.time() - t0

# Sobel method
t0 = time.time()
sob_x = scimg.sobel(data[:, :, 0], axis=0)
sob_y = scimg.sobel(data[:, :, 0], axis=1)
sob = np.hypot(sob_x, sob_y)
##sob = np.mean(sob,2)
t_sob = time.time() - t0

# plotting routines and labeling
fig, ax = plt.subplots(2, 2, figsize=(12, 6))
ax[0, 0].pcolormesh(x, y, grad_xy, cmap='gray')
def eval(self, source_data):
    return ndimage.prewitt(source_data, *self.args, **self.kwargs)
dataDir = "/home/arb/Delme/"

plt.imshow(griddata, origin='lower')
plt.gray()
cb = plt.colorbar()
cb.set_label('Value Range')
plt.xlabel('GridEast')
plt.ylabel('GridNorth')
plt.suptitle('Raw data')
plt.savefig(dataDir + 'r15_raw' + '.png')
plt.show()

# Calculate derivatives
gridSobel = nd.sobel(griddata)
gridLaplace = nd.laplace(griddata)
gridPrewitt = nd.prewitt(griddata)
gridGaussian = nd.gaussian_filter(griddata, 1)
gridMinimum = nd.minimum_filter(griddata, size=(3, 3))

# Plot a derivative
plt.imshow(gridGaussian, origin='lower')
plt.gray()  # show image
cb = plt.colorbar()
cb.set_label('Value Range')
plt.xlabel('GridEast')
plt.ylabel('GridNorth')
plt.suptitle('Raw data')
plt.savefig(dataDir + 'r15_gaussianDerivative' + '.png')
plt.show()
img = plt.imshow(image, cmap=plt.cm.gray)

# The median filter scans every data point of the signal and replaces it
# with the median of neighbouring data points. Apply a median filter to
# the image and display it in the second subplot.
plt.subplot(222)
plt.title("Median Filter")
filtered = ndimage.median_filter(image, size=(42, 42))
plt.imshow(filtered, cmap=plt.cm.gray)

# Rotate the image and display it in the third subplot
plt.subplot(223)
plt.title("Rotated")
rotated = ndimage.rotate(image, 90)
plt.imshow(rotated, cmap=plt.cm.gray)

# The Prewitt filter is based on the gradient of the image intensity
plt.subplot(224)
plt.title("Prewitt Filter")
filtered = ndimage.prewitt(image)
plt.imshow(filtered, cmap=plt.cm.gray)
plt.show()

print u"Audio processing"

# The read function from the scipy.io.wavfile module converts a WAV file
# into a NumPy array.
# Read the file with the read function; it returns the sample rate and
# the audio data.
from scipy.io import wavfile
import urllib2
import sys

sample_rate, data = wavfile.read(WAV_FILE)
# Apply the tile function
repeated = np.tile(data, 4)
# Write a new file with the write function
def sky_ref_patch_detection(I_origin):
    """
    RETURN:
        S: list: [Sb, Sg, Sr]
        sky_prob_map
    """
    I_gray = cv2.cvtColor(I_origin, cv2.COLOR_BGR2GRAY)
    sky_prob_map = np.zeros(I_gray.shape)
    # initialize sky prob map with pixels strictly within ideal blue range
    sky_prob_map[in_idea_blue_rg(I_origin)] = 1.0
    # sky is ONLY in top one third
    # sky_prob_map[sky_prob_map.shape[0]*1.0/3:-1,...] = 0.0

    # exponentially decrease sky prob where gradient is too large
    # (> _grad_percent*255.)
    _grad_x = np.absolute(ndi.prewitt(I_gray, axis=1, mode='nearest'))
    _grad_y = np.absolute(ndi.prewitt(I_gray, axis=0, mode='nearest'))
    _rv = norm(loc=0., scale=255. / 3.)  # 3*sigma = 255, rv ~ random variable
    _grad_percent = 0.10  # 0.05 of the original paper
    cond_mod_sky_prob = (sky_prob_map == 1.0) & \
        ((_grad_x > _grad_percent * 255.) | (_grad_y > _grad_percent * 255.))
    # average of gradx, y
    sky_prob_map[cond_mod_sky_prob] = \
        _rv.pdf((_grad_x + _grad_y) / 2.0)[cond_mod_sky_prob]

    # detect bimodal
    _L = cv2.cvtColor(I_origin, cv2.COLOR_BGR2LAB)[..., 0]
    detect_res = cape_util.detect_bimodal(
        [cape_util.get_smoothed_hist(
            cape_util.mask_skin(_L, sky_prob_map != 0.0))]
    )[0]  # detect_bimodal is an array
    if detect_res[0] == True:  # could return F or None, must be ==True
        print 'bimodal detected in current sky_ref_patch'
        # exclude pixels correspond to the dark mode
        sky_prob_map[I_gray == detect_res[1]] = 0.0

    # get mean and std from each b,g,r channel of detected sky
    S = []
    if np.sum(_get_top_one_third(sky_prob_map)) != 0:  # top 1/3 has sky
        sky_prob_map_bgr = _2to3(sky_prob_map)
        for i in range(3):  # B, G, R
            masked_I_one_thrid = cape_util.mask_skin(
                # mask non-blue area 0, copy the rest
                _get_top_one_third(I_origin)[..., i],
                # sky_prob map changed after previous step
                _get_top_one_third(sky_prob_map) != 0.0
            )
            mean, std = get_sky_ref_patch(masked_I_one_thrid, sky_prob_map)
            S.append(mean)
            _rv_sky_patch = norm(loc=mean, scale=std)
            # re-assign (where sky prob > 0), normalize to p(median) = 1.0
            sky_prob_map_bgr[..., i][sky_prob_map > 0.0] = \
                _rv_sky_patch.pdf(I_origin[..., i])[sky_prob_map > 0.0] / \
                _rv_sky_patch.pdf(mean)
        _b = 1.; _g = 5.; _r = 3.
        sky_prob_map = (_b * sky_prob_map_bgr[..., 0]
                        + _g * sky_prob_map_bgr[..., 1]
                        + _r * sky_prob_map_bgr[..., 2]) / (_b + _g + _r)
        # remove small patches: image opening on sky_prob_map
        _h, _w = sky_prob_map.shape[0:2]
        _kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (_h / 12, _w / 16))
        sky_mask_opened = cv2.morphologyEx(
            sky_prob_to_01(sky_prob_map).astype('uint8'),
            cv2.MORPH_OPEN, _kernel)
        # set sky_prob_map to 0 where removed from sky_mask_opened
        sky_prob_map[sky_mask_opened == 0.0] = 0.0
    else:
        print 'sky not detected in top 1/3'

    plt.imshow(sky_prob_map); plt.show()  # rainbow map
    print 'S(b,g,r): ', S
    return S, sky_prob_map
def run(self, ips, img, buf, para=None):
    nimg.prewitt(img, output=buf)
import numpy as np
import matplotlib.pyplot as plt
from scipy import misc, ndimage

image = misc.lena().astype(np.float32)

plt.subplot(221)
plt.title("Original Image")
img = plt.imshow(image, cmap=plt.cm.gray)
plt.axis("off")

plt.subplot(222)
plt.title("Median Filter")
filtered = ndimage.median_filter(image, size=(42, 42))
plt.imshow(filtered, cmap=plt.cm.gray)
plt.axis("off")

plt.subplot(223)
plt.title("Rotated")
rotated = ndimage.rotate(image, 90)
plt.imshow(rotated, cmap=plt.cm.gray)
plt.axis("off")

plt.subplot(224)
plt.title("Prewitt Filter")
filtered = ndimage.prewitt(image)
plt.imshow(filtered, cmap=plt.cm.gray)
plt.axis("off")
plt.show()
def prewitt_image(image):
    return sn.prewitt(image)
import numpy as np
import nibabel as nib
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection import train_test_split
from scipy import ndimage
from sklearn import linear_model
import matplotlib.pyplot as plt

Targets = np.genfromtxt("data/targets.csv")
Data = []
for i in range(1, 279):
    imagefile = nib.load("data/set_train/train_" + str(i) + ".nii")
    image = imagefile.get_data()
    I = image[:, :, :, 0]
    I = np.asarray(I, dtype='float')
    I = ndimage.prewitt(I, axis=0)
    I = ndimage.gaussian_filter(I, sigma=1)
    imagefile.uncache()
    Data.append(np.asarray(I))

Data = np.asarray(Data)
X_train, X_test, y_train, y_test = \
    train_test_split(Data, Targets, test_size=0.33,
                     random_state=42, stratify=Targets)
#X_train = Data
#y_train = Targets

print("fitting has started")
clf = linear_model.LogisticRegression()
def reliefshade(deminfo, dem, surface):
    vertical_exaggeration = 2.0
    # light_gamma < 1 brings out detail in the flats, hides it in hills
    # facing the lightsource
    light_gamma = 0.9
    light_level = 0.5
    # shade_gamma > 1 brings out detail in darkest slopes at the expense
    # of some gentle ones
    shade_gamma = 2.0
    shade_level = 0.5

    # imhof sez the light is golden - #fffbf4 maybe?
    # and the shade is purple - #060009 maybe?
    #lightcolor = (1.0, 0.98, 0.95)
    #shadecolor = (1.0, 0.98, 0.95)
    #shadecolor = (0.7, 0.8, 1.0)
    #lightcolor = (1.0, 1.0, 1.0)
    lightcolor = (1.0, 1.0, 0.9)
    shadecolor = (1.0, 1.0, 1.0)
    #shadecolor = (0.8, 0.8, 1.0)

    # scale adjusts for horizontal and vertical units as well as
    # any height exaggeration.
    scale = vertical_exaggeration / deminfo.grid.meters_per_grid()

    # 2d prewitt filter kernel with axis=1 is [[-1, 0, 1]
    #                                          [-1, 0, 1]
    #                                          [-1, 0, 1]]
    dvdx = scale * ndimage.prewitt(dem, axis=1, mode='nearest')
    dvdy = -scale * ndimage.prewitt(dem, axis=0, mode='nearest')

    #glumpy_loop(N.outer(dvdx, (1,1,1)).reshape(surface.shape))

    # the idea of separating light and shadow is from lars
    # ahlzen's toposm work, this is slightly different.

    # normal vector is (-dvdx, -dvdy, 1)
    # norm of normal is sqrt(dvdx * dvdx + dvdy * dvdy + 1)
    # vector toward light is (-1, 1, 1)
    # norm of light vector sqrt(3)
    norm = N.sqrt(3 * (dvdx * dvdx + dvdy * dvdy + 1))

    # light_dir = (-1, 1, 1)
    # cos(theta) = light_dir . normal / |normal| |light_dir|
    costh = (dvdx - dvdy + 1) / norm

    # separate into illumination and (self-)shadow

    # costh_level is costh for a horizontal surface. anything brighter
    # than this is light, anything darker is shade.
    # this is (light=0,shade=1)
    costh_level = 1.0 / sqrt(3)

    # the minimum possible costh is for a cliff facing southwest
    # which would have dvdx=-BIG and dvdy=+BIG. plug that into
    # the costh formula and you get -sqrt(2/3)
    # this is the darkest possible shade (light=0,shade=0)
    costh_min = -sqrt(2.0 / 3.0)

    # light is 1.0 for a surface pointing sunward and 0.0 at horizontal
    light = N.clip((costh - costh_level) / (1 - costh_level), 0, 1)
    light = light ** (1 / light_gamma)

    # shade is 1.0 for horizontal and 0.0 for a vertical cliff facing
    # away from the lightsource.
    # note that shade=0 is still darkest, 1 is lightest
    shade = N.clip((costh - costh_min) / (costh_level - costh_min), 0, 1)
    shade = shade ** (1 / shade_gamma)

    # this would be a great place for a "tone mapping" like step
    # to adapt the relief to meadows or mountains.

    # N.outer flattens so must reshape to rgb afterward
    light = N.outer(light, lightcolor).reshape(surface.shape)
    shade = N.outer(shade, shadecolor).reshape(surface.shape)

    # moderate the effect of light and shade here
    light = light * light_level
    shade = shade * shade_level + (1 - shade_level)

    # light brings the surface closer to the light color
    # XXX the white highlights look like plastic, mix in
    # the surface color a bit more?
    lit = (1 - (1 - surface) * (1 - light))

    # shade darkens to black anywhere
    relief = lit * shade

    return relief
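A quick standalone check of the two constants derived in the comments of reliefshade(): cos(theta) equals 1/sqrt(3) for a horizontal surface and approaches -sqrt(2/3) for a steep cliff facing away from the light. This sketch is independent of the module above and only re-evaluates the same formula with illustrative values.

# Standalone numeric check of costh_level and costh_min from the comments above.
import numpy as np

def costh(dvdx, dvdy):
    # light direction (-1, 1, 1), surface normal (-dvdx, -dvdy, 1)
    return (dvdx - dvdy + 1.0) / np.sqrt(3.0 * (dvdx**2 + dvdy**2 + 1.0))

print(costh(0.0, 0.0), 1.0 / np.sqrt(3.0))      # horizontal surface
big = 1e9
print(costh(-big, big), -np.sqrt(2.0 / 3.0))    # southwest-facing cliff limit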