def edges(cls):
    from scipy import ndimage, misc
    import numpy as np
    from skimage import feature

    col = Image.open("f990.jpg")
    gray = col.convert('L')
    # Let numpy do the heavy lifting for converting pixels to pure black or white
    bw = np.asarray(gray).copy()
    # Pixel range is 0...255, 256/2 = 128
    bw[bw < 245] = 0     # Black
    bw[bw >= 245] = 255  # White
    bw[bw == 0] = 254
    bw[bw == 255] = 0
    im = bw
    im = ndimage.gaussian_filter(im, 1)
    edges2 = feature.canny(im, sigma=2)
    labels, numobjects = ndimage.label(im)
    slices = ndimage.find_objects(labels)
    print('\n'.join(map(str, slices)))
    misc.imsave('f990_sob.jpg', im)
    return
    # Dead code below the early return: alternative Sobel gradient-magnitude output
    #im = misc.imread('f990.jpg')
    #im = ndimage.gaussian_filter(im, 8)
    sx = ndimage.sobel(im, axis=0, mode='constant')
    sy = ndimage.sobel(im, axis=1, mode='constant')
    sob = np.hypot(sx, sy)
    misc.imsave('f990_sob.jpg', edges2)
def calc_orient(image):
    #image = cv2.imread('test_window2.png', cv2.CV_LOAD_IMAGE_GRAYSCALE)
    #image = resize.resize_img(image)
    #dbl_image = image.astype(float)
    #cv2.imshow('orginal', image)
    smoothed_im = cv2.GaussianBlur(image, (3, 3), 0)  # gaussian blur using 3x3 window
    dx = ndimage.sobel(smoothed_im, 0)  # horizontal sobel gradient
    dy = ndimage.sobel(smoothed_im, 1)  # vertical sobel gradient
    print(dy)
    # per-pixel gradient orientation (in radians)
    theta_mx = np.zeros(dy.shape)
    for y in range(len(dy)):
        for x in range(len(dx)):
            theta_mx[y][x] = math.atan2(dy[y][x], dx[y][x])
    print(theta_mx)
    #cv2.imshow('dx',dx)
    #cv2.waitKey()
    lines_im = draw_lines2.draw_lines(image, theta_mx, 1)
    #print lines_im
    return lines_im
    #print dx.shape[0],dx.shape[1]
    #print dx
    #print 'hello',dy

#calc_orient()
def test_multiple_modes():
    # Test that the filters with multiple mode capabilities for different
    # dimensions give the same result as applying a single mode.
    arr = np.array([[1., 0., 0.],
                    [1., 1., 0.],
                    [0., 0., 0.]])

    mode1 = 'reflect'
    mode2 = ['reflect', 'reflect']

    assert_equal(sndi.gaussian_filter(arr, 1, mode=mode1),
                 sndi.gaussian_filter(arr, 1, mode=mode2))
    assert_equal(sndi.prewitt(arr, mode=mode1),
                 sndi.prewitt(arr, mode=mode2))
    assert_equal(sndi.sobel(arr, mode=mode1),
                 sndi.sobel(arr, mode=mode2))
    assert_equal(sndi.laplace(arr, mode=mode1),
                 sndi.laplace(arr, mode=mode2))
    assert_equal(sndi.gaussian_laplace(arr, 1, mode=mode1),
                 sndi.gaussian_laplace(arr, 1, mode=mode2))
    assert_equal(sndi.maximum_filter(arr, size=5, mode=mode1),
                 sndi.maximum_filter(arr, size=5, mode=mode2))
    assert_equal(sndi.minimum_filter(arr, size=5, mode=mode1),
                 sndi.minimum_filter(arr, size=5, mode=mode2))
    assert_equal(sndi.gaussian_gradient_magnitude(arr, 1, mode=mode1),
                 sndi.gaussian_gradient_magnitude(arr, 1, mode=mode2))
    assert_equal(sndi.uniform_filter(arr, 5, mode=mode1),
                 sndi.uniform_filter(arr, 5, mode=mode2))
def image_multi_features(img, maxPixel, num_features, imageSize):
    # X is the feature vector with one row of features per image
    # consisting of the pixel values and our metric
    mask = img > img.mean()
    label_im, nb_labels = ndimage.label(mask)
    X = np.zeros(num_features, dtype=float)
    # image = ndimage.median_filter(img, 3)
    X[imageSize] = img.max()
    X[imageSize + 1] = img.min()
    X[imageSize + 2] = nb_labels
    extra_sizes = [4, 8, 12, 16, 20, 24]
    image = resize(img, (maxPixel, maxPixel))
    granulo = granulometry(image, sizes=extra_sizes)
    sx = ndimage.sobel(image, axis=0, mode='constant')
    sy = ndimage.sobel(image, axis=1, mode='constant')
    sob = np.hypot(sx, sy)
    #edges = canny(image, 3, 0.3, 0.2)
    # Store the rescaled image pixels
    X[0:imageSize] = np.reshape(sob, (1, imageSize))
    for i in range(len(extra_sizes)):
        X[imageSize + 3 + i] = granulo[i]
    return X
def _compute_derivatives(image, mode='constant', cval=0):
    """Compute derivatives the way that scikit-image does it (for comparison).

    This method is fully copied from the repository."""
    imgy = ndimage.sobel(image, axis=0, mode=mode, cval=cval)
    imgx = ndimage.sobel(image, axis=1, mode=mode, cval=cval)
    return imgx, imgy
def canny():
    image = cv2.imread('1_1.png', cv2.CV_LOAD_IMAGE_GRAYSCALE)
    #image = misc.lena()

    # apply gaussian blur 3x3 sigma 0.5
    smoothed_im = cv2.GaussianBlur(image, (3, 3), 0)

    # calculate gradients with sobel filter
    grad_x = ndimage.sobel(image, 0)
    grad_y = ndimage.sobel(image, 1)

    # calculate magnitude
    grad_mag = numpy.sqrt(grad_x**2 + grad_y**2)

    # calculating theta
    grad_angle = numpy.arctan2(grad_y, grad_x)

    # quantize angles 0 to 3
    quantized_angle = numpy.around(3 * (grad_angle + numpy.pi) / (numpy.pi * 2))

    # non-maximal suppression
    # quantize magnitude into 4 directions
    #NE = ndimage.maximum_filter(grad_mag, footprint=_NE)

    #cv2.imshow('grad_x',grad_x)
    #cv2.imshow('grad_y',grad_y)
    #cv2.imshow('grad_mag',grad_angle)
    cv2.waitKey()
def showAllImages():
    fig, current = plt.figure(), 0
    for file_name in glob.glob("*.png"):
        image = img.imread(file_name)
        R, G, B = image[:, :, 0], image[:, :, 1], image[:, :, 2]
        greyscale = 0.21 * R + 0.72 * G + 0.07 * B
        threshold = 210.0 / 256
        greyscale[greyscale > threshold] = 1
        greyscale[greyscale <= threshold] = 0

        dx = ndimage.sobel(greyscale, 0)  # horizontal derivative
        dy = ndimage.sobel(greyscale, 1)  # vertical derivative
        mag = np.hypot(dx, dy)            # magnitude

        y_min, x_min, y_max, x_max = findMinMax(mag)
        mag = mag[x_min:x_max + 1, y_min:y_max + 1]

        # x, y = mag.shape
        # ration = float(x) / y
        # mag = imresize(mag, (50, int(50 / ration)))
        #
        # mag[mag > threshold] = 1
        # mag[mag <= threshold] = 0

        current += 1
        if current == 7:
            fig.add_subplot(1, 1, 1).imshow(mag, cmap=cm.Greys_r)
            print(guessFigure(mag))
            break

    plt.tight_layout()
    plt.show()
def compute_harris_response(image, eps=1e-6):
    """ compute the Harris corner detector response function
        for each pixel in the image"""
    # derivatives
    image = ndimage.gaussian_filter(image, 1)
    imx = ndimage.sobel(image, axis=0, mode='constant')
    imy = ndimage.sobel(image, axis=1, mode='constant')

    Wxx = ndimage.gaussian_filter(imx * imx, 1.5, mode='constant')
    Wxy = ndimage.gaussian_filter(imx * imy, 1.5, mode='constant')
    Wyy = ndimage.gaussian_filter(imy * imy, 1.5, mode='constant')

    # determinant and trace
    Wdet = Wxx * Wyy - Wxy ** 2
    Wtr = Wxx + Wyy
    harris = Wdet / (Wtr + eps)

    # Non maximum filter of size 3
    harris_max = ndimage.maximum_filter(harris, 3, mode='constant')
    harris *= harris == harris_max
    # Remove the image corners
    harris[:3] = 0
    harris[-3:] = 0
    harris[:, :3] = 0
    harris[:, -3:] = 0

    return harris
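# --- Usage sketch (not part of the original snippet) -----------------------
# A minimal check of compute_harris_response() above, assuming numpy and
# scipy.ndimage are importable; the synthetic checkerboard and the 0.1*max
# peak threshold are illustrative choices only.
import numpy as np
from scipy import ndimage

# Checkerboard image: strong, well-separated corner responses.
tile = np.kron(np.indices((8, 8)).sum(axis=0) % 2, np.ones((16, 16))).astype(float)
harris = compute_harris_response(tile)

corners = np.argwhere(harris > 0.1 * harris.max())  # (row, col) corner candidates
print(corners.shape)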
def slope_aspect(array, pix_size, scale):
    dzdx = ndimage.sobel(array, axis=1) / (8. * pix_size)
    dzdy = ndimage.sobel(array, axis=0) / (8. * pix_size)
    hyp = numpy.hypot(dzdx, dzdy)
    slp = numexpr.evaluate("arctan(hyp * scale)")
    asp = numexpr.evaluate("arctan2(dzdy, -dzdx)")
    return slp, asp
def _compute_derivatives(image, mode="constant", cval=0): """Compute derivatives in x and y direction using the Sobel operator. Parameters ---------- image : ndarray Input image. mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional How to handle values outside the image borders. cval : float, optional Used in conjunction with mode 'constant', the value outside the image boundaries. Returns ------- imx : ndarray Derivative in x-direction. imy : ndarray Derivative in y-direction. """ imy = ndimage.sobel(image, axis=0, mode=mode, cval=cval) imx = ndimage.sobel(image, axis=1, mode=mode, cval=cval) return imx, imy
def sobel(self, img):
    fig = scipy.misc.imread(img).astype('int32')
    dx = ndimage.sobel(fig, 0)  # horizontal derivative
    dy = ndimage.sobel(fig, 1)  # vertical derivative
    mag = np.hypot(dx, dy)      # magnitude
    mag *= 255.0 / np.max(mag)  # normalize
    scipy.misc.imsave('sobel.png', mag)
def gradient_transformation(data):
    """Gradient distance transformation."""
    gx = ndimage.sobel(data, 0)
    gy = ndimage.sobel(data, 1)
    gz = ndimage.sobel(data, 2)
    grad = np.sqrt(gx**2 + gy**2 + gz**2)
    return grad
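# --- Usage sketch (not part of the original snippet) -----------------------
# The 3-D Sobel magnitude computed above can also be obtained with scipy's
# generic_gradient_magnitude helper; a brief equivalence check on a random
# volume (the test data is illustrative only).
import numpy as np
from scipy import ndimage

volume = np.random.rand(16, 16, 16)
manual = np.sqrt(sum(ndimage.sobel(volume, axis=a)**2 for a in range(3)))
builtin = ndimage.generic_gradient_magnitude(volume, ndimage.sobel)
print(np.allclose(manual, builtin))  # expected: True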
def testAll():
    fig = plt.figure(figsize=(16, 8))
    current = 0
    for file_name in glob.glob("img/*.jpg"):
        image = img.imread(file_name)
        r = imresize(image, (100, 100))
        mask1 = ndimage.binary_erosion(coloration(r), structure=np.ones((3, 3)))

        greyscale = np.average(image, axis=2)
        dx = ndimage.sobel(greyscale, 0)  # horizontal derivative
        dy = ndimage.sobel(greyscale, 1)  # vertical derivative
        mask2 = np.hypot(dx, dy)          # magnitude

        current += 1
        fig.add_subplot(3, 3, current).imshow(mask1, cmap=cm.Greys_r, interpolation='none')
        labeled_array, num_features = ndimage.label(mask1)
        print(labeled_array, num_features)
        current += 1
        fig.add_subplot(3, 3, current).imshow(mask2, cmap=cm.Greys_r, interpolation='none')
        current += 1
        fig.add_subplot(3, 3, current).imshow(r, cmap=cm.Greys_r, interpolation='none')

    plt.axis('off')
    plt.show()
def detectCircles(img, r, useGradient):
    grayimg = rgb2gray(img)
    edges = cv2.Canny(img, 100, 200)
    ax[0].imshow(edges, cmap=plt.cm.gray)
    ax[0].set_title('after canny image operation')
    if useGradient == 0:
        accumulator1 = np.zeros(edges.shape)
        for (i, j), value in np.ndenumerate(edges):
            if value:
                for t_idx in np.arange(0, 2 * math.pi, math.pi / 100):
                    a = int(i - (r * math.cos(t_idx)))
                    b = int(j + (r * math.sin(t_idx)))
                    if a > 0 and b > 0 and a < accumulator1.shape[0] and b < accumulator1.shape[1]:
                        accumulator1[a, b] += 1
        print(accumulator1)
        ax[1].imshow(accumulator1, cmap=plt.cm.gray)
        ax[1].set_title('Accumulator array without using gradient')
    else:
        dx = ndimage.sobel(grayimg, axis=0, mode='constant')
        dy = ndimage.sobel(grayimg, axis=1, mode='constant')
        accumulator = np.zeros(edges.shape)
        for (i, j), value in np.ndenumerate(edges):
            if value:
                gradient = math.atan(-dx[i, j] / (dy[i, j] + 0.00001))
                for theta in np.arange(gradient - math.pi / 4, gradient + math.pi / 4, math.pi / 100):
                    a = int(i - (r * math.cos(theta)))
                    b = int(j + (r * math.sin(theta)))
                    if a < accumulator.shape[0] and b < accumulator.shape[1]:
                        accumulator[a, b] += 1
        ax[1].imshow(accumulator, cmap=plt.cm.gray)
        ax[1].set_title('Accumulator array with gradient')
        print(accumulator)
    return
def calc_orient(image):
    #image = cv2.imread('test_window.png', cv2.CV_LOAD_IMAGE_GRAYSCALE)
    #image = resize.resize_img(image)
    #dbl_image = image.astype(float)
    #cv2.imshow('orginal', image)
    smoothed_im = cv2.GaussianBlur(image, (3, 3), 0)  # gaussian blur using 3x3 window
    dx = ndimage.sobel(smoothed_im, 0)  # horizontal sobel gradient
    dy = ndimage.sobel(smoothed_im, 1)  # vertical sobel gradient
    max_dx = np.max(dx) - np.min(dx)
    max_dy = np.max(dy) - np.min(dy)
    #print max_dx
    #print max_dy
    theta = math.atan2(max_dy, max_dx)  # in radians
    print('grad', theta)
    #cv2.imshow('dx',dx)
    #cv2.waitKey()
    orientation_im = draw_lines.draw_lines(image, theta)
    return orientation_im
    #print dx.shape[0],dx.shape[1]
    #print dx
    #print 'hello',dy

#calc_orient()
def get_hist_angle_bins(img):
    '''
    given a python image, return 2 lists of 16 elements each.
    1 list (bin_lowers) has the lower bounds for each bin, the other list
    (hist_vals) has the number of pixels in each bin.
    '''
    im = numpy.array(img)
    im = numpy.resize(im, (8, 8))  # reshape array to model 8x8 cell
    sx = ndimage.sobel(im, axis=0, mode='constant')  # apply sobel operator in x-direction
    sy = ndimage.sobel(im, axis=1, mode='constant')  # apply sobel operator in y-direction
    sx = list(numpy.array(sx).reshape(-1,))  # reshape sobel output into 1-d list for easy manipulation
    sy = list(numpy.array(sy).reshape(-1,))
    sobel = []
    for index in range(0, 64):
        x = float(sx[index])
        y = float(sy[index])
        if x != 0:  # Avoid a divide-by-zero error
            angle = math.degrees(math.atan2(y, x))  # does this give the angle we're looking for? trying to get the full 360-degree range
        else:
            angle = math.degrees(math.atan2(y, 1))  # what should the value be if x is 0?
        if angle < 0:
            angle += 360
        sobel.append(angle)
    hist, bin_edges = numpy.histogram(sobel, bins=16)
    bin_lowers = list(numpy.array(bin_edges).reshape(-1,))  # unnecessary because i've already reshaped the data?
    bin_lowers.pop()  # gets rid of the high side of the highest bin
    hist_vals = list(numpy.array(hist).reshape(-1,))  # also unnecessary?
    return bin_lowers, hist_vals
def load_contourlabel(self, idx):
    """
    Load label image as 1 x height x width integer array of label indices.
    The leading singleton dimension is required by the loss.
    """
    import scipy.io
    mat = scipy.io.loadmat('{}/cls/{}.mat'.format(self.sbdd_dir, idx))
    label = mat['GTcls'][0]['Segmentation'][0].astype(np.uint8)
    from scipy import ndimage
    edge_horizont = ndimage.sobel(label, 0)
    edge_vertical = ndimage.sobel(label, 1)
    magnitude = np.hypot(edge_horizont, edge_vertical)
    contour_label = np.zeros(label.shape, dtype=np.uint8)
    contour_mask = np.zeros(label.shape, dtype=np.uint8)
    label[label == 0] = 254
    label[label < 254] = 1
    contour_mask[magnitude > 0] = 255
    kernel = np.ones((5, 5), np.uint8)
    import cv2
    #contour_mask = cv2.dilate(contour_mask,kernel,iterations = 1)
    #contour_mask = cv2.erode(contour_mask,kernel,iterations = 1)
    contour_label[label == 1] = 1
    contour_label[label == 254] = 0
    contour_label[contour_mask == 255] = 2
    contour_label = contour_label[np.newaxis, ...]
    return contour_label
def sobel_gradient(Im):
    Imy = nd.sobel(Im, axis=0)
    Imx = nd.sobel(Im, axis=1)
    ImMag = (Imx**2 + Imy**2)**0.5
    return ImMag, Imx, Imy
def canny(image, high_threshold, low_threshold):
    grad_x = ndimage.sobel(image, 0)
    grad_y = ndimage.sobel(image, 1)
    grad_mag = numpy.sqrt(grad_x**2 + grad_y**2)
    grad_angle = numpy.arctan2(grad_y, grad_x)

    # next, scale the angles in the range [0, 3] and then round to quantize
    quantized_angle = numpy.around(3 * (grad_angle + numpy.pi) / (numpy.pi * 2))

    # Non-maximal suppression: an edge pixel is only good if its magnitude is
    # greater than its neighbors normal to the edge direction. We quantize
    # edge direction into four angles, so we only need to look at four
    # sets of neighbors
    NE = ndimage.maximum_filter(grad_mag, footprint=_NE)
    W = ndimage.maximum_filter(grad_mag, footprint=_W)
    NW = ndimage.maximum_filter(grad_mag, footprint=_NW)
    N = ndimage.maximum_filter(grad_mag, footprint=_N)
    thinned = (((grad_mag > W) & (quantized_angle == _N_d)) |
               ((grad_mag > N) & (quantized_angle == _W_d)) |
               ((grad_mag > NW) & (quantized_angle == _NE_d)) |
               ((grad_mag > NE) & (quantized_angle == _NW_d)))
    thinned_grad = thinned * grad_mag

    # Now, hysteresis thresholding: find seeds above a high threshold, then
    # expand out until we go below the low threshold
    high = thinned_grad > high_threshold
    low = thinned_grad > low_threshold
    canny_edges = ndimage.binary_dilation(high, structure=numpy.ones((3, 3)),
                                          iterations=-1, mask=low)
    return grad_mag, thinned_grad, canny_edges
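# --- Assumed module-level constants (not shown in the snippet) --------------
# The _NE, _W, _NW, _N footprints and the *_d direction codes used above are
# defined elsewhere in the original module. One plausible set of definitions,
# given here only as an assumption: each footprint selects the pair of
# 8-connected neighbours lying along one quantized edge direction, and the
# *_d codes are the four values (0..3) produced by the angle quantization.
import numpy

_N  = numpy.array([[0, 1, 0],
                   [0, 0, 0],
                   [0, 1, 0]], dtype=bool)   # neighbours above/below
_NE = numpy.array([[0, 0, 1],
                   [0, 0, 0],
                   [1, 0, 0]], dtype=bool)   # diagonal neighbours (NE/SW)
_W  = numpy.array([[0, 0, 0],
                   [1, 0, 1],
                   [0, 0, 0]], dtype=bool)   # neighbours left/right
_NW = numpy.array([[1, 0, 0],
                   [0, 0, 0],
                   [0, 0, 1]], dtype=bool)   # diagonal neighbours (NW/SE)

_NE_d = 0
_W_d = 1
_NW_d = 2
_N_d = 3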
def make_sobel(self):
    """Find edges using the Sobel filter"""
    luminance = self.arrays['l']
    sx = ndimage.sobel(luminance, axis=0, mode='nearest')
    sy = ndimage.sobel(luminance, axis=1, mode='nearest')
    sob = numpy.hypot(sx, sy)
    return sx, sy, sob
def structure_tensor(im, radius):
    bi = ndimage.gaussian_filter(im.astype(np.float32), 1)
    imy = ndimage.sobel(bi, axis=0)
    imx = ndimage.sobel(bi, axis=1)
    Axx = ndimage.gaussian_filter(imx ** 2, radius)
    Axy = ndimage.gaussian_filter(imx * imy, radius)
    Ayy = ndimage.gaussian_filter(imy ** 2, radius)
    return Axx, Axy, Ayy
def compute_energy(im):
    im = rgb2gray(im)
    im = im.astype('int32')
    dx = ndimage.sobel(im, 0)
    dy = ndimage.sobel(im, 1)
    mag = numpy.hypot(dx, dy)
    mag *= 255.0 / numpy.max(mag)
    return mag
def detect_edges(self):
    sx = ndimage.sobel(self.image_raw.image, axis=0, mode='constant')
    sy = ndimage.sobel(self.image_raw.image, axis=1, mode='constant')
    image_edges = np.hypot(sx, sy)
    t = image_edges.mean() * 1.4
    image_edges[image_edges < t] = 1
    image_edges[image_edges >= t] = 0
    self.image_bw.image = image_edges
def apply_sobel_filter(image):
    """
    Apply sobel filter on an image, return the filtered object.
    This routine roughly follows the solution provided in:
    http://stackoverflow.com/questions/7185655/applying-the-sobel-filter-using-scipy
    """
    dx = ndimage.sobel(image, 0)  # horizontal derivative
    dy = ndimage.sobel(image, 1)  # vertical derivative
    mag = numpy.hypot(dx, dy)     # magnitude
    return mag
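# --- Usage sketch (not part of the original snippet) -----------------------
# Calling apply_sobel_filter() on a small synthetic image; the bright square
# on a dark background is an illustrative test input only.
import numpy

image = numpy.zeros((64, 64), dtype=float)
image[16:48, 16:48] = 255.0

edges = apply_sobel_filter(image)
print(edges.shape, edges.max())  # same shape as input; strongest response on the square's boundary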
def sobelFunction(s):
    im = scipy.misc.imread(s)
    im = im.astype('int32')
    imagename = s[:-4]
    dx = ndimage.sobel(im, 0)  # horizontal derivative
    dy = ndimage.sobel(im, 1)  # vertical derivative
    mag = numpy.hypot(dx, dy)  # magnitude
    mag *= 255.0 / numpy.max(mag)  # normalize (Q&D)
    scipy.misc.imsave(imagename + 'SobelOutput.jpg', mag)
def TestsobHypot(rec):
    x = ndimage.sobel(abs(rec)**2, axis=0, mode='constant')
    y = ndimage.sobel(abs(rec)**2, axis=1, mode='constant')
    hype = np.hypot(x, y)
    hype = hype.mean(axis=0).mean(axis=0)
    index = hype.argmax()
    return index
def add_basic_transforms(self):
    rgb = [_Transform(lambda im, k=i: im[..., k]) for i in range(3)]
    laplace = [_Transform(lambda im, k=i: ndimage.filters.laplace(im)[..., k])
               for i in range(3)]
    grad = ([_Transform(lambda im, k=i: np.arctan2(ndimage.sobel(im[..., k], 0),
                                                   ndimage.sobel(im[..., k], 1)))
             for i in range(3)] +
            [_Transform(lambda im, k=i: np.hypot(ndimage.sobel(im[..., k], 0),
                                                 ndimage.sobel(im[..., k], 1)))
             for i in range(3)])
    # svm = Svm()
    # svm.load('svm_skin.pkl')
    # skin = [_Transform(lambda im: svm.classify_vec(im))]
    self.transform_lists += [rgb, laplace, grad]
def ExtractEdge(imdata):
    dx = ndimage.sobel(imdata.getDataBuffer(), 0)
    dy = ndimage.sobel(imdata.getDataBuffer(), 1)
    mag = np.hypot(dx, dy)
    mag = mag.astype("int32")
    mag = 255 * mag / np.max(np.max(mag))
    imdata.setDataBuffer(mag)
    return imdata
def energy_image_2(im):
    gray_img = rgb2gray(im)
    double_img = im2double(gray_img)
    dx = ndimage.sobel(im, 0)  # horizontal derivative
    dy = ndimage.sobel(im, 1)  # vertical derivative
    mag = numpy.hypot(dx, dy)  # magnitude
    mag *= 255.0 / numpy.max(mag)  # normalize (Q&D)
    scipy.misc.imsave('sobel.jpg', mag)
    #gradient_img = ndimage.filters.sobel(double_img, axis=-1, output=None, mode='reflect', cval=0.0)
    return im2double(mag)
def saveEdges(binary, name):
    """
    Creates an image where you only see the edges of the particles.
    """
    dilatedForSobel = binary.astype(np.int)
    dilatedForSobel[binary] = 255
    dx = ndimage.sobel(dilatedForSobel, 0)  # horizontal derivative
    dy = ndimage.sobel(dilatedForSobel, 1)  # vertical derivative
    mag = np.hypot(dx, dy)  # magnitude
    cv2.imwrite(name + "_" + _NAME_OF_EDGES_IMAGE, mag)
def sobel(file):
    sx = ndimage.sobel(file, axis=0, mode='constant')
    sy = ndimage.sobel(file, axis=1, mode='constant')
    sob = np.hypot(sx, sy)
    # these used to be 235 and 250
    sob = split_histogram(sob, 50000, 80000)
    sx2 = ndimage.sobel(sob, axis=0, mode='constant')
    sy2 = ndimage.sobel(sob, axis=1, mode='constant')
    sob2 = np.hypot(sx2, sy2)
    #sob2 = split_histogram(sob2, 0, 200)
    return sob2
def process(img):
    # processes (resize, color, crop, etc) the image to be sent
    # to trained model for evaluation
    crop = img[tah:h, 0:(w // 3)]  # crops
    resized = cv2.resize(crop, (28, 28))  # resizes image to 28*28 pixels
    resized = ndimage.gaussian_filter(resized, 0.25)  # gaussian filter with sigma = 0.25
    ho = ndimage.sobel(resized, 0)  # horizontal sobel edge detection
    v = ndimage.sobel(resized, 1)   # vertical sobel edge detection
    inverted = np.hypot(ho, v)      # combine both edge detection images
    return inverted  # return processed image
def apply_sobel_2d(img):
    """
    Takes in the data array from a nii file and applies the Sobel gradient
    operator on it. Sobel gradient directions are binned from 0-8.

    Parameters: NUMPY.NDARRAY representing the greyscale voxel values
    Returns:    NUMPY.NDARRAY of the binned Sobel direction for each voxel
                NUMPY.NDARRAY of the Sobel magnitude for each voxel
    """
    start = time.time()
    sobel_threshold = 2

    # matrices of sobel gradients in x and y directions
    dx = ndimage.sobel(img, 0)
    dy = ndimage.sobel(img, 1)

    shape = dx.shape
    sobel_magnitudes = numpy.ndarray(shape=shape)
    sobel_directions = numpy.ndarray(shape=shape)

    # for every pixel
    for x in range(shape[0]):
        for y in range(shape[1]):
            coord = tuple((x, y))
            mx = dx.item(coord)
            my = dy.item(coord)

            # calculate gradient magnitude
            mag = math.sqrt((mx**2) + (my**2))
            sobel_magnitudes.itemset(coord, mag)

            # if mag is high enough, calculate gradient bin and save it
            if mag > sobel_threshold:
                bin = bin_sobel_2d(mx, my)
                sobel_directions.itemset(coord, bin)
            else:
                sobel_directions.itemset(coord, 0)

    # print("TIME: " + str(time.time()-start))
    return sobel_directions, sobel_magnitudes
def main():
    if not os.path.exists('results'):
        os.makedirs('results')
    for filename in os.listdir("data"):
        for method in ["SSD", "NCC"]:
            print(filename)
            ###################################################
            #### using naive method on smaller .jpg images ####
            ###################################################
            if filename.endswith(".jpg"):
                b, g, r = split_image("data/" + filename)

                # align the green and red color channels to the blue color channel
                ag, g_shift = align(g, b)
                ar, r_shift = align(r, b)

                # stack the color channels
                im_out = np.dstack([ar, ag, b])

                # save the image
                skio.imsave("results/" + str.split(filename, ".")[0] + "_" + method + "_g_" + str(g_shift) +
                            "_r_" + str(r_shift) + ".jpg", im_out)

            ####################################################
            #### using pyramid method on larger .tif images ####
            ####################################################
            elif filename.endswith(".tif"):
                b, g, r = split_image("data/" + filename)

                # apply sobel edge filter (improvement is negligible for all images other than emir.tiff)
                b_s = np.abs(sobel(b))
                g_s = np.abs(sobel(g))
                r_s = np.abs(sobel(r))

                _, g_shift = pyramid(g_s, b_s)
                _, r_shift = pyramid(r_s, b_s)

                # apply displacement shift to original color channels
                ag = np.roll(g, g_shift, (0, 1))
                ar = np.roll(r, r_shift, (0, 1))

                # stack the color channels
                im_out = np.dstack([ar, ag, b])

                # save the image
                skio.imsave("results/" + str.split(filename, ".")[0] + "_" + method + "_g_" + str(g_shift) +
                            "_r_" + str(r_shift) + ".jpg", im_out)
def gen_data(self, row, size=30):
    gray = self.gray_padded_image
    binar = self.bin_padded_image
    self.my_dict = {'gray_pixel_value': [], 'mean_gray_pixel_value': [],
                    'bin_percentage_colored': [], 'sobel_gradient': [], 'label': []}
    for r in range(30, 220):
        row_idx = r
        for c in range(230, 255):
            col_idx = c
            self.plot_frame(gray, row_idx, col_idx, size)
            self.plot_frame(binar, row_idx, col_idx, size)
            plt.show()
            gray_window = self.gray_padded_image[row_idx:row_idx + size, col_idx:col_idx + size]
            bin_window = self.bin_padded_image[row_idx:row_idx + size, col_idx:col_idx + size]
            gray_pixel_value = gray_window[15, 15]
            bin_pixel_value = bin_window[15, 15]
            mean_pixel_value = np.mean(gray_window)
            gray_window_sobel = ndimage.sobel(gray_window, axis=0)
            colored_percentage = np.count_nonzero(bin_window == 0) / (30**2)
            above_area = np.mean(gray_window_sobel[8:16, 15])
            below_area = np.mean(gray_window_sobel[16:23, 15])
            sobel_gradient = above_area - -below_area
            if bin_pixel_value == 0.0:
                label = self.get_label()
                self.update_dict(gray_pixel_value, mean_pixel_value,
                                 colored_percentage, sobel_gradient, label)
                self.save_imgs(label, gray_window, bin_window, row_idx, col_idx)
                print(label)
    self.df = pd.DataFrame.from_dict(self.my_dict)
    self.add_label_history()
    self.save_csv(row_idx)
def plot_frame(self, zoom, row_index, col_index, size):
    masked_window = np.random.random((zoom.shape[0], zoom.shape[1]))
    masked_window[row_index:row_index + size, col_index:col_index + size] = 1
    masked_window = np.ma.masked_where(masked_window != 1, masked_window)

    masked_pixel = np.random.random((zoom[row_index:row_index + size, col_index:col_index + size].shape[0],
                                     zoom[row_index:row_index + size, col_index:col_index + size].shape[1]))
    masked_pixel[15, 15] = 1
    masked_pixel = np.ma.masked_where(masked_pixel != 1, masked_pixel)

    masked_pixel1 = np.random.random((zoom[row_index:row_index + size, col_index:col_index + size].shape[0],
                                      zoom[row_index:row_index + size, col_index:col_index + size].shape[1]))
    masked_pixel1[8:23, 15] = 1
    masked_pixel1 = np.ma.masked_where(masked_pixel1 != 1, masked_pixel1)

    window = zoom[row_index:row_index + size, col_index:col_index + size]
    colored_percentage = np.count_nonzero(window == 0) / (30**2)
    pixel_value = window[15, 15]
    window_sobel = ndimage.sobel(window, axis=0)
    above_area = np.mean(window_sobel[8:16, 15])
    below_area = np.mean(window_sobel[16:23, 15])
    sobel_value = window_sobel[15, 15]

    # Overlay the two images
    fig, ax = plt.subplots(1, 3)
    ax.ravel()
    ax[0].imshow(zoom, cmap='gray')
    ax[0].imshow(masked_window, cmap='prism', interpolation='none')
    # ax[0].imshow(masked_pixel, cmap=cm.jet, interpolation='none')
    ax[0].set_title('Colored: {}'.format(round(colored_percentage, 2)))
    ax[1].imshow(window, cmap='gray')
    ax[1].imshow(masked_pixel, cmap='prism', interpolation='none')
    ax[1].set_title('Pixel Value: {}'.format(pixel_value))
    ax[2].imshow(window_sobel)
    ax[2].imshow(masked_pixel1, cmap='jet')
    ax[2].imshow(masked_pixel, cmap='prism', interpolation='none')
def showDrawing(event):
    threshold = thresholdScale.get()
    startTime = time.time()
    imageArray = np.array(dataSet[imageScale.get(), :, :])
    dimension = imageArray[0].size
    imageArray = (imageArray - imageArray.min()) / (imageArray.max() - imageArray.min()) * threshold / 255
    maskArray = np.array(file.get(list(file.items())[0][0])[imageScale.get(), ...])
    maskArray = sobel(maskArray)
    maskArray = np.sqrt(maskArray**2)
    minmaxLabel.configure(text="Min = " + str(imageArray.min()) + " Max = " + str(imageArray.max()))
    maskArray = (maskArray - maskArray.min()) / (maskArray.max() - maskArray.min())
    if not applyMask.get():
        finalArray = np.rint(imageArray * 255)
    else:
        imageArray = np.rint(imageArray * 255)
        rgbArray = np.zeros((dimension, dimension, 3), 'uint8')
        rgbArray[..., 0] = imageArray + maskArray * (255 - thresholdScale.get())
        rgbArray[..., 1] = imageArray
        rgbArray[..., 2] = imageArray
        finalArray = rgbArray
    img = Image.fromarray(finalArray)
    img = img.resize((800, 800))
    img = ImageTk.PhotoImage(master=window, image=img)
    window.dontDeleteMePlease = img
    canvas.create_image(pixelWidth, pixelHeight, image=img)
    print(time.time() - startTime)
def sobel(img_array):
    '''
    image: image to convert with Sobel filter
    '''
    col = np.zeros((img_array.shape))
    for i in range(img_array.shape[0]):
        # print(i)
        img = img_array[i]
        dx = ndimage.sobel(img, 0)  # horizontal derivative
        dy = ndimage.sobel(img, 1)  # vertical derivative
        mag = np.hypot(dx, dy)      # magnitude, equivalent to sqrt(dx**2 + dy**2)
        mag *= 255.0 / np.max(mag)  # normalize (Q&D)
        col[i] = mag
    return col
def harris_feature(im, region_size=5, to_return='harris', scale=0.05):
    """
    Harris-motivated feature detection on a d-dimensional image.

    Parameters
    ----------
    im
    region_size
    to_return : {'harris','matrix','trace-determinant'}
    """
    ndim = im.ndim

    # 1. Gradient of image
    grads = [nd.sobel(im, axis=i) for i in range(ndim)]

    # 2. Corner response matrix
    matrix = np.zeros((ndim, ndim) + im.shape)
    for a in range(ndim):
        for b in range(ndim):
            matrix[a, b] = nd.filters.gaussian_filter(grads[a] * grads[b], region_size)
    if to_return == 'matrix':
        return matrix

    # 3. Trace, determinant
    trc = np.trace(matrix, axis1=0, axis2=1)
    det = np.linalg.det(matrix.T).T
    if to_return == 'trace-determinant':
        return trc, det
    else:
        # 4. Harris detector:
        harris = det - scale * trc * trc
        return harris
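# --- Usage sketch (not part of the original snippet) -----------------------
# harris_feature() above is dimension-agnostic; a brief call on a small 3-D
# volume, assuming the snippet's np/nd aliases are numpy and scipy.ndimage
# and the random test data is illustrative only.
import numpy as np
from scipy import ndimage as nd

volume = np.random.rand(20, 20, 20)
harris = harris_feature(volume, region_size=3)       # scalar response per voxel
struct = harris_feature(volume, to_return='matrix')  # full structure tensor

print(harris.shape)  # (20, 20, 20)
print(struct.shape)  # (3, 3, 20, 20, 20)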
def generate_features3D(image, sigma):
    # generate range of sigmas
    sigmas = range(sigma[0], sigma[1] + 1)

    f_values = image.flatten()
    f_sobel = scp.sobel(image).flatten()
    f_gauss = np.zeros([len(image.flatten()), len(sigmas)])
    f_dog = np.zeros([len(image.flatten()), len(sigmas) - 1])

    idx = 0
    for s in range(sigma[0], sigma[1] + 1):
        # consider only Re part for gabor filter
        f_gauss[:, idx] = scp.gaussian_filter(image, s).flatten()
        if (idx != 0):
            f_dog[:, idx - 1] = f_gauss[:, idx] - f_gauss[:, idx - 1]
        idx += 1

    f_max = scp.maximum_filter(image, sigma[0]).flatten()
    f_median = scp.median_filter(image, sigma[0]).flatten()  # run median only with the minimal sigma
    f_laplacian = scp.laplace(image).flatten()

    # full set of features
    f_set = np.vstack([f_values, f_max, f_median, f_sobel, f_gauss.T, f_dog.T, f_laplacian]).T
    return f_set
def demo(image_path, model_path, device):
    import os
    import numpy as np
    from matplotlib import pyplot as plt
    from scipy.ndimage import sobel
    from PIL import Image
    import torch
    from dextr.model import DextrModel

    if device is None:
        if torch.cuda.is_available():
            device = 'cuda:0'
        else:
            device = 'cpu'

    torch_device = torch.device(device)

    # Load model
    if model_path is not None:
        # A model path was provided; load it
        dextr_model = torch.load(model_path, map_location=torch_device)
    else:
        # No model path; download (if necessary) and load a pre-trained model
        dextr_model = DextrModel.pascalvoc_resunet101().to(torch_device)

    dextr_model.eval()

    # Load image
    if image_path is None:
        image_path = os.path.join('images', 'giraffes_1.jpg')
    image = Image.open(image_path)
    img_arr = np.array(image) / 255.0

    plt.ion()
    plt.axis('off')
    plt.imshow(img_arr)
    plt.title('Click four extreme points of the object\nHit enter when done')

    def overlay(image, colour, mask, alpha):
        alpha_img = mask * alpha
        return image * (1 - alpha_img[:, :, None]) + colour[None, None, :] * alpha_img[:, :, None]

    while True:
        # Get points from user
        extreme_points = np.array(plt.ginput(4, timeout=0))
        if len(extreme_points) < 4:
            # Less than four points; quit
            break

        # Predict masks (points come from matplotlib in [x,y] order; this must be flipped)
        masks = dextr_model.predict([image], extreme_points[None, :, ::-1])

        mask_bin = masks[0] >= 0.5
        edges = sobel(mask_bin.astype(float)) != 0

        img_arr = overlay(img_arr, np.array([0.0, 1.0, 0.0]), mask_bin, 0.3)
        img_arr = overlay(img_arr, np.array([1.0, 1.0, 0.0]), edges, 0.3)

        plt.imshow(img_arr)
        plt.plot(extreme_points[:, 0], extreme_points[:, 1], 'gx')
def main(file1, file2):
    mat = cv2.imread(file1)
    nmat = cv2.imread(file2)
    blurmat = cv2.GaussianBlur(mat, (5, 5), 3)
    nblurmat = cv2.GaussianBlur(nmat, (25, 25), 50)

    sx = ndimage.sobel(mat, axis=0, mode='constant')
    sy = ndimage.sobel(mat, axis=1, mode='constant')
    sobel = np.hypot(sx, sy).mean()
    print(sobel)

    sx = ndimage.sobel(nmat, axis=0, mode='constant')
    sy = ndimage.sobel(nmat, axis=1, mode='constant')
    sobel = np.hypot(sx, sy).mean()
    print(sobel)

    cv2.imshow("", blurmat)
    cv2.imshow("n", nblurmat)
    cv2.waitKey(0)
def featArray(data, timesObs):
    freqs1 = n.linspace(100, 200, n.shape(data)[1])
    sh = n.shape(data)

    CW1mean = n.zeros_like(data)
    for i in range(CW1mean.shape[1]):
        CW1 = cwt(n.abs(data[:, i]), haar, n.arange(1, 10, 1))
        CW1 = n.ma.masked_where(CW1 == 0, CW1)
        CW1mean[:, i] = n.ma.mean(n.abs(CW1), 0)

    CT1mean = n.zeros_like(data)
    for j in range(CW1mean.shape[0]):
        CT1 = cwt(data[j, :], signal.morlet, n.arange(1, 3, 1))
        CT1 = n.ma.masked_where(CT1 == 0, CT1)
        CT1mean[j, :] = n.mean(n.abs(CT1), 0)

    processed = ndimage.sobel(n.abs(data))

    X1 = n.zeros((sh[0] * sh[1], 5))
    X1[:, 0] = (n.real(data)).reshape(sh[0] * sh[1])
    X1[:, 1] = (n.imag(data)).reshape(sh[0] * sh[1])
    #X1[:,2] = n.abs(CW1mean).reshape(sh[0]*sh[1])
    X1[:, 2] = n.abs(CT1mean).reshape(sh[0] * sh[1])
    # X1[:,2] = n.log10(n.abs(processed)).reshape(sh[0]*sh[1])
    X1[:, 3] = (n.array([timesObs] * sh[1])).reshape(sh[0] * sh[1])
    X1[:, 4] = (n.array([freqs1] * sh[0])).reshape(sh[0] * sh[1])
    X1 = n.nan_to_num(X1)
    # for m in range(n.shape(X1)[1]):
    #     X1[:,m] = X1[:,m]/X1[:,m].max()
    X1 = normalize(X1, norm='l2', axis=1)
    X1 = n.nan_to_num(X1)
    return X1
def _compute_derivatives(image, mode='constant', cval=0):
    """Compute derivatives in axis directions using the Sobel operator.

    Parameters
    ----------
    image : ndarray
        Input image.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.

    Returns
    -------
    derivatives : list of ndarray
        Derivatives in each axis direction.

    """
    derivatives = [
        ndi.sobel(image, axis=i, mode=mode, cval=cval)
        for i in range(image.ndim)
    ]

    return derivatives
def func_write_gradmag_img(imf, fout):
    im_dy = ndimage.sobel(imf, 0)
    im_dx = ndimage.sobel(imf, 1)

    mmin = np.minimum(np.min(im_dy), np.min(im_dx))
    im_dy -= mmin
    im_dx -= mmin
    mmax = np.maximum(np.max(im_dy), np.max(im_dx))
    im_dy = (im_dy / mmax) * 255.0
    im_dx = (im_dx / mmax) * 255.0

    im_dmag = np.sqrt(im_dx**2 + im_dy**2)
    im_dmag -= np.min(im_dmag)
    im_dmag = (im_dmag / np.max(im_dmag)) * 255

    im_d = np.stack((im_dmag, im_dx, im_dy), axis=2)
    im_d = im_d.astype(np.uint8)
    plt.imsave(fout, im_d)
def createPoints_sober(image1):
    Ix = ndimage.sobel(image1, 0)  # pre-calculate the derivatives
    Iy = ndimage.sobel(image1, 1)
    Iz = ndimage.sobel(image1, 2)
    Ix2 = Ix * Ix
    Iy2 = Iy * Iy
    Iz2 = Iz * Iz
    Ixy = Ix * Iy
    Ixz = Ix * Iz
    Iyz = Iy * Iz
    k = 0.06
    I_feature = (Ix2 * Iy2 * Iz2 + Ixy * Iyz * Ixz + Ixy * Iyz * Ixz
                 - Ixy * Ixy * Iz2 - Iyz * Iyz * Ix2 - Ixz * Ixz * Iy2) - k * (Ix2 + Iy2 + Iz2)**3
    return I_feature
def sample_points(im, step, mask=None, maxiter=20):
    shape = np.asarray(im.shape, dtype=int)
    steps = np.ones(im.ndim, dtype=int) * step

    ## make regular grid
    grid = np.zeros(shape)
    grid[::steps[0], ::steps[1], ::steps[2]] = 1
    if mask is not None:
        grid = grid * mask
    points0 = np.argwhere(grid)
    points = points0

    ## move points
    fim = ndimage.gaussian_filter(im.astype(float), sigma=1.0)
    emap = np.sqrt(np.sum([ndimage.sobel(fim, axis=d)**2 for d in range(im.ndim)], axis=0))
    gradient = np.asarray(np.gradient(emap)).astype(float)

    for i in range(np.max(steps) // 2):
        axes = i < (steps / 2)
        if i >= maxiter:
            break
        dp = gradient[(slice(None), ) + tuple(points.T)].T
        dp = dp / np.c_[np.maximum(np.max(np.abs(dp), axis=1), 1e-10)]
        dp = (dp + 0.5 * (-1)**(dp < 0)).astype(int)
        for axe in np.where(axes)[0]:
            points[:, axe] = (points - dp)[:, axe]
        points = points[np.all(points > 0, axis=1) & np.all(points < shape, axis=1)]

    return points
def edge_filters(self):
    ''' Plot five edge-filters (kernels) in grayscale '''
    self.gray = rgb2gray(self.im)
    self.edges = {
        'Original': self.im,
        'Grayscale': self.gray,
        'Sobel': ndimage.sobel(self.gray),
        'Prewitt': ndimage.prewitt(self.gray),
        'Laplacian': ndimage.laplace(self.gray, mode='reflect'),
        'LoG': ndimage.gaussian_laplace(self.gray, sigma=1, mode='reflect')
    }
    fig, axes = plt.subplots(2, 3, figsize=(18, 10))
    axs = iter(axes.ravel())
    for name, edge in self.edges.items():
        ax = next(axs)
        ax.imshow(edge, cmap='gray')
        ax.set_title(name)
    fig.tight_layout()
    plt.savefig('.'.join(FNAME.split('.')[:-1]) + '_processed.png')
def suction_cam_callback(image):
    assert image.encoding == 'bgra8'
    raw = np.fromstring(image.data, dtype=np.uint8).reshape((image.height, image.width, 4))
    colorized = np.stack((raw[:, :, 2], raw[:, :, 1], raw[:, :, 0]), axis=2)
    grayscale = rgb_to_grayscale(raw[:, :, 2], raw[:, :, 1], raw[:, :, 0]).astype(np.uint8)
    blurred = signal.fftconvolve(grayscale, GAUSSIAN_KERNEL, mode='same')
    edges = ndimage.sobel(blurred)
    dev_mean = np.mean(edges)
    dev_bitmap = 1 - (np.abs(edges - dev_mean) > 10 * dev_mean).astype(np.int)
    gray_mean = np.mean(grayscale)
    gray_bitmap = ((grayscale - gray_mean) > 3 * gray_mean).astype(np.int)
    combined_bitmap = dev_bitmap / dev_bitmap.max() + 2 * gray_bitmap / gray_bitmap.max()
    # saturated = np.minimum(0.97*256, np.maximum(0.03*256, blurred))  # - 0.05*256)/0.9
    # b, a = signal.butter(2, 0.001, 'highpass')
    # deglared = signal.lfilter(b, a, grayscale)
    # deglared = grayscale
    # processed = blurred
    global SHOWN
    if not SHOWN:
        SHOWN = True
        # plt.imshow(colorized)
        plt.imshow(combined_bitmap, cmap='gray')
        plt.show()
def sobel_filter_image(self):
    self.restore_original()
    image = misc.fromimage(self._new_image)
    image = image.astype('int32')
    dx = ndimage.sobel(image, 1)  # horizontal derivative
    dy = ndimage.sobel(image, 0)  # vertical derivative
    mag = np.hypot(dx, dy)        # magnitude
    mag *= 255.0 / np.max(mag)    # normalize (Q&D)
    print(222222, mag)
    self._new_image = Image.fromarray(mag)
    show_preconfigured_hist(self._new_image.getdata())
    self._view.update_ui(self._initial_image, self._new_image)
def sobel(self):
    """
    Performs the sobel operator in x and y direction and combines the output.
    The output is normalized by using contrast_stretch.
    """
    gray = ImageUtils.rgb2grey_fixed(self.image_array) / 255
    sobel_x = ndimage.sobel(gray, 0)
    sobel_y = ndimage.sobel(gray, 1)
    sobel_xy = numpy.hypot(sobel_x, sobel_y)
    sobel_xy = ImageUtils.normalize_intensity_p298(sobel_xy)
    # sobel_xy *= (255 / sobel_xy.max())
    sobel_xy = ImageUtils.rgb2grey_fixed(sobel_xy)
    self._update_image_data(sobel_xy, "Sobel edge-detection")
def genSimpleFeatures(volume):
    return [ndimage.prewitt(volume), ndimage.sobel(volume)] + \
        genBlurSharpen(volume, 2.0) + \
        genBlurSharpen(volume, 5.0)
def _get_frame_entropy(args):
    (i, capture, sobelized) = args
    frame = capture.get_frame(i, True).astype('float')
    if sobelized:
        frame = ndimage.median_filter(frame, 3)

        dx = ndimage.sobel(frame, 0)  # horizontal derivative
        dy = ndimage.sobel(frame, 1)  # vertical derivative
        frame = numpy.hypot(dx, dy)   # magnitude
        frame *= 255.0 / numpy.max(frame)  # normalize (Q&D)

    histogram = numpy.histogram(frame, bins=256)[0]
    histogram_length = sum(histogram)
    samples_probability = [float(h) / histogram_length for h in histogram]
    entropy = -sum([p * math.log(p, 2) for p in samples_probability if p != 0])

    return entropy
def run(self, ips, snap, img, para=None):
    imgs = ips.imgs
    gradient = np.zeros(imgs.shape, dtype=np.float32)
    gradient += ndimg.sobel(imgs, axis=0, output=np.float32)**2
    gradient += ndimg.sobel(imgs, axis=1, output=np.float32)**2
    gradient += ndimg.sobel(imgs, axis=2, output=np.float32)**2
    gradient **= 0.5

    msk = np.zeros(imgs.shape, dtype=np.uint8)
    msk[imgs > para['thr2']] = 1
    msk[imgs < para['thr1']] = 2

    #rst = watershed(gradient, msk)
    rst = watershed(gradient, msk.astype(np.uint16))
    imgs[:] = (rst == 1) * 255
    ips.lut = self.buflut
def scrub(self, size=30):
    gray = self.gray_image
    binar = self.bin_image
    visit_list = np.argwhere(gray <= (self.whitespace - 5))
    last_3 = [-1, -1, -1]
    # self.save_fig(os.path.join(RESULTS_DIRECTORY, '{}_before.png'.format(self.figname)))
    for x in visit_list:
        i = x[0] - 15
        j = x[1] - 15
        print(i, j)
        # self.plot_frame(gray, i, j, size)
        # self.plot_frame(binar, i, j, size)
        # plt.show()
        gray_window = gray[i:i + size, j:j + size]
        bin_window = binar[i:i + size, j:j + size]
        gray_window_sobel = ndimage.sobel(gray_window, axis=0)
        mean_pixel_value = np.mean(gray_window)
        gray_pixel_value = gray_window[15, 15]
        colored_percentage = np.count_nonzero(bin_window == 0) / (30**2)
        above_area = np.mean(gray_window_sobel[8:16, 15])
        below_area = np.mean(gray_window_sobel[16:23, 15])
        sobel_gradient = above_area - -below_area
        # print('Pix Val: {}, Mean Pix Val: {}, Bin Colored: {}, Sobel Gradient: {}, Last 3: {}'.format(
        #     gray_pixel_value, mean_pixel_value, colored_percentage, sobel_gradient, last_3))
        prediction = self.predict(gray_pixel_value, mean_pixel_value, colored_percentage, sobel_gradient, last_3)
        print(prediction)
        self.alter_image(i, j, prediction)
        last_3.pop()
        last_3.insert(0, prediction)
    self.save_fig()
def paintLayer(self, ref_img, radius):
    S = []
    self.cur_nparray = self.Surface2array(self.canvas)
    self.ref_nparray = self.Image2array(ref_img)
    D = self.img_diff(self.cur_nparray, self.ref_nparray)
    grid = int(self.style.grid_size * radius)
    width = self.canvas.get_width() // grid * grid
    height = self.canvas.get_height() // grid * grid

    ref_l = self.Image2array(ref_img.convert(mode='I'))
    self.gradient_x = ndimage.sobel(ref_l, 0)
    self.gradient_y = ndimage.sobel(ref_l, 1)

    cnt = 0
    for x in range(0, width, grid):
        for y in range(0, height, grid):
            M = D[x:x + grid, y:y + grid]
            areaError = M.sum() / (grid * grid)
            if areaError > self.style.threshold:
                cnt += 1
                x1, y1 = np.unravel_index(np.argmax(M), M.shape)
                s = self.makeSplineStroke(x1 + x, y1 + y, radius)
                S.append(s)
    random.shuffle(S)
    print("radius=%d : stroke %d" % (radius, cnt))

    for s in S:
        self.context.set_line_width(
            max(self.context.device_to_user_distance(2 * radius, 2 * radius)))
        stroke_color = self.ref_nparray[s[0]] / 255
        self.context.set_source_rgb(stroke_color[0], stroke_color[1], stroke_color[2])
        self.context.move_to(s[0][0] / self.canvas.get_width(),
                             s[0][1] / self.canvas.get_height())
        for i in range(1, len(s)):
            self.context.line_to(s[i][0] / self.canvas.get_width(),
                                 s[i][1] / self.canvas.get_height())
            self.context.move_to(s[i][0] / self.canvas.get_width(),
                                 s[i][1] / self.canvas.get_height())
        self.context.close_path()
        self.context.stroke()
def single_scale_align_edge(img0, img1, offset):
    """Single scale aligns two images using edge detection

    Similar to single_scale_align(), but also runs an edge detection filter
    on top of the image before the search starts.

    Aligns img1 to img0, which acts like the anchor. We exhaustively search
    over a window of possible displacements given by the offset (e.g. [-15,15]
    pixels), then score each offset image using some image matching metric
    (the sum of squared distances) and finally choose the displacement with
    the best score.

    Args:
        img0: Anchor image to be aligned to
        img1: Image to be aligned
        offset: List of two offset ranges. First is for green, second is for red.
            Each offset range contains a x range first, then a y range

    Returns:
        A single aligned img and the alignment array, which contains alignment
        of green (index 0) and alignment of red (index 1)
    """
    min_score = float("inf")
    final_offset = []

    # Edge detection
    edge_sobel_0 = ndimage.sobel(img0, 1)  # horizontal derivative
    edge_sobel_1 = ndimage.sobel(img1, 1)  # horizontal derivative
    array0 = np.array(edge_sobel_0)

    # imshow(img1)
    imshow(concat_n_images([img0, edge_sobel_0, img1, edge_sobel_1]))

    for x in range(offset[0][0], offset[0][1]):
        for y in range(offset[1][0], offset[1][1]):
            roll = np.roll(np.roll(edge_sobel_1, y, axis=0), x, axis=1)
            score = np.sum(np.power(array0 - np.array(roll), 2))
            if min_score >= score:
                min_score = score
                final_offset = (x, y)

    aligned = np.roll(np.roll(img1, final_offset[1], 0), final_offset[0], 1)
    #print("edge align offset", final_offset)
    return (aligned, final_offset)
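# --- Usage sketch (not part of the original snippet) -----------------------
# A minimal call of single_scale_align_edge() on synthetic data, assuming the
# module's imshow and concat_n_images display helpers are importable. img1 is
# img0 rolled by (+3, -2), so the recovered (x, y) shift should be (-3, +2).
import numpy as np

rng = np.random.default_rng(0)
img0 = rng.random((64, 64))
img1 = np.roll(np.roll(img0, 3, axis=1), -2, axis=0)

search = [[-15, 15], [-15, 15]]  # used as the x and y search ranges in the loops
aligned, found_shift = single_scale_align_edge(img0, img1, search)
print(found_shift)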
def LucasKanade(It, It1, rect, p0=np.zeros(2)):
    # Input:
    #   It: template image
    #   It1: Current image
    #   rect: Current position of the car
    #       (top left, bot right coordinates)
    #   p0: Initial movement vector [dp_x0, dp_y0]
    # Output:
    #   p: movement vector [dp_x, dp_y]

    # Put your implementation here
    dp = np.array([100.0, 100.0])
    p = p0
    converge = False

    while (converge == False):
        # Warp the image and compute error
        x1, y1, x2, y2 = rect[0], rect[1], rect[2], rect[3]
        p_x, p_y = p[0], p[1]
        w = shift(It1, (p_y, p_x), It1.dtype)
        It_w = w[y1:y2 + 1, x1:x2 + 1]
        er = It_w - It
        er = er.flatten()

        # 3) Compute the gradient and warp them, then demarcate the border
        dx = ndimage.sobel(It1, 1)  # horizontal derivative
        dy = ndimage.sobel(It1, 0)  # vertical derivative
        w_dx = shift(dx, (p_y, p_x), It1.dtype)
        It_dx = w_dx[y1:y2 + 1, x1:x2 + 1]
        It_dx = It_dx.flatten()
        w_dy = shift(dy, (p_y, p_x), It1.dtype)
        It_dy = w_dy[y1:y2 + 1, x1:x2 + 1]
        It_dy = It_dy.flatten()
        vstack = np.vstack((It_dx, It_dy))
        er_dec = np.matmul(vstack, er)  # Error of steepest descent

        # Compute hessian and find delta p (dp)
        l_mag = np.matmul(vstack.T, np.eye(2))
        Hessian = np.matmul(l_mag.T, l_mag)
        dp = np.matmul(inv(Hessian), er_dec)
        p = dp + p
        if (np.linalg.norm(dp) < 0.01):
            converge = True

    return p
def principal_curvature(image):
    rows, cols = image.shape
    gx = ndimage.sobel(image, axis=0, mode='constant')
    gy = ndimage.sobel(image, axis=1, mode='constant')
    gxx = ndimage.sobel(gx, axis=0, mode='constant')
    gxy = ndimage.sobel(gx, axis=1, mode='constant')  # same as gyx
    gyy = ndimage.sobel(gy, axis=1, mode='constant')
    lamdaplus = np.zeros(image.shape)
    for i in range(rows):
        for j in range(cols):
            lamdaplus[i, j] = lamdafind([gxx[i, j], gxy[i, j], gxy[i, j], gyy[i, j]])
    return img_as_float(lamdaplus)
def Wateredge(Water, cellsize, buffer):
    # Let's use a sobel filter to find the water edges
    sx = ndimage.sobel(Water, axis=0, mode='constant')
    sy = ndimage.sobel(Water, axis=1, mode='constant')
    sob = np.hypot(sx, sy)
    borders = sob >= 1

    # Use 2D convolution to create a buffer around the water edges
    # first calculate number of cells needed for buffer based on cell size and desired buffer (20m)
    buf = int(buffer / cellsize)
    kernel = np.ones((buf, buf))
    result = np.int64(convolve2d(borders, kernel, mode='same') > 0)

    # Delete the water part so only the riparian zone is left
    riparian = result - Water
    return riparian