# Mask the matched keypoints on both grayscale images, then compute gradients.
a = len(src_pts1) // 2
c = src_pts1.ravel()  # flattened as [x0, y0, x1, y1, ...]
d = dst_pts1.ravel()
canvas1 = gray1.copy()
canvas2 = gray2.copy()
for k in range(a):
    # (x, y) of point k is (c[2*k], c[2*k + 1]); the original indexed
    # c[2*k - 1], which pairs this point's x with the previous point's y.
    cv2.circle(canvas1, (int(c[2 * k]), int(c[2 * k + 1])), 80, (255, 255, 255), -1)
    cv2.circle(canvas2, (int(d[2 * k]), int(d[2 * k + 1])), 80, (255, 255, 255), -1)
blurred1 = cv2.GaussianBlur(canvas1, (9, 9), 0)
blurred2 = cv2.GaussianBlur(canvas2, (9, 9), 0)
gradX1 = cv2.Sobel(blurred1, ddepth=cv2.CV_32F, dx=1, dy=0)
gradY1 = cv2.Sobel(blurred1, ddepth=cv2.CV_32F, dx=0, dy=1)
gradient1 = cv2.subtract(gradX1, gradY1)
gradient1 = cv2.convertScaleAbs(gradient1)
gradX2 = cv2.Sobel(blurred2, ddepth=cv2.CV_32F, dx=1, dy=0)
gradY2 = cv2.Sobel(blurred2, ddepth=cv2.CV_32F, dx=0, dy=1)
gradient2 = cv2.subtract(gradX2, gradY2)
gradient2 = cv2.convertScaleAbs(gradient2)
blurred = cv2.GaussianBlur(gradient1, (9, 9), 0)
# Named threshold types instead of the magic numbers 4 and 3; the maxval
# argument is ignored for the TOZERO threshold types, hence the 0.
(_, thresh) = cv2.threshold(blurred, 225, 0, cv2.THRESH_TOZERO_INV)
(_, thresh) = cv2.threshold(thresh, 30, 0, cv2.THRESH_TOZERO)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (25, 25))
def gradient(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Note: with ddepth=cv2.CV_8U, negative gradient values are clipped to 0,
    # so only one polarity of edge survives (see the sketch below).
    sobelx = cv2.Sobel(gray, cv2.CV_8U, 1, 0, ksize=5)
    sobely = cv2.Sobel(gray, cv2.CV_8U, 0, 1, ksize=5)
    return sobelx, sobely
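# The cv2.CV_8U output depth above clips the negative half of each
# derivative to zero. A minimal sketch of the usual workaround, assuming a
# BGR input image: compute in a float depth, then take absolute values
# before converting back to 8-bit.
import cv2

def gradient_signed(image):
    """Sobel gradients that keep both edge polarities (sketch)."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # float depth so negative responses are not clipped...
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=5)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=5)
    # ...then map |gradient| back to uint8 for display or thresholding
    return cv2.convertScaleAbs(sobelx), cv2.convertScaleAbs(sobely)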
def circle_dectet(image):
    penalty = []
    shrink = 2
    image = cv2.resize(image, (math.ceil(image.shape[1] / shrink),
                               math.ceil(image.shape[0] / shrink)))
    th = 30  # pixels above th after edge detection count as edges
    gray = cv2.cvtColor(image, cv2.COLOR_BGRA2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    x = cv2.Sobel(gray, cv2.CV_16S, 1, 0)  # gradient along x
    y = cv2.Sobel(gray, cv2.CV_16S, 0, 1)  # gradient along y
    absX = cv2.convertScaleAbs(x)  # back to uint8
    absY = cv2.convertScaleAbs(y)
    # combine the two gradients with 0.5 weight each
    edges = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)
    # (disabled experiments with skeletonize / HoughLinesP denoising lived here)
    shrink2 = 2
    edges = cv2.resize(edges, (math.ceil(edges.shape[1] / shrink2),
                               math.ceil(edges.shape[0] / shrink2)))
    # values above th become 255 (white)
    _, edges = cv2.threshold(edges, th, 255, cv2.THRESH_BINARY)
    kernel = np.ones((3, 3), np.uint8)
    edges = cv2.dilate(edges, kernel, iterations=1)
    # edges = cv2.erode(edges, kernel, iterations=2)
    contours_person, hier = cv2.findContours(edges, cv2.RETR_EXTERNAL,
                                             cv2.CHAIN_APPROX_SIMPLE)
    # suppress small contours (noise)
    for d in contours_person:
        x, y, w, h = cv2.boundingRect(d)
        if d.shape[0] < math.ceil(100 / shrink2):
            edges[y:y + h, x:x + w] = np.zeros((h, w))
    # Hough circle transform.
    # dp: inverse ratio of accumulator resolution to image resolution
    # (default 1.5, range ~0.1-10; smaller is more precise).
    dp = 2
    # minDist: minimum distance between detected centres. Too small and
    # spurious neighbour circles are detected besides the real one; too
    # large and some circles are missed. Range ~10-500.
    minDist = 100
    circles = cv2.HoughCircles(edges, cv2.HOUGH_GRADIENT, dp, minDist,
                               param1=45, param2=10,
                               minRadius=math.ceil(60 / (shrink * shrink2)),
                               maxRadius=math.ceil(300 / (shrink * shrink2)))
    # Check for None *before* converting; the original converted first,
    # which raises when no circle is found.
    if circles is None:
        return 0, [0, 0, 0, 0]
    circles = np.uint16(np.around(circles))
    index = 0  # keep only the circle near the image centre
    for circle in circles[0]:
        if abs(circle[0] - edges.shape[1] // 2) > 20:
            if index < circles.shape[1] - 1:
                index = index + 1
            else:
                index = 0
        else:
            penalty.append([circle[0] * shrink * shrink2,
                            circle[1] * shrink * shrink2,
                            circle[2] * shrink * shrink2,
                            shrink * shrink2])
    penalty.append([circles[0][index][0] * shrink * shrink2,
                    circles[0][index][1] * shrink * shrink2,
                    circles[0][index][2] * shrink * shrink2,
                    shrink * shrink2])
    # cv2.imshow('edge', edges)
    return 1, penalty
import cv2

o = cv2.imread('yuko.jpg', cv2.IMREAD_GRAYSCALE)
Sobelx = cv2.Sobel(o, cv2.CV_64F, 1, 0)
Sobely = cv2.Sobel(o, cv2.CV_64F, 0, 1)
Sobelx = cv2.convertScaleAbs(Sobelx)
Sobely = cv2.convertScaleAbs(Sobely)
# Weighted sum of the two single-axis gradients...
Sobelxy = cv2.addWeighted(Sobelx, 0.5, Sobely, 0.5, 0)
# ...versus dx=1, dy=1 in one call, which computes the mixed second
# derivative and usually looks much weaker
Sobelxyll = cv2.Sobel(o, cv2.CV_64F, 1, 1)
Sobelxyll = cv2.convertScaleAbs(Sobelxyll)
cv2.imshow('o', o)
cv2.imshow('xy', Sobelxy)
cv2.imshow('xyll', Sobelxyll)
cv2.waitKey()
cv2.destroyAllWindows()
def process_image(self, input_image, output_image):
    width = input_image.shape[1]
    height = input_image.shape[0]
    current_page = self.ui.mainTabs.currentIndex()
    print("current_page_index: ", current_page)
    if current_page == BILATERAL_FILTER_PAGE:
        output_image = cv2.bilateralFilter(
            input_image,
            self.ui.bilateral_dia_spin.value(),
            self.ui.bilateral_sigma_color_spin.value(),
            self.ui.bilateral_sigma_space_spin.value(),
            self.ui.bilateral_border_type_combo.currentIndex())
        return output_image
    elif current_page == BLUR_FILTER_PAGE:
        print("kernel: ", self.ui.blur_kernel_spin.value())
        # print("anchor: ", self.ui.blur_anchor_x_spin.value(),
        #       self.ui.blur_anchor_y_spin.value())
        output_image = cv2.blur(
            input_image,
            (self.ui.blur_kernel_spin.value(),
             self.ui.blur_kernel_spin.value()),
            output_image,
            (self.ui.blur_anchor_x_spin.value(),
             self.ui.blur_anchor_y_spin.value()),
            self.ui.blur_border_type_combo.currentIndex())
        return output_image
    elif current_page == BOX_FILTER_PAGE:
        print("normalize: ", self.ui.box_normalize_check.isChecked())
        output_image = cv2.boxFilter(
            src=input_image,
            ddepth=self.ui.box_depth_spin.value(),
            ksize=(self.ui.box_kernel_spin.value(),
                   self.ui.box_kernel_spin.value()),
            dst=output_image,
            anchor=(self.ui.box_anchor_x_spin.value(),
                    self.ui.box_anchor_y_spin.value()),
            normalize=self.ui.box_normalize_check.isChecked(),
            borderType=self.ui.box_border_type_combo.currentIndex())
        return output_image
    elif current_page == GAUSSIAN_FILTER_PAGE:
        output_image = cv2.GaussianBlur(
            input_image,
            ksize=(self.ui.gaussian_kernel_spin.value(),
                   self.ui.gaussian_kernel_spin.value()),
            sigmaX=self.ui.gaussian_sig_x_spin.value(),
            dst=output_image,
            sigmaY=self.ui.gaussian_sig_y_spin.value(),
            borderType=self.ui.gaussian_border_type_combo.currentIndex())
        return output_image
    elif current_page == MEDIAN_FILTER_PAGE:
        output_image = cv2.medianBlur(input_image,
                                      self.ui.median_kernel_spin.value())
        return output_image
    elif current_page == FILTER2D_FILTER_PAGE:
        # filter2D expects an array kernel and the argument order
        # (src, ddepth, kernel, ...); the original passed a flat tuple
        # and put the dst argument second
        f2dkernel = np.array([[0, 1.5, 0],
                              [1.5, -6, 1.5],
                              [0, 1.5, 0]], dtype=np.float32)
        output_image = cv2.filter2D(input_image, -1, f2dkernel,
                                    anchor=(-1, -1))
        return output_image
    elif current_page == DERIVATIVES_FILTER_PAGE:
        if self.ui.derivatives_sobel_radio.isChecked():
            output_image = cv2.Sobel(
                src=input_image,
                ddepth=self.ui.derivatives_ddepth_spin.value(),
                dx=self.ui.derivatives_dx_spin.value(),
                dy=self.ui.derivatives_dy_spin.value(),
                dst=output_image,
                ksize=self.ui.derivatives_kernel_spin.value(),
                scale=self.ui.derivatives_scale_spin.value(),
                delta=self.ui.derivatives_delta_spin.value(),
                borderType=self.ui.derivatives_border_type_combo.currentIndex())
        elif self.ui.derivatives_scharr_radio.isChecked():
            # cv2.Scharr has no ksize parameter (the kernel is fixed at
            # 3x3); the original also dropped the .ui prefix on the spin box
            output_image = cv2.Scharr(
                src=input_image,
                ddepth=self.ui.derivatives_ddepth_spin.value(),
                dx=self.ui.derivatives_dx_spin.value(),
                dy=self.ui.derivatives_dy_spin.value(),
                dst=output_image,
                scale=self.ui.derivatives_scale_spin.value(),
                delta=self.ui.derivatives_delta_spin.value(),
                borderType=self.ui.derivatives_border_type_combo.currentIndex())
        elif self.ui.derivatives_laplacian_radio.isChecked():
            output_image = cv2.Laplacian(
                src=input_image,
                ddepth=self.ui.derivatives_ddepth_spin.value(),
                dst=output_image,
                ksize=self.ui.derivatives_kernel_spin.value(),
                scale=self.ui.derivatives_scale_spin.value(),
                delta=self.ui.derivatives_delta_spin.value(),
                borderType=self.ui.derivatives_border_type_combo.currentIndex())
        return output_image
    elif current_page == MORPH_FILTER_PAGE:
        # cv2.erode/cv2.dilate take the kernel as the second positional
        # argument, not dst
        if self.ui.morph_erode_radio.isChecked():
            output_image = cv2.erode(
                input_image,
                cv2.getStructuringElement(
                    self.ui.morph_shape_combo.currentIndex(), (5, 5)),
                anchor=(-1, -1),
                iterations=self.ui.morph_iteration_spin.value())
        elif self.ui.morph_dilate_radio.isChecked():
            # fixed typo: getStructingElement -> getStructuringElement
            output_image = cv2.dilate(
                input_image,
                cv2.getStructuringElement(
                    self.ui.morph_shape_combo.currentIndex(), (5, 5)),
                anchor=(-1, -1),
                iterations=self.ui.morph_iteration_spin.value())
        elif self.ui.morph_morph_radio.isChecked():
            m_anchor = (self.ui.morph_anchor_x_spin.value(),
                        self.ui.morph_anchor_y_spin.value())
            m_kernel = (self.ui.morph_kernel_spin.value(),
                        self.ui.morph_kernel_spin.value())
            output_image = cv2.morphologyEx(
                input_image,
                self.ui.morph_type_combo.currentIndex(),
                cv2.getStructuringElement(
                    self.ui.morph_shape_combo.currentIndex(), m_kernel),
                anchor=m_anchor,
                iterations=self.ui.morph_iteration_spin.value(),
                borderType=self.ui.morph_border_type_combo.currentIndex()
                # borderValue=
            )
        return output_image
def filteringHoughLines(img, ori_img, houghLinesArrary):
    filteredHoughLines = []
    # First take gradients along the x and y directions, then the magnitude
    dx = cv2.Sobel(img, cv2.CV_32F, 1, 0)  # gradient along x direction
    dy = cv2.Sobel(img, cv2.CV_32F, 0, 1)  # gradient along y direction
    magnitude = cv2.magnitude(dx, dy)  # already non-negative
    meanValue = np.mean(magnitude)
    for line in houghLinesArrary:
        # Bresenham's algorithm gives the pixels along the line
        points = bresenham_line((line[0][0], line[0][1]),
                                (line[1][0], line[1][1]))
        # Line direction vector, kept for the dot product below
        tempPoint1 = [(line[0][0] - line[1][0]), (line[0][1] - line[1][1])]
        threshold = 0
        # Removing degenerate parameterisations
        if line[1][0] == 0 or line[0][0] == 0 or line[1][0] - line[0][0] == 0:
            continue
        lineSlope = (line[1][1] - line[0][1]) / (line[1][0] - line[0][0])
        lineAngle = np.absolute(np.arctan(lineSlope) * 180.0 / np.pi)
        # Check for vertical lines
        if lineAngle > 85 and lineAngle < 95:
            continue
        # Check for horizontal lines
        if lineAngle >= 0 and lineAngle < 10:
            continue
        # Iterate through all the points (image assumed to be 1200 x 1600)
        for a, b in points:
            if not (a < 1200 and b < 1600):
                continue
            xVal = np.absolute(dx[a, b])  # gradient at the point, x
            yVal = np.absolute(dy[a, b])  # gradient at the point, y
            # Keep the point only if the magnitude exceeds the threshold
            if magnitude[a, b] <= 30:
                continue
            tempPoint2 = [xVal, yVal]
            # Angle between line direction and gradient: the original took
            # cos() of a cosine value; acos() is what recovers the angle
            resDotProduct = np.dot(tempPoint1, tempPoint2)
            point1Mag = np.dot(tempPoint1, tempPoint1) ** 0.5
            point2Mag = np.dot(tempPoint2, tempPoint2) ** 0.5
            if point1Mag == 0 or point2Mag == 0:
                continue
            cosAngle = resDotProduct / point2Mag / point1Mag
            angleMod = math.degrees(
                math.acos(max(-1.0, min(1.0, cosAngle)))) % 360
            if angleMod - 180 >= 0:
                angleMod = 360 - angleMod
            # Count the point unless the angle is between 85 and 95 degrees
            if not (angleMod > 85 and angleMod < 95):
                threshold += 1
        # Plot the line on the original image and add it to the array
        if threshold > 45:
            cv2.line(ori_img, (line[0][0], line[0][1]),
                     (line[1][0], line[1][1]), (255, 0, 0), 2)
            filteredHoughLines.append(((line[0][0], line[0][1]),
                                       (line[1][0], line[1][1])))
    cv2.imwrite("FilteredHoughLines.jpg", ori_img)
    return filteredHoughLines
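# filteringHoughLines() calls an external bresenham_line() helper that is
# not shown. Below is a minimal stand-in with the same call shape (two
# endpoint tuples in, a list of integer points out); the body is the
# textbook Bresenham algorithm and is offered only as an assumption about
# what the original helper does.
def bresenham_line(p0, p1):
    """Integer pixels on the segment p0 -> p1 (standard Bresenham; sketch)."""
    x0, y0 = p0
    x1, y1 = p1
    dx, dy = abs(x1 - x0), -abs(y1 - y0)
    sx = 1 if x0 < x1 else -1
    sy = 1 if y0 < y1 else -1
    err = dx + dy
    points = []
    while True:
        points.append((x0, y0))
        if x0 == x1 and y0 == y1:
            return points
        e2 = 2 * err
        if e2 >= dy:
            err += dy
            x0 += sx
        if e2 <= dx:
            err += dx
            y0 += sy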
# Clamp the patch window independently on each side; the original elif
# chain could only ever fix one of the four bounds, and had the y checks
# inverted.
y1, y2 = corner[0][0] - w, corner[0][0] + w
x1, x2 = corner[0][1] - w, corner[0][1] + w
x1 = max(x1, 0)
x2 = min(x2, X_len)
y1 = max(y1, 0)
y2 = min(y2, Y_len)
# print(corner[0][0], corner[0][1])
# print(x1, x2, y1, y2)
image_patch = img[x1:x2, y1:y2]
# cv2.imshow('patches', image_patch)
# cv2.waitKey()
gx = cv2.Sobel(image_patch, cv2.CV_32F, 1, 0, ksize=1)
gy = cv2.Sobel(image_patch, cv2.CV_32F, 0, 1, ksize=1)
# print(gx, gy)
mag, angle = cv2.cartToPolar(gx, gy, angleInDegrees=True)
# mag, angle = np.array(mag, dtype=int), np.array(angle, dtype=int)
# 9-bin orientation histograms, one per colour channel
blue_bin = np.zeros(9, dtype=float)
green_bin = np.zeros(9, dtype=float)
red_bin = np.zeros(9, dtype=float)
# print(mag)
# print(angle)
# -------------- BGR CHANNEL ------------
# @Author : cap
# @FileName: myOpencvEdge.py
# @Software: PyCharm Community Edition
# @introduction: edge detection
import cv2 as cv

# image = cv.imread('./data/chair.jpg', cv.IMREAD_GRAYSCALE)
image = cv.imread('./data/J01_2018.06.17 15_30_49.jpg', cv.IMREAD_GRAYSCALE)
image = cv.resize(image, None, fx=0.2, fy=0.2, interpolation=cv.INTER_LINEAR)
print(image.shape)
print(image.dtype)
cv.imshow('chair', image)

# 1. Sobel edge detection
# horizontal gradient; ksize is the kernel size
hor = cv.Sobel(image, cv.CV_64F, 1, 0, ksize=5)
cv.imshow('Hor', hor)
# vertical gradient
ver = cv.Sobel(image, cv.CV_64F, 0, 1, ksize=5)
cv.imshow('Ver', ver)
# both directions at once (mixed derivative)
hor_ver = cv.Sobel(image, cv.CV_64F, 1, 1, ksize=5)
cv.imshow('Hor_Ver', hor_ver)

# 2. Laplacian edge detection
lap = cv.Laplacian(image, cv.CV_64F)
cv.imshow('Lap', lap)

# 3. Canny with thresholds 50 and 240
canny = cv.Canny(image, 50, 240)
cv.imshow('Canny', canny)
import argparse

import cv2
import numpy as np

ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
ap.add_argument("-l", "--lower-angle", type=float, default=175.0,
                help="Lower orientation angle")
ap.add_argument("-u", "--upper-angle", type=float, default=180.0,
                help="Upper orientation angle")
args = vars(ap.parse_args())

# load the image, convert it to grayscale, and display the original image
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow("Original", image)

# compute gradients along the X and Y axis, respectively
gX = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
gY = cv2.Sobel(gray, cv2.CV_64F, 0, 1)

# compute the gradient magnitude and orientation, respectively
mag = np.sqrt((gX ** 2) + (gY ** 2))
orientation = np.arctan2(gY, gX) * (180 / np.pi) % 180

# find all pixels that are within the upper and lower angle boundaries
idxs = np.where(orientation >= args["lower_angle"], orientation, -1)
idxs = np.where(orientation <= args["upper_angle"], idxs, -1)
mask = np.zeros(gray.shape, dtype="uint8")
mask[idxs > -1] = 255

# show the images
cv2.imshow("Mask", mask)
cv2.waitKey(0)
import cv2
import numpy as np
import matplotlib.pyplot as plt

hourseImg = cv2.imread("../../../gallery/logo.png", cv2.IMREAD_GRAYSCALE)

# xGrad = cv2.Sobel(hourseImg, cv2.CV_64F, 1, 0, ksize=3)
# yGrad = cv2.Sobel(hourseImg, cv2.CV_64F, 0, 1, ksize=3)
Grad = cv2.Sobel(hourseImg, cv2.CV_64F, 1, 1, ksize=3)
lapGrads = cv2.Laplacian(hourseImg, cv2.CV_64F, ksize=3)
# Bitwise ops are only meaningful on integer images, so convert the float
# gradients to uint8 first (the original OR-ed raw float64 bits)
edges = cv2.bitwise_or(cv2.convertScaleAbs(Grad),
                       cv2.convertScaleAbs(lapGrads))
canny = cv2.Canny(hourseImg, 100, 200)
gradMorph = cv2.morphologyEx(hourseImg, cv2.MORPH_GRADIENT,
                             np.ones((5, 5), dtype=np.uint8))

cv2.imshow("sobel & laplacian", edges)
cv2.waitKey(0)
cv2.imshow("canny", canny)
cv2.waitKey(0)
cv2.imshow("morph", gradMorph)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Problem 2:
# For grey histograms:
for imagePath in glob.glob("ST2MainHall4" + "/*.jpg"):
    # extract the image filename (assumed to be unique)
    filename = imagePath[imagePath.rfind("/") + 1:]
    # Read the image as a grey image
    image = cv2.imread(imagePath, 0)
    # Use the Canny edge detector to select edge points
    cannyImageMask = cv2.Canny(image, 100, 250)
    # To create the 8-bit mask
    cannyImageMask = np.uint8(cannyImageMask)
    # Apply bitwise and on the image to mask it
    maskedImage = cv2.bitwise_and(image, image, mask=cannyImageMask)
    # Compute gradients in x and y direction
    sobelXDir = cv2.Sobel(maskedImage, cv2.CV_64F, 1, 0, ksize=5)
    sobelYDir = cv2.Sobel(maskedImage, cv2.CV_64F, 0, 1, ksize=5)
    # Compute magnitude and theta angle using the gradients
    magnitude, theta = cv2.cartToPolar(sobelXDir, sobelYDir,
                                       angleInDegrees=True)
    # Turn theta into a histogram index (10-degree bins)
    theta = np.round(np.divide(theta, 10))
    theta = np.uint8(theta)
    # flatten the theta and magnitude arrays
    flattenedTheta = hist_bins(theta)
    flattenedMagnitude = hist_bins(magnitude)
    # Build 36-bin magnitude-weighted histograms
    hist, bins = np.histogram(flattenedTheta, range(37),
                              weights=flattenedMagnitude)
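# The loop above relies on a hist_bins() helper defined elsewhere. Judging
# by its use (per-pixel arrays flattened before np.histogram), a plausible
# minimal stand-in is a plain flatten; this is an assumption, not the
# original definition.
import numpy as np

def hist_bins(arr):
    """Flatten a per-pixel 2-D array into a 1-D vector (assumed behaviour)."""
    return np.asarray(arr).ravel()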
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt

img = cv.imread('sudoku.png', cv.IMREAD_GRAYSCALE)
lap = cv.Laplacian(img, cv.CV_64F, ksize=3)
lap = np.uint8(np.absolute(lap))
sobelX = cv.Sobel(img, cv.CV_64F, 1, 0, ksize=1)
sobelY = cv.Sobel(img, cv.CV_64F, 0, 1, ksize=1)
sobelX = np.uint8(np.absolute(sobelX))
sobelY = np.uint8(np.absolute(sobelY))
sobelCombined = cv.bitwise_or(sobelY, sobelX)

titles = ['image', 'lap', 'sobelX', 'sobelY', 'sobel']
images = [img, lap, sobelX, sobelY, sobelCombined]
for i in range(5):
    plt.subplot(2, 3, i + 1), plt.imshow(images[i], 'gray')
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])
plt.show()
def preprocess(curr_frame, size=(640, 480), thres_condi=0.3):
    """
    function:
        preprocess(curr_frame[, size=(640, 480)[, thres_condi=0.3]]):
        preprocess curr_frame at the given size
    parameters:
        curr_frame: the frame (one image) to process
        size: resize the image to width * height, as a (width, height) tuple
        thres_condi: binarisation threshold factor, default 0.3,
                     range [0, 1), float
    method:
        1. find the ROI of the video (border pixels with grey value below
           10 are filtered out)
        2. resize curr_frame (detect portrait vs. landscape orientation and
           scale accordingly)
        3. convert to greyscale
        4. Gaussian blur (5*5 kernel, SD = 0)
        5. Sobel (dx = dy = 1, 5*5 kernel)
        6. binarise with f(x) = (max(sobel) - min(sobel)) * thres_condi
           (255 if sobel >= f(x) else 0)
    returns:
        frame: the reshaped 3-channel image
        gray: the reshaped greyscale image
        frame_pre: the binarised image
    """
    # 1.
    gray = cv2.cvtColor(curr_frame, cv2.COLOR_BGR2GRAY)
    gray[gray < 10] = 0
    contour, hierarchy = cv2.findContours(gray.copy(), cv2.RETR_EXTERNAL,
                                          cv2.CHAIN_APPROX_SIMPLE)
    all_area = list()
    for cnt in contour:
        area = cv2.contourArea(cnt)
        all_area.append(area)
    index = all_area.index(max(all_area))
    del all_area
    x, y, w, h = cv2.boundingRect(contour[index])
    frame_roi = curr_frame[y:y + h, x:x + w]
    # 2.
    re_y, re_x, _ = frame_roi.shape
    size = (size[1], size[0]) if re_y > re_x else (size[0], size[1])
    # interpolation must be passed by keyword; the third positional
    # argument of cv2.resize is dst, not the interpolation flag
    frame = cv2.resize(frame_roi, size, interpolation=cv2.INTER_AREA)
    # 3.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # 4.
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    # 5.
    frame_sobel = cv2.Sobel(blur, ddepth=-1, dx=1, dy=1, ksize=5)
    frame_pre = frame_sobel.copy()
    # 6.
    thres = ((np.max(frame_sobel) - np.min(frame_sobel))
             * thres_condi).astype(np.uint8)
    frame_pre[frame_sobel >= thres] = 255
    frame_pre[frame_sobel < thres] = 0
    return frame, gray, frame_pre
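# A short usage sketch for preprocess(); the video file name below is
# illustrative, not from the original.
import cv2

cap = cv2.VideoCapture('input.mp4')  # hypothetical input file
ok, raw = cap.read()
if ok:
    frame, gray, frame_pre = preprocess(raw, size=(640, 480), thres_condi=0.3)
    cv2.imshow('binarised', frame_pre)
    cv2.waitKey(0)
cap.release()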
# median filter
median_image = cv2.medianBlur(gray_image, 5)
cv2.imshow("Over the Clouds - median", median_image)
cv2.imwrite('median.jpg', median_image)

# min filter (greyscale erosion)
kernel = np.ones((5, 5), np.uint8)
min_image = cv2.erode(gray_image, kernel, iterations=1)
cv2.imshow("Over the Clouds - min", min_image)
cv2.imwrite('min.jpg', min_image)

# max filter (greyscale dilation)
max_image = cv2.dilate(gray_image, kernel, iterations=1)
cv2.imshow("Over the Clouds - max", max_image)
cv2.imwrite('max.jpg', max_image)

# laplacian filter
laplace_image = cv2.Laplacian(median_image, cv2.CV_64F)
cv2.imwrite('laplace.jpg', laplace_image)

# sobel horizontal filter
sobelh_image = cv2.Sobel(median_image, cv2.CV_64F, 1, 0, ksize=5)
cv2.imwrite('sobel_h.jpg', sobelh_image)

# sobel vertical filter
sobelv_image = cv2.Sobel(median_image, cv2.CV_64F, 0, 1, ksize=5)
cv2.imwrite('sobel_v.jpg', sobelv_image)

cv2.waitKey(0)
cv2.destroyAllWindows()
def exact_feature(self):
    # Per-pixel gradient magnitude and orientation (in degrees), e.g. as
    # the first step of a HOG-style descriptor
    gradient_x = cv2.Sobel(self.img, cv2.CV_32F, 1, 0, ksize=3)
    gradient_y = cv2.Sobel(self.img, cv2.CV_32F, 0, 1, ksize=3)
    gradient_magnitude = np.sqrt(gradient_x**2 + gradient_y**2)
    gradient_angle = cv2.phase(gradient_x, gradient_y, angleInDegrees=True)
    return gradient_magnitude, gradient_angle
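# The same magnitude/angle pair can come from one cv2.cartToPolar call, as
# other snippets in this collection do; a minimal equivalent sketch (the
# function name is illustrative):
import cv2

def exact_feature_cart(img):
    """Gradient magnitude and angle in degrees via cartToPolar (sketch)."""
    gx = cv2.Sobel(img, cv2.CV_32F, 1, 0, ksize=3)
    gy = cv2.Sobel(img, cv2.CV_32F, 0, 1, ksize=3)
    return cv2.cartToPolar(gx, gy, angleInDegrees=True)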
plt.subplot(4, 2, 3)
plt.imshow(dst_prewitt, cmap='gray')
plt.title('$f\'_x$: image filtered with Prewitt')

#######################################
# cv2.Sobel() also exists
kernel = 1 / 8 * np.array([[-1, 0, +1],
                           [-2, 0, +2],
                           [-1, 0, +1]])
dst_sobel = cv2.filter2D(img, -1, kernel)
plt.subplot(4, 2, 4)
plt.imshow(dst_sobel, cmap='gray')
plt.title('$f\'_x$: image filtered with Sobel')

#######################################
# signature is cv2.Sobel(img, ddepth, dx, dy): the last two arguments are
# derivative orders, not kernel sizes
dst_cv2_sobel = cv2.Sobel(img, -1, 1, 0)
plt.subplot(4, 2, 5)
plt.imshow(dst_cv2_sobel, cmap='gray')
plt.colorbar()
plt.title('cv2.Sobel X')

#######################################
plt.subplot(4, 2, 6)
plt.imshow(np.abs(dst_sobel - dst_sym))
plt.colorbar()
plt.title('|sobel-symmetric|')

#######################################
plt.subplot(4, 2, 7)
plt.imshow(np.abs(dst_sobel - dst_prewitt))
# if event == cv2.EVENT_LBUTTONDOWN:
#     print('x = %d, y = %d' % (x, y))
#     count = count + 1
#     if (len(coordinate) == 4):
#         coordinate.clear()
#     else:
#         coordinate.append(x)
#         coordinate.append(y)
#     if count % 2 == 0 and count > 0:
#         cropimg(coordinate)

# cv2.imshow('res2', res2)
sobely = cv2.Sobel(res2, cv2.CV_8U, 0, 1, ksize=5)
counter = 0
# cv2.imshow('sobely', sobely)
print(sobely.shape)
height, width = sobely.shape
# Zero every pixel below the saturation value 255, and report the first
# fully saturated pixel found (column-major scan)
for x in range(0, width):
    for y in range(0, height):
        if sobely[y, x] < 255:
            sobely[y, x] = 0
        elif counter == 0:
            counter = counter + 1
            print(y, x)
cv2.imshow("masked", sobely)
def combine_thresh_debug(img, sobel_kernel=3, mag_thresh=(0, 255)):
    def show(title, im):
        # debug helper: display one image and wait for a key
        cv2.imshow(title, im)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    bgr = img
    R = bgr[:, :, 2]
    G = bgr[:, :, 1]
    B = bgr[:, :, 0]
    show('R', R)
    show('G', G)
    show('B', B)

    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    H = hls[:, :, 0]
    L = hls[:, :, 1]
    S = hls[:, :, 2]
    show('H', H)
    show('L', L)
    show('S', S)

    # Take the Sobel x gradient (on the colour image; no grayscale
    # conversion here)
    # gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    gray = img
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    abs_sobel = np.absolute(sobelx)
    scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))
    print(scaled_sobel.shape)
    show('img', scaled_sobel)

    # inspect the channels of the scaled gradient (the original window
    # titles here were mislabelled H/L/S)
    bgr = scaled_sobel
    R = bgr[:, :, 2]
    G = bgr[:, :, 1]
    B = bgr[:, :, 0]
    show('R', R)
    show('G', G)
    show('B', B)

    hls = cv2.cvtColor(scaled_sobel, cv2.COLOR_BGR2HLS)
    H = hls[:, :, 0]
    L = hls[:, :, 1]
    S = hls[:, :, 2]
    show('H', H)
    show('L', L)
    show('S', S)

    # Threshold the L channel of the scaled gradient
    thresh = (105, 255)
    binary_output = np.zeros_like(L)
    binary_output[(L > thresh[0]) & (L <= thresh[1])] = 1
    # Return the binary image
    return binary_output
        x, y, z = deproject_pixel_to_ros_point(cX, cY,
                                               frame_set.depth[cY, cX])
        a, b, c = self.get_normal_vector(frame_set,
                                         (startX, startY, endX, endY))
        normal_arctan = math.atan2(b, a)
        r = math.degrees(normal_arctan)
        return (x, y, r)

    def get_normal_vector(self, frame_set, box):
        # Tuple parameters in a def are Python-2-only syntax; unpack inside
        startX, startY, endX, endY = box
        depth_frame_16 = frame_set.depth
        df_dp = np.expand_dims(depth_frame_16, axis=-1).astype(np.uint8)
        df_dp = np.tile(df_dp, (1, 1, 3))
        depth_frame = depth_frame_16.astype(np.float32)
        # Integer centre of the box (// keeps the slice indices integral)
        cX = startX + (endX - startX) // 2
        cY = startY + (endY - startY) // 2
        sample = depth_frame[cY - 20:cY + 20, cX - 20:cX + 20]
        cv.rectangle(df_dp, (cX - 20, cY - 20), (cX + 20, cY + 20),
                     (255, 0, 0), 2)
        dzdx = cv.Sobel(sample, cv.CV_32F, 1, 0, ksize=5)
        dzdy = cv.Sobel(sample, cv.CV_32F, 0, 1, ksize=5)
        dzdx = np.median(dzdx)
        dzdy = np.median(dzdy)
        # Convert to ros coordinates: z -> a, -dzdx -> b, -dzdy -> c
        return (1.0, -dzdx, -dzdy)

    def latest_meas(self):
        return self._latest_meas

    def lost_target(self):
        return self._target_lost


class SoldierTow(Alignment):
    def detect(self, frame_set):
sqKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))

# read the input image and preprocess it
image = cv2.imread(args["image"])
cv_show('image', image)
image = myutils.resize(image, width=300)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv_show('gray', gray)

# top-hat operation to emphasise the brighter regions
tophat = cv2.morphologyEx(gray, cv2.MORPH_TOPHAT, rectKernel)
cv_show('tophat', tophat)

# ksize=-1 selects the 3x3 Scharr kernel
gradX = cv2.Sobel(tophat, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
gradX = np.absolute(gradX)
(minVal, maxVal) = (np.min(gradX), np.max(gradX))
gradX = (255 * ((gradX - minVal) / (maxVal - minVal)))
gradX = gradX.astype("uint8")
print(np.array(gradX).shape)
cv_show('gradX', gradX)

# closing (dilate, then erode) to join the digits together
gradX = cv2.morphologyEx(gradX, cv2.MORPH_CLOSE, rectKernel)
cv_show('gradX', gradX)

# THRESH_OTSU picks a suitable threshold automatically (good for bimodal
# images); the threshold parameter must then be set to 0
def findCenterline(gray):
    # showg(gray)
    sobel_kernel = 7
    thresh = (0.6, 1.3)

    # normalize
    t.s('normalize')
    gray = normalize(gray)
    t.e('normalize')

    # Calculate the x and y gradients
    t.s('sobel\t')
    sobelx = cv2.Sobel(gray, cv2.CV_32F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_32F, 0, 1, ksize=sobel_kernel)
    t.e('sobel\t')
    # graddir = np.arctan2(sobely, sobelx)

    # find the norm (magnitude) of the gradient
    norm = np.sqrt(np.square(sobelx) + np.square(sobely))
    norm = normalize(norm)
    # norm > 1 to get good edges

    # find left edges of white lanes
    t.s('find left edges')
    binary_output = np.zeros_like(gray, dtype=np.uint8)
    # XXX gray > 1.5 is a sketchy solution that cuts data size in half
    binary_output[(gray > 1.5) & (sobelx > 0) & (norm > 1)] = 1
    # showg(binary_output)

    # label connected components
    connectivity = 8
    output = cv2.connectedComponentsWithStats(binary_output, connectivity,
                                              cv2.CV_32S)
    num_labels = output[0]   # number of labels
    labels = output[1]       # label matrix
    stats = output[2]        # stat matrix
    centroids = output[3]    # centroid matrix
    # (debug visualisation: map component labels to hue and display)

    # find the two longest left edges
    line_labels = np.argsort(stats[:, cv2.CC_STAT_AREA][1:])[-2:] + 1
    # list of centroids with corresponding left/right edge (of a white line)
    long_edge_centroids = []
    long_edge_lr = ""
    long_edge_label = []
    # XXX error: out of bounds when fewer than two components exist
    if (stats[line_labels[0], cv2.CC_STAT_AREA] > 300):
        long_edge_centroids.append(centroids[line_labels[0], 0])
        long_edge_lr += 'L'
        long_edge_label.append(labels == line_labels[0])
    if (stats[line_labels[1], cv2.CC_STAT_AREA] > 300):
        long_edge_centroids.append(centroids[line_labels[1], 0])
        long_edge_lr += 'L'
        long_edge_label.append(labels == line_labels[1])
    t.e('find left edges')

    # find right edges of lanes
    # XXX gray > 1.5 is a sketchy solution that cuts data size in half
    t.s('find right edg')
    binary_output = np.zeros_like(gray, dtype=np.uint8)
    binary_output[(gray > 1.5) & (sobelx < 0) & (norm > 1)] = 1

    # label connected components
    connectivity = 8
    output = cv2.connectedComponentsWithStats(binary_output, connectivity,
                                              cv2.CV_32S)
    num_labels = output[0]
    labels = output[1]
    stats = output[2]
    centroids = output[3]
    line_labels = np.argsort(stats[:, cv2.CC_STAT_AREA][1:])[-2:] + 1
    if (stats[line_labels[0], cv2.CC_STAT_AREA] > 300):
        long_edge_centroids.append(centroids[line_labels[0], 0])
        long_edge_lr += 'R'
        long_edge_label.append(labels == line_labels[0])
    if (stats[line_labels[1], cv2.CC_STAT_AREA] > 300):
        long_edge_centroids.append(centroids[line_labels[1], 0])
        long_edge_lr += 'R'
        long_edge_label.append(labels == line_labels[1])

    # rank the edges by centroid position
    order = np.argsort(long_edge_centroids)
    long_edge_centroids = np.array(long_edge_centroids)[order]
    temp_lr = ""
    for i in order:
        temp_lr += long_edge_lr[i]
    long_edge_lr = temp_lr
    long_edge_label = np.array(long_edge_label)[order]
    t.e('find right edg')

    # now we analyze the long edges we have
    # case notation: e.g. (LR) -> left edge, right edge, from left to right
    # this logic is based on the assumption that the edges we find are lane
    # edges; now we distinguish between several situations
    t.s('find centerline - lr analysis')
    flag_fail_to_find = False
    flag_good_road = False
    flag_one_lane = False
    centerPoly = None

    # case 1: if we find one and only one pattern (?RL?), we got a match
    if (long_edge_lr.count('RL') == 1):
        index = long_edge_lr.find('RL')
        with warnings.catch_warnings(record=True) as w:
            left_poly = fitPoly(long_edge_label[index])
            index += 1
            right_poly = fitPoly(long_edge_label[index])
            if len(w) > 0:
                raise Exception('fail to fit poly')
            else:
                flag_good_road = True
                center_poly = findCenterFromSide(left_poly, right_poly)

    # case 2: we only see one edge of any sort
    if (len(long_edge_lr) == 1):
        with warnings.catch_warnings(record=True) as w:
            side_poly = fitPoly(long_edge_label[0])
            if len(w) > 0:
                raise Exception('fail to fit poly')
            else:
                flag_one_lane = True
    # case 3: if we get (LR), then we are standing on a lane but don't know
    # which lane it is; drive on this lane until we see the other lane
    elif (long_edge_lr == 'LR'):
        index = 0
        with warnings.catch_warnings(record=True) as w:
            left_poly = fitPoly(long_edge_label[index])
            index += 1
            right_poly = fitPoly(long_edge_label[index])
            if len(w) > 0:
                raise Exception('fail to fit poly')
            else:
                flag_one_lane = True
                side_poly = findCenterFromSide(left_poly, right_poly)
    # otherwise we are completely lost
    else:
        flag_fail_to_find = True

    # based on whether the line inclines to the left or right,
    # guess which side it is
    if (flag_one_lane == True):
        x0 = (side_poly[0] * 1 ** 2 + side_poly[1] * 1 + side_poly[2]
              - x_size / 2)
        x1 = (side_poly[0] * crop_y_size ** 2 + side_poly[1] * crop_y_size
              + side_poly[2] - x_size / 2)
        if (x1 - x0 > 0):
            side = 'right'
        else:
            side = 'left'
    t.e('find centerline - lr analysis')

    binary_output = None
    if (flag_good_road == True):
        # (debug block that rendered the detected lane and centerline into
        # a test image is disabled)

        # get the centerline in top-down view
        t.s('change centerline perspective')
        # prepare sample points
        ploty = np.linspace(0, gray.shape[0] - 1, gray.shape[0])
        centerlinex = (center_poly[0] * ploty ** 2 + center_poly[1] * ploty
                       + center_poly[2])
        # convert back to uncropped space
        ploty += y_size / 2
        pts_center = np.array(np.transpose(np.vstack([centerlinex, ploty])))
        pts_center = cam.undistortPts(np.reshape(pts_center, (1, -1, 2)))
        # unwarp and change units
        for i in range(len(pts_center[0])):
            pts_center[0, i, 0], pts_center[0, i, 1] = transform(
                pts_center[0, i, 0], pts_center[0, i, 1])
        # now pts_center contains points in vehicle coordinates with the
        # x axis on the rear axle, in cm
        # fit(y, x)
        fit = np.polyfit(pts_center[0, :, 1], pts_center[0, :, 0], 2)
        t.e('change centerline perspective')
        return fit

    if (flag_one_lane == True):
        # (debug block that rendered the side line into a test image is
        # disabled)

        # get the centerline in top-down view
        t.s('change centerline perspective')
        # prepare sample points
        ploty = np.linspace(0, gray.shape[0] - 1, gray.shape[0])
        sidelinex = (side_poly[0] * ploty ** 2 + side_poly[1] * ploty
                     + side_poly[2])
        # convert back to uncropped space
        ploty += y_size / 2
        pts_side = np.array(np.transpose(np.vstack([sidelinex, ploty])))
        pts_side = cam.undistortPts(np.reshape(pts_side, (1, -1, 2)))
        # unwarp and change units
        for i in range(len(pts_side[0])):
            pts_side[0, i, 0], pts_side[0, i, 1] = transform(
                pts_side[0, i, 0], pts_side[0, i, 1])
            # shift by half a lane width towards the lane centre
            # XXX this is really stupid and inefficient
            if (side == 'left'):
                pts_side[0, i, 0] = (pts_side[0, i, 0]
                                     + 0.5 * driveSys.lanewidth)
            else:
                pts_side[0, i, 0] = (pts_side[0, i, 0]
                                     - 0.5 * driveSys.lanewidth)
        # now pts_side contains points in vehicle coordinates with the
        # x axis on the rear axle, in cm
        # fit(y, x)
        fit = np.polyfit(pts_side[0, :, 1], pts_side[0, :, 0], 2)
        t.e('change centerline perspective')
        return fit

    return None
def filter(self, frame):
    frame_dx = cv2.Sobel(frame, cv2.CV_64F, 1, 0, ksize=self.ksize)
    frame_dy = cv2.Sobel(frame, cv2.CV_64F, 0, 1, ksize=self.ksize)
    # L1 gradient magnitude, normalised to [0, 1]
    frame_mag = np.abs(frame_dx) + np.abs(frame_dy)
    frame_mag /= frame_mag.max()
    return frame_mag
def gradient(blurred):
    # Apply the Sobel operator to the smoothed image to get the first
    # derivatives in the horizontal and vertical directions (Gx and Gy),
    # then derive the gradient magnitude and direction from the two maps.
    # ksize=3 because cv2.Canny defaults to ksize=3.
    # cv2.CV_64F stores the result in 64 bits so the arithmetic cannot
    # overflow.
    sobelx = cv2.Sobel(blurred, cv2.CV_64F, 1, 0, ksize=3)
    sobely = cv2.Sobel(blurred, cv2.CV_64F, 0, 1, ksize=3)
    # two arrays for magnitude and quantised direction
    sobel = np.zeros((len(sobelx), len(sobelx[0])))
    theat = np.zeros((len(sobelx), len(sobelx[0])))
    # combined gradient magnitude, per the usual formula
    for i in range(len(sobelx)):
        for j in range(len(sobelx[0])):
            sobel[i][j] = math.sqrt(sobelx[i][j] * sobelx[i][j] +
                                    sobely[i][j] * sobely[i][j])
            # convert radians to degrees
            if sobelx[i][j] != 0:
                theat[i][j] = math.atan(
                    sobely[i][j] / sobelx[i][j]) * 180 / math.pi
            else:
                if sobely[i][j] < 0:
                    theat[i][j] = -90
                elif sobely[i][j] > 0:
                    theat[i][j] = 90
                elif sobely[i][j] == 0:
                    theat[i][j] = 45
            # quantise the direction into four bins
            temp = theat[i][j]
            if -112.5 < temp <= -67.5:
                theat[i][j] = 90
            elif -67.5 < temp <= -22.5:
                theat[i][j] = 135
            elif -22.5 < temp <= 22.5:
                theat[i][j] = 0
            elif 22.5 < temp <= 67.5:
                theat[i][j] = 45
            elif 67.5 < temp <= 112.5:
                theat[i][j] = 90
    # Non-maximum suppression follows.
    # Once the model is done, move this to another file and simplify it;
    # also compare it against the standard NMS algorithm.
    # interior pixels: i from 1 to the second-to-last row, likewise j
    for i in range(1, len(sobel) - 1):
        for j in range(1, len(sobel[0]) - 1):
            if theat[i][j] == 0:
                if sobel[i][j] < max(sobel[i + 1][j], sobel[i - 1][j]):
                    blurred[i][j] = 0
            elif theat[i][j] == 45:
                if sobel[i][j] < max(sobel[i + 1][j - 1],
                                     sobel[i - 1][j - 1]):
                    blurred[i][j] = 0
            elif theat[i][j] == 90:
                if sobel[i][j] < max(sobel[i][j + 1], sobel[i][j - 1]):
                    blurred[i][j] = 0
            elif theat[i][j] == 135:
                if sobel[i][j] < max(sobel[i + 1][j - 1],
                                     sobel[i - 1][j + 1]):
                    blurred[i][j] = 0
    # top edge, excluding corners: i = 0, so i - 1 is out of bounds
    for j in range(1, len(sobelx[0]) - 1):
        i = 0
        if theat[0][j] == 0:
            if sobel[i][j] < sobel[i + 1][j]:
                blurred[i][j] = 0
        elif theat[0][j] == 45:
            if sobel[i][j] < sobel[i + 1][j - 1]:
                blurred[i][j] = 0
        elif theat[0][j] == 90:
            if sobel[i][j] < max(sobel[i][j + 1], sobel[i][j - 1]):
                blurred[i][j] = 0
        elif theat[0][j] == 135:
            if sobel[i][j] < sobel[i + 1][j - 1]:
                blurred[i][j] = 0
    # bottom edge, excluding corners: i is the last row, so i + 1 is invalid
    for j in range(1, len(sobel[0]) - 1):
        i = len(theat) - 1
        if theat[i][j] == 0:
            if sobel[i][j] < sobel[i - 1][j]:
                blurred[i][j] = 0
        elif theat[i][j] == 45:
            if sobel[i][j] < sobel[i - 1][j - 1]:
                blurred[i][j] = 0
        elif theat[i][j] == 90:
            if sobel[i][j] < max(sobel[i][j + 1], sobel[i][j - 1]):
                blurred[i][j] = 0
        elif theat[i][j] == 135:
            if sobel[i][j] < sobel[i - 1][j + 1]:
                blurred[i][j] = 0
    # left edge, excluding corners: j = 0, so j - 1 is invalid
    for i in range(1, len(sobel) - 1):
        j = 0
        if theat[i][j] == 0:
            if sobel[i][j] < max(sobel[i + 1][j], sobel[i - 1][j]):
                blurred[i][j] = 0
        elif theat[i][j] == 90:
            if sobel[i][j] < sobel[i][j + 1]:
                blurred[i][j] = 0
        elif theat[i][j] == 135:
            if sobel[i][j] < sobel[i - 1][j + 1]:
                blurred[i][j] = 0
    # right edge, excluding corners: j is the last column, j + 1 is invalid
    for i in range(1, len(sobel) - 1):
        j = len(sobel[0]) - 1
        if theat[i][j] == 0:
            if sobel[i][j] < max(sobel[i + 1][j], sobel[i - 1][j]):
                blurred[i][j] = 0
        elif theat[i][j] == 45:
            if sobel[i][j] < max(sobel[i + 1][j - 1], sobel[i - 1][j - 1]):
                blurred[i][j] = 0
        elif theat[i][j] == 90:
            if sobel[i][j] < sobel[i][j - 1]:
                blurred[i][j] = 0
        elif theat[i][j] == 135:
            if sobel[i][j] < sobel[i + 1][j - 1]:
                blurred[i][j] = 0
    # top-left corner: neither i nor j can decrease
    if theat[0][0] == 0:
        if sobel[0][0] < sobel[1][0]:
            blurred[0][0] = 0
    elif theat[0][0] == 90:
        if sobel[0][0] < sobel[0][1]:
            blurred[0][0] = 0
    # bottom-left corner: i cannot increase, j cannot decrease
    # (the original wrote blurred[len(sobel - 1)][0], subtracting inside
    # the len() call)
    if theat[len(sobel) - 1][0] == 0:
        if sobel[len(sobel) - 1][0] < sobel[len(sobel) - 2][0]:
            blurred[len(sobel) - 1][0] = 0
    elif theat[len(sobel) - 1][0] == 90:
        if sobel[len(sobel) - 1][0] < sobel[len(sobel) - 1][1]:
            blurred[len(sobel) - 1][0] = 0
    elif theat[len(sobel) - 1][0] == 135:
        if sobel[len(sobel) - 1][0] < sobel[len(sobel) - 2][1]:
            blurred[len(sobel) - 1][0] = 0
    # the bottom-right and top-right corners are not implemented yet; they
    # can follow the pattern above (see the sketch after this function)
    return blurred
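# A sketch of the two corner cases that the comment above leaves unwritten,
# mirroring the neighbour choices of the top-left and bottom-left corners.
# Treat this as an assumption about the intended behaviour; it would be
# spliced into gradient() just before the return, where sobel, theat and
# blurred are in scope.
# bottom-right corner: neither i nor j can increase
i, j = len(sobel) - 1, len(sobel[0]) - 1
if theat[i][j] == 0:
    if sobel[i][j] < sobel[i - 1][j]:
        blurred[i][j] = 0
elif theat[i][j] == 45:
    if sobel[i][j] < sobel[i - 1][j - 1]:
        blurred[i][j] = 0
elif theat[i][j] == 90:
    if sobel[i][j] < sobel[i][j - 1]:
        blurred[i][j] = 0
# (no in-bounds neighbour exists here for the 135-degree direction)

# top-right corner: i cannot decrease, j cannot increase
i, j = 0, len(sobel[0]) - 1
if theat[i][j] == 0:
    if sobel[i][j] < sobel[i + 1][j]:
        blurred[i][j] = 0
elif theat[i][j] in (45, 135):
    if sobel[i][j] < sobel[i + 1][j - 1]:
        blurred[i][j] = 0
elif theat[i][j] == 90:
    if sobel[i][j] < sobel[i][j - 1]:
        blurred[i][j] = 0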
def data_gen_sobel(path, ids):
    """
    Generator that pairs each digit image with its Sobel-x gradient
    (input X, target Y).
    :param path: root folder containing one subfolder per digit (0-9)
    :param ids: flat indices into the concatenated file lists
    :return: yields (X, Y) pairs of shape (1, H, W, 1)
    """
    print("Inside generator...")
    curr_idx = -1  # First index to use
    all_files_per_digit = []
    tot_files_per_digit = []
    acc_files_per_digit = []
    last_id = 0
    for cur_folder in range(10):
        all_files = listdir(join(path, F'{cur_folder}'))
        tot_files = len(all_files)
        last_id += tot_files
        all_files_per_digit.append(all_files)
        tot_files_per_digit.append(tot_files)
        acc_files_per_digit.append(last_id)
    tot_files_per_digit = np.array(tot_files_per_digit)
    acc_files_per_digit = np.array(acc_files_per_digit)
    all_files_per_digit = np.array(all_files_per_digit)
    tot_files = acc_files_per_digit[-1]
    print(tot_files_per_digit)
    print(acc_files_per_digit)

    while True:
        # These lines are for sequential selection
        if curr_idx >= len(ids) or curr_idx == -1:
            curr_idx = 0
            # reshuffle every time all the examples have been visited
            np.random.shuffle(ids)
        else:
            curr_idx += 1
        try:
            cur_file = ids[curr_idx]
            cur_folder = np.argmax(acc_files_per_digit >= cur_file)
            if cur_folder > 0:
                folder_idx = cur_file - acc_files_per_digit[cur_folder]
            else:
                folder_idx = cur_file
            file_name = join(path, F'{cur_folder}',
                             F'{all_files_per_digit[cur_folder][folder_idx]}')
            X_rgb = cv2.imread(file_name)
            X = X_rgb[:, :, 0]
            Y = cv2.Sobel(X, cv2.CV_64F, 1, 0, ksize=3)
            # plt.subplot(1, 2, 1); plt.imshow(X)
            # plt.subplot(1, 2, 2); plt.imshow(Y)
            # plt.show()
            # Normalise the input to [0, 1] and the target to (0, 1);
            # 255 * 4 is the largest possible ksize=3 Sobel response
            X = X / 255
            max_val = 255 * 4
            Y = (Y + max_val) / (max_val * 2)
            XF = np.expand_dims(np.expand_dims(X, axis=2), axis=0)
            YF = np.expand_dims(np.expand_dims(Y, axis=2), axis=0)
            yield XF, YF
        except Exception as e:
            print(F"----- Not able to generate for curr_idx: {curr_idx}, "
                  F"file_name: {file_name}")
# test-05 edge detection
import cv2 as cv

# read image
img = cv.imread("./opencv/lena.jpg")
cv.imshow("source", img)

# convert the image from BGR to GRAY
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow("gray", gray)
cv.imwrite('./opencv/gray.jpg', gray)

# source image edge detection with Sobel
# (ksize must be passed by keyword: the fifth positional argument is dst)
sobel = cv.Sobel(img, cv.CV_8U, 1, 1, ksize=3)
cv.imshow("sobel", sobel)
cv.imwrite('./opencv/sobel.jpg', sobel)

# gray image edge detection with Sobel
graysobel = cv.Sobel(gray, cv.CV_8U, 1, 1, ksize=3)
cv.imshow("graysobel", graysobel)
cv.imwrite('./opencv/sobelgray.jpg', graysobel)

# gray image edge detection with Laplace
# (likewise, the third positional argument is dst, not the kernel size)
laplace = cv.Laplacian(gray, cv.CV_8U, ksize=3)
cv.imshow('laplace', laplace)
cv.imwrite('./opencv/laplace.jpg', laplace)

# gray image edge detection with Canny
canny = cv.Canny(gray, 100, 200, apertureSize=3)
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
while True:
    _, frame = cap.read()
    frame = cv2.flip(frame, 2)
    laplacian = cv2.Laplacian(frame, cv2.CV_64F)
    sobelx = cv2.Sobel(frame, cv2.CV_64F, 1, 0, ksize=5)
    sobely = cv2.Sobel(frame, cv2.CV_64F, 0, 1, ksize=5)
    edges = cv2.Canny(frame, 70, 70)

    cv2.imshow('normal', frame)
    cv2.imshow('laplacian', laplacian)
    # cv2.imshow('sobelx', sobelx)
    # cv2.imshow('sobely', sobely)
    cv2.imshow('edges', edges)

    if cv2.waitKey(1) & 0xFF == ord('a'):
        break

cap.release()
cv2.destroyAllWindows()
# image gradients and edge detection
import cv2
import numpy as np

img = cv2.imread('../data/cluo.jpg')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

lower_red = np.array([30, 150, 50])
upper_red = np.array([255, 255, 180])
mask = cv2.inRange(hsv, lower_red, upper_red)
res = cv2.bitwise_and(img, img, mask=mask)

laplacian = cv2.Laplacian(img, cv2.CV_64F)
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5)

# Canny edge detection
edges = cv2.Canny(img, 100, 200)

cv2.imshow('Edges', edges)
cv2.imshow('Original', img)
cv2.imshow('Res', res)
cv2.imshow('laplacian', laplacian)
cv2.imshow('sobelx', sobelx)
cv2.imshow('sobely', sobely)
cv2.waitKey(0)
cv2.destroyAllWindows()
# @File : op_readingImg.py
# @Software: PyCharm
import cv2
import numpy as np
import matplotlib.pyplot as plt

image_file = '../captcha/tupian/bailing.png'
img = cv2.imread(image_file, cv2.IMREAD_GRAYSCALE)
# size of the input image
h, w = img.shape
# print(h, w)

# Sobel filter (edge detector), horizontal
sobel_horizontal = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)
# Sobel vertical detector
sobel_vertical = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5)
# Laplacian edge detector
laplacian = cv2.Laplacian(img, cv2.CV_64F)
# Canny edge detection
canny = cv2.Canny(img, 50, 240)

# plt.imshow(img, cmap='gray', interpolation='bicubic')
# plt.imshow(sobel_horizontal)
# plt.xticks([]), plt.yticks([])
# plt.show()
cv2.imshow('sobel_horizontal', sobel_horizontal)
# img_cont = cv.drawContours(img_cont_ref / 2, contours, 0, (255, 0, 0))
# cv.imshow('Imagine contures', img_cont)
img = cv.drawContours(image=img, contours=contours, contourIdx=0,
                      color=(255, 0, 0))
# np.int was removed from NumPy; plain int() does the same here
cv.circle(img=img, center=(int(x_centr), int(y_centr)), radius=1,
          color=(0, 255, 255), thickness=5)
cv.imshow('Image', img)

sobelx = cv.Sobel(img_gray, cv.CV_64F, 1, 0, ksize=5)
sobely = cv.Sobel(img_gray, cv.CV_64F, 0, 1, ksize=5)
grad = np.sqrt(sobelx**2 + sobely**2)
grad = grad / np.max(grad)
# gradient orientation shifted to [0, 360] so it can index the R-table
# (note: a value of exactly 360 would overflow the 360-entry table)
orient = np.rad2deg(np.arctan2(sobelx, sobely))
orient = orient + 180
orient = np.uint16(orient)

# build the R-table for the generalised Hough transform
Rtable = [[] for i in range(360)]
for cont in contours[0]:
    wek_kat = orient[cont[0, 1], cont[0, 0]]
    wek_dist = np.sqrt((cont[0, 0] - x_centr)**2 +
                       (cont[0, 1] - y_centr)**2)
    wek_orient = np.arctan2(cont[0, 1] - y_centr, cont[0, 0] - x_centr)
def process_plate(data, ip):
    # path_pic = "lisi.jpg"
    # Load the photo sent by the Raspberry Pi
    # image = cv2.imread(path_pic, 1)
    # show_img(cv2, image, 'original')
    # preprocessed_image = image
    image = cv2.imdecode(data, 1)
    preprocessed_image = cv2.imdecode(data, 1)
    cv2.imwrite('original_image_path.jpg', image)

    # detect the region of the image that contains the plate:
    # convert the image to greyscale
    escala_grices = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # show_img('gris', escala_grices)
    cv2.imwrite('gri_image_path.jpg', escala_grices)

    # remove some of the noise from the image (the vertical axes)
    blur = cv2.GaussianBlur(escala_grices, (5, 5), 0)
    # show_img('ruido', blur)
    cv2.imwrite('difum_image_path.jpg', blur)

    # find the gradient
    grad = cv2.Sobel(blur, cv2.CV_8U, 1, 0, ksize=3)
    # show_img('gradiente', grad)
    cv2.imwrite('grad_image_path.jpg', grad)

    # apply a filter to turn the image into a binary (0/1) image
    _, umbral = cv2.threshold(grad, 0, 255,
                              cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # show_img('threshold', umbral)
    cv2.imwrite('thres_image_path.jpg', umbral)

    # morphological operation to close the gaps between vertical edge lines
    Estructura = cv2.getStructuringElement(cv2.MORPH_RECT, (23, 4))
    morfo = cv2.morphologyEx(umbral, cv2.MORPH_CLOSE, Estructura)
    # show_img('Morfologica', morfo)
    # these are the candidate regions where the plate may be
    cv2.imwrite('mor_image_path.jpg', morfo)

    contours, _ = cv2.findContours(morfo, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_NONE)
    draw_contours(image, contours)
    # show_img("candidatas", image)
    cv2.imwrite('candi_image_path.jpg', image)

    try:
        verify_plate(preprocessed_image, image, contours)
    except:
        print("No plate found")
        # note: parsed_plate has not been assigned yet on this path
        not_image_found(parsed_plate)
        return False

    # show_img('no pro', preprocessed_image)
    # show_img('verified', image)
    cv2.imwrite('verified_image_path.jpg', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    tess = os.popen('tesseract bien_image_path.jpg output', "r").read()
    cat = os.popen('cat output.txt', "r").read()
    print(cat)
    # plat = cat.replace("-", "")
    # print(plat)
    print(ip)
    # plate = plat.strip()
    # plate = "L8989"
    plate = cat.strip()
    parsed_plate = SCP_Parser(plate).parse_plate()
    # cam = 1
    print("request = Placa: " + parsed_plate + " Ip : " + str(ip))
    request = requests.get(
        "http://scpweb.herokuapp.com/api/authorize_plate?plate="
        + parsed_plate + "&cam=192.168.0.41")
    print(request.json())
    response = request.json()
    f = open('output.txt', 'r+')
    f.truncate()
    tess = None
    cat = None
    if parsed_plate.strip() == "IMA6EN":
        parsed_plate = "No encontrada"
    ui = UI("original_image_path.jpg", "gri_image_path.jpg",
            "difum_image_path.jpg", "grad_image_path.jpg",
            "thres_image_path.jpg", "mor_image_path.jpg",
            "candi_image_path.jpg", "verified_image_path.jpg",
            "verified_plate_image_path.jpg", "bien_image_path.jpg",
            "192.168.0.40", parsed_plate)
    not_image_found()
    # return (response['message'] != "Vehicle not found" and
    #         response['message'] != "Cam not found" and
    #         response['message'] != "Visitor not found" and
    #         response['message'])
    return (response['message'] == "Vehicle Found")