def detect(self, image):
    markers = []

    # Stage 1: Detect edges in image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edges = cv2.Canny(gray, 100, 200)

    # Stage 2: Find contours
    contours, _ = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]

    for contour in contours:
        # Stage 3: Shape check
        perimeter = cv2.arcLength(contour, True)
        approx = cv2.approxPolyDP(contour, 0.01 * perimeter, True)

        if len(approx) == self.QUADRILATERAL_POINTS:
            # Stage 4: Perspective warping
            topdown_quad = get_topdown_quad(gray, approx.reshape(4, 2))

            # Stage 5: Border check (sample a pixel 5% in from the top-left
            # corner; NumPy indices must be ints, not floats)
            if topdown_quad[int(topdown_quad.shape[0] * 0.05),
                            int(topdown_quad.shape[1] * 0.05)] > self.BLACK_THRESHOLD:
                continue

            # Stage 6: Get marker pattern
            marker_pattern = None
            try:
                marker_pattern = get_marker_pattern(topdown_quad,
                                                    self.BLACK_THRESHOLD,
                                                    self.WHITE_THRESHOLD)
            except Exception:
                continue

            if not marker_pattern:
                continue

            # Stage 7: Match marker pattern
            marker_found, marker_rotation, marker_name = match_marker_pattern(marker_pattern)

            if marker_found:
                # Stage 8: Duplicate marker check
                if marker_name in [marker[self.MARKER_NAME_INDEX] for marker in markers]:
                    continue

                # Stage 9: Get rotation and translation vectors
                rvecs, tvecs = get_vectors(image, approx.reshape(4, 2), self.mtx, self.dist)
                markers.append([rvecs, tvecs, marker_rotation, marker_name])

    return markers
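# A minimal usage sketch for the stage pipeline above, assuming a wrapper class
# (here called MarkerDetector, an illustrative name) that stores the camera
# matrix self.mtx and distortion coefficients self.dist from a prior
# cv2.calibrateCamera() run, plus the helpers the method calls
# (get_topdown_quad, get_marker_pattern, match_marker_pattern, get_vectors):
#
#     detector = MarkerDetector(mtx=camera_matrix, dist=dist_coeffs)
#     frame = cv2.imread('scene.jpg')
#     for rvecs, tvecs, rotation, name in detector.detect(frame):
#         print(name, rotation, tvecs.ravel())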
import cv2
import numpy as np

cap = cv2.VideoCapture(0)

while True:
    _, frame = cap.read()
    laplacian = cv2.Laplacian(frame, cv2.CV_64F)
    sobelx = cv2.Sobel(frame, cv2.CV_64F, 1, 0, ksize=5)
    sobely = cv2.Sobel(frame, cv2.CV_64F, 0, 1, ksize=5)
    edges = cv2.Canny(frame, 100, 200)

    cv2.imshow('original', frame)
    cv2.imshow('laplacian', laplacian)
    cv2.imshow('sobelx', sobelx)
    cv2.imshow('sobely', sobely)
    cv2.imshow('edges', edges)

    k = cv2.waitKey(5) & 0xFF
    if k == 27:  # Esc quits
        break

cap.release()  # release() belongs to the capture object, not the cv2 module
cv2.destroyAllWindows()
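# Display note, not from the original: cv2.imshow() clips the negative values
# in the CV_64F Laplacian/Sobel frames, so gradients in one direction vanish.
# A common fix (used by a later snippet in this collection) is to display the
# absolute value converted back to uint8, e.g. inside the loop:
#
#     cv2.imshow('laplacian', np.uint8(np.absolute(laplacian)))
#     cv2.imshow('sobelx', np.uint8(np.absolute(sobelx)))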
# circle outline
radius = i[2]
cv2.circle(img, center, radius, (255, 0, 255), 3)
'''

'''
# Person detection
human_cascade = cv2.CascadeClassifier('haarcascade_fullbody.xml')
human = human_cascade.detectMultiScale(gray, 1.1, 4)
for (x, y, w, h) in human:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 220), 3)
'''

# Playing-field detection
edges = cv2.Canny(gray, 400, 450, apertureSize=3)
lines = cv2.HoughLines(edges, 1, np.pi / 180, 100)
for i in range(len(lines)):
    for rho, theta in lines[i]:
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        x1 = int(x0 + 1000 * (-b))
        y1 = int(y0 + 1000 * (a))
        x2 = int(x0 - 1000 * (-b))
        y2 = int(y0 - 1000 * (a))
        cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.imshow('canny', edges)
while rval and img_number < 3001:
    # tutorial https://medium.com/analytics-vidhya/hand-detection-and-finger-counting-using-opencv-python-5b594704eb08
    # tutorial for Canny https://hub.packtpub.com/opencv-detecting-edges-lines-shapes/
    #cv2.imwrite('testing.jpg', cv2.Canny(img, 200, 300))
    #cv2.imshow('canny', cv2.imread('testing.jpg'))
    time.sleep(0.01)
    img = cv2.imread('training_datasets/asl_alphabet_train/B/B' + str(img_number) + '.jpg', 0)
    #hsvim = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    #lower = np.array([0, 48, 80], dtype="uint8")
    #upper = np.array([20, 255, 255], dtype="uint8")
    #skinRegionHSV = cv2.inRange(hsvim, lower, upper)
    #blurred = cv2.blur(skinRegionHSV, (2, 2))
    #ret, thresh = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY)
    cv2.imshow('B', cv2.Canny(img, 200, 300))
    #contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    #try:
    #    contours = max(contours, key=lambda x: cv2.contourArea(x), default=0)
    #except:
    #    print('oops')
    #contours = max(contours, key=lambda x: cv2.contourArea(x), default=0)
    #cv2.drawContours(frame, contours, -1, (255, 255, 0), 2)
    #cv2.imshow("contours", frame)

    ##-------------------------------------------------------
    ## HULL CHANGES
    ##-------------------------------------------------------
    #for i in range(len(contours)):
    #    hull = cv2.convexHull(contours[i])
    #    cv2.drawContours(frame, [hull], -1, (255, 0, 0), 2)
# -*- coding: utf-8 -*-
"""
Created on Tue May 5 12:24:50 2020

@author: JIt Shil
"""
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt

img = cv.imread('master.jpg', 0)

lp = cv.Laplacian(img, cv.CV_64F)
sobelx = cv.Sobel(img, cv.CV_64F, 1, 0)
sobely = cv.Sobel(img, cv.CV_64F, 0, 1)
canny = cv.Canny(img, 100, 200)

lp = np.uint8(np.absolute(lp))
sobelx = np.uint8(np.absolute(sobelx))
sobely = np.uint8(np.absolute(sobely))
combined = cv.bitwise_or(sobelx, sobely)

titles = ['original', 'laplacian', 'sobelx', 'sobely', 'canny', 'combined']
images = [img, lp, sobelx, sobely, canny, combined]
for i in range(6):
    plt.subplot(2, 3, i + 1), plt.imshow(images[i], 'gray')
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])
plt.show()
import numpy as np
import cv2
import matplotlib.image as mpimg   # needed for mpimg.imread below
import matplotlib.pyplot as plt    # needed for the figure below
from numpy import linalg as LA

# Read in and grayscale the image
image = mpimg.imread('exit-ramp.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

# Define a kernel size and apply Gaussian smoothing
kernel_size = 5
blur_gray = cv2.GaussianBlur(gray, (kernel_size, kernel_size), 0)

# Define our parameters for Canny and apply
low_threshold = 50
high_threshold = 150
edges = cv2.Canny(blur_gray, low_threshold, high_threshold)
fig3 = plt.figure()
plt.imshow(edges, cmap='Greys_r')

ysize = edges.shape[0]
xsize = edges.shape[1]
region_select = np.copy(edges)

# Define a triangle region of interest
# Keep in mind the origin (x=0, y=0) is in the upper left in image processing
# Note: if you run this code, you'll find these are not sensible values!!
# But you'll get a chance to play with them soon in a quiz
margin = 80.0
left_bottom = [0, ysize]
right_bottom = [xsize, ysize]
HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
H, S, V = cv2.split(HSV)
Lower = np.array([0, 15, 50])
Upper = np.array([255, 255, 255])
mask = cv2.inRange(HSV, Lower, Upper)
cv2.imshow("HSV", mask)

kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5), (-1, -1))
kernel2 = cv2.getStructuringElement(cv2.MORPH_RECT, (19, 19), (-1, -1))
erode = cv2.erode(mask, kernel)
cv2.imshow("erode", erode)
dilate = cv2.dilate(erode, kernel2)
cv2.imshow("dilate", dilate)

canny = cv2.Canny(dilate, 3, 9, 3)
cv2.imshow("canny", canny)
gblur = cv2.GaussianBlur(canny, (3, 3), 4, 4)
cv2.imshow("gaussianBlur", gblur)

lines = cv2.HoughLines(gblur, 1, np.pi / 180, 100, 0, 0)
result = img.copy()
cv2.waitKey()
for line in lines:
    rho, theta = line[0]
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 1000 * (-b))
    # the snippet was cut off after x1; the remaining endpoint terms follow
    # the same rho/theta pattern used elsewhere in this collection
    y1 = int(y0 + 1000 * (a))
    x2 = int(x0 - 1000 * (-b))
    y2 = int(y0 - 1000 * (a))
    cv2.line(result, (x1, y1), (x2, y2), (0, 0, 255), 2)
def adjust_settings(cap, image_settings):
    cv2.namedWindow("Ventana")
    cv2.resizeWindow("Ventana", 500, 200)
    # Sliders to adjust the size of the working area, used to crop out
    # anything outside the plate; those objects add noise to the detection
    cv2.createTrackbar("Xmax", "Ventana", 519, 640, empty)
    cv2.createTrackbar("Xmin", "Ventana", 112, 640, empty)
    cv2.createTrackbar("Ymax", "Ventana", 420, 480, empty)
    cv2.createTrackbar("Ymin", "Ventana", 87, 480, empty)

    # Contour-detection thresholds used by Canny
    cv2.namedWindow("Umbrales")
    cv2.resizeWindow("Umbrales", 500, 150)
    cv2.createTrackbar("Umbral1", "Umbrales", 120, 500, empty)
    cv2.createTrackbar("Umbral2", "Umbrales", 315, 500, empty)
    # Grayscale comparison threshold: the idea is to set to black every pixel
    # whose luminosity is below this threshold
    cv2.createTrackbar("Ugr", "Umbrales", 245, 255, empty)
    # Area threshold (unused): it looked for a minimum contour area to decide
    # that a contour is the ball
    cv2.createTrackbar("Area", "Umbrales", 3000, 10000, empty)

    while True:
        # Read the slider values
        x_min = cv2.getTrackbarPos("Xmin", "Ventana")
        x_max = cv2.getTrackbarPos("Xmax", "Ventana")
        y_min = cv2.getTrackbarPos("Ymin", "Ventana")
        y_max = cv2.getTrackbarPos("Ymax", "Ventana")
        u_1 = cv2.getTrackbarPos("Umbral1", "Umbrales")
        u_2 = cv2.getTrackbarPos("Umbral2", "Umbrales")
        u_gris = cv2.getTrackbarPos("Ugr", "Umbrales")
        u_area = cv2.getTrackbarPos("Area", "Umbrales")
        u_area = 2000

        success, img = cap.read()

        # Create a zero-filled mask to crop the image
        mask = np.zeros(img.shape[:2], dtype="uint8")
        # Create a rectangle to use as the mask; it is parameterized by the
        # sliders so obstacles can be left out
        cv2.rectangle(mask, (x_min, y_min), (x_max, y_max), 255, -1)
        # Mask the video frame
        masked = cv2.bitwise_and(img, img, mask=mask)
        imagen_recortada = masked
        # Convert to grayscale
        invert = cv2.cvtColor(masked, cv2.COLOR_BGR2GRAY)
        # Histogram equalization would normalize brightness to use the full
        # dynamic range; disabled because it brought no significant gains
        #equ = cv2.equalizeHist(invert)
        #cv2.imshow('Ecualizadas', equ)
        #invert = equ

        # Add a mask to detect the whites
        _, binarizada = cv2.threshold(invert, u_gris, 255, cv2.THRESH_BINARY)
        # Detect edges
        canny = cv2.Canny(binarizada, u_1, u_2)
        # Dilate the edges
        canny = cv2.dilate(canny, None, iterations=1)
        # Shadow suppression
        #_, sombra = cv2.threshold(canny, 254, 255, cv2.THRESH_BINARY)
        # Get the contours; other contour-detection algorithms could be tried
        contornos, _ = cv2.findContours(canny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

        cv2.imshow("Camara", img)
        cv2.imshow("Canny", canny)
        cv2.imshow("Imagen Recortada", imagen_recortada)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
    return x_min, x_max, y_min, y_max, u_1, u_2, u_gris, u_area
def canny(frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    canny = cv2.Canny(gray, 50, 150)
    return canny
import math
import cv2
import numpy as np

def line_image(image, canny_threshold1=0, canny_threshold2=0, hough_threshold=2,
               min_line_length=3, max_gap=5, rho=2.0, theta=.3):
    # Read images, flip them vertically, and convert them to RGB color order
    #img_all = np.array(cv2.cvtColor(cv2.imread(images), cv2.COLOR_BGR2RGB))
    img = np.array(image[582:, :])
    #print(img_all[:, :, :] > [100, 0, 0])
    #img_all = img_all[:, :] > [125, 125, 125]
    #img_all = img_all[:, :, :] - 100
    #img_all = np.array([img[:, :] for img in img_all])
    #new_img = list(img_all[0])
    #print('ALL: ', img_all[0])
    #for i1, v1 in enumerate(new_img):
    #    for i2, v2 in enumerate(v1):
    #        if (v2[1] < 125 and v2[2] < 125) or v2[0] > 100:
    #            new_img[i1][i2] = [0, 0, 0]
    #new_img = np.array([new_img])
    #print('NEW: ', new_img[0])
    #return new_img[0]

    # isolate yellow in HSV space
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    lower_yellow = np.array([0, 100, 100])
    upper_yellow = np.array([70, 255, 255])
    #upper_yellow = np.array([70, 100, 100])
    mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
    res = cv2.bitwise_and(img, img, mask=mask)
    gray_arr = np.array(cv2.cvtColor(res, cv2.COLOR_BGR2GRAY))
    # blur images to avoid recognizing small lines
    blur_arr = np.array(cv2.blur(gray_arr, (1, 1)))
    #blur_arr = gray_arr
    # use canny threshold to find edges of shapes
    canny_arr = np.array(cv2.Canny(blur_arr, canny_threshold1, canny_threshold2))
    #return cv2.cvtColor(canny_arr, cv2.COLOR_GRAY2RGB)

    line_arr = []
    line_coord_arr = []
    line_count = 0
    # pass the length/gap limits by keyword: positionally the fifth argument
    # of HoughLinesP is the optional `lines` output array, not minLineLength
    lines = cv2.HoughLinesP(canny_arr, rho, theta, hough_threshold,
                            minLineLength=min_line_length, maxLineGap=max_gap)
    if lines is not None:
        h = len(img)
        w = len(img[0])
        minX, minY, maxX, maxY = lines[0][0]
        lines = np.array(lines).reshape(-1, 4)
        #print(lines)
        for line in lines:
            x1, y1, x2, y2 = line
            if (x1 > (w - (3.5 * y1) - 1200) and (x1 < w - (1 * y1) - 800)
                    and x2 > (w - (3.5 * y2) - 1200) and (x2 < w - (1 * y2) - 800)
                    and x2 < w / 2 + 50 and y2 > 100):
                if y1 > minY or (y1 == minY and x1 > minX):
                    minY = y1
                    minX = x1
                if y2 > minY or (y2 == minY and x2 > minX):
                    minY = y2
                    minX = x2
                if x1 > maxX:
                    maxY = y1
                    maxX = x1
                if x2 > maxX:
                    maxY = y2
                    maxX = x2
                #cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), 2)
                line_count += 1
        cv2.line(img, (minX, minY), (maxX, maxY), (0, 255, 0), 2)
        slopeY = (maxY - minY)
        slopeX = (maxX - minX)
        slope = slopeY / slopeX
        #cv2.line(img, (w//2, h), (int((w//2) - slopeX), int(h - slopeY)), (0, 0, 255), 2)
        lineX2 = w // 2
        if slope != 0 and not math.isnan(slope):
            lineX2 = int((minX - minY / slope))
        cv2.line(img, (w // 2, h), (lineX2, 0), (0, 0, 255), 2)
        value = (lineX2 - w / 2) / w
        print(value)
    return img
def readVideo(champions, path, threshold, second_inicial, frame_step, frame_stop, json_path):
    title = "threshold-" + str(threshold) + "_Si-" + str(second_inicial)
    root_path = json_path.split("target")
    champions_img = getChampions(champions, root_path[0] + "src/main/resources/img/")
    progress = int((frame_stop - (second_inicial * 30)) / 10)
    print(progress)

    champions_dict = {}
    for name in champions:
        champions_dict[name] = []
        # seed each champion with an unknown first position
        champions_dict[name].append(None)

    h_champ, w_champ = champions_img[0].shape
    video_rgb = cv2.VideoCapture(path)
    video_rgb.set(cv2.CAP_PROP_POS_FRAMES, int(second_inicial * 30))

    # select portion of the image
    _, frame = video_rgb.read()
    h = frame.shape[0]
    w = frame.shape[1]
    h1 = int(h - h / 4)
    w1 = int(w - w / 7)
    frame = frame[h1:h, w1:w]
    h1_peq = int(frame.shape[0] / 5)
    w1_peq = int(frame.shape[0] / 5)

    fram_pos = 0
    while video_rgb.isOpened():
        ret, frame = video_rgb.read()
        if ret:
            if fram_pos % progress == 0:
                print(str(fram_pos * 10 / progress), "% read", flush=True)
            if fram_pos % frame_step == 0:
                if fram_pos < frame_stop:
                    # crop map area
                    frame = frame[h1:h, w1:w]
                    # frame transformation
                    frame = cv2.GaussianBlur(frame, (5, 5), 0)
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                    frame = cv2.Canny(frame, 50, 100, True)
                    i = 0
                    for champ in champions_img:
                        # frame_compare = cv2.subtract(frame_canny, start)
                        try:
                            # search first in a small window around the
                            # champion's last known position
                            peqframe = frame[
                                champions_dict[champions[i]][-1][0] - h1_peq:
                                champions_dict[champions[i]][-1][0] + h1_peq,
                                champions_dict[champions[i]][-1][1] - w1_peq:
                                champions_dict[champions[i]][-1][1] + w1_peq]
                            res = cv2.matchTemplate(peqframe, champ, cv2.TM_CCOEFF_NORMED)
                            _, max_val, _, max_loc = cv2.minMaxLoc(res)
                            if max_val > threshold:
                                champions_dict[champions[i]].append([
                                    max_loc[0] + int(w_champ / 2),
                                    max_loc[1] + int(h_champ / 2)])
                            else:
                                res = cv2.matchTemplate(frame, champ, cv2.TM_CCOEFF_NORMED)
                                _, max_val, _, max_loc = cv2.minMaxLoc(res)
                                if max_val > threshold:
                                    champions_dict[champions[i]].append([
                                        max_loc[0] + int(w_champ / 2),
                                        max_loc[1] + int(h_champ / 2)])
                                else:
                                    champions_dict[champions[i]].append(None)
                        except TypeError:
                            # last position was None: fall back to a
                            # full-frame template search
                            res = cv2.matchTemplate(frame, champ, cv2.TM_CCOEFF_NORMED)
                            _, max_val, _, max_loc = cv2.minMaxLoc(res)
                            if max_val > threshold:
                                champions_dict[champions[i]].append([
                                    max_loc[0] + int(w_champ / 2),
                                    max_loc[1] + int(h_champ / 2)])
                            else:
                                champions_dict[champions[i]].append(None)
                        i += 1
                    i = 0
                else:
                    champions_dict["0frameStep"] = frame_step
                    champions_dict["0seg,f_step,f_stop"] = [second_inicial, frame_step, frame_stop]
                    writeJSON(json_path, title, champions_dict)
                    return 1
            fram_pos += 1
        else:
            break

    champions_dict["0frameStep"] = frame_step
    champions_dict["0seg,f_step,f_stop"] = [second_inicial, frame_step, frame_stop]
    writeJSON(json_path, title, champions_dict)
from scipy.spatial import distance as dist
import scipy.misc
from imutils import perspective
from imutils import contours
import numpy as np
import argparse
import imutils
import cv2
import scipy.ndimage

#####################

image = cv2.imread("IMG_0067.JPG", cv2.IMREAD_UNCHANGED)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#gray = cv2.GaussianBlur(gray, (7, 7), 0)
edges = cv2.Canny(gray, 20, 80)
scipy.misc.imsave('edge5.jpg', edges)

# close small gaps: dilate, then erode the dilated result
# (the original eroded `edges` again, silently discarding the dilation)
edges2 = scipy.ndimage.binary_dilation(edges).astype(edges.dtype)
edges2 = scipy.ndimage.binary_erosion(edges2).astype(edges.dtype)
scipy.misc.imsave('edge6.jpg', edges2)

ret, thresh = cv2.threshold(edges, 127, 255, 0)
contour, hier = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# draw the contours found above; `contour` is the list from findContours,
# not the imutils `contours` module imported earlier
d = cv2.drawContours(gray, contour, -1, (0, 255, 0), 3)
scipy.misc.imsave('d.jpg', d)

binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
scipy.misc.imsave('outfile.jpg', gray)
def chambai(self):
    for i in range(len(self.file1)):
        self.anh = self.file1[i]
        # self.hx = cv2.cvtColor(self.img1, cv2.COLOR_BGR2GRAY)
        # self.hm = cv2.GaussianBlur(self.hx, (5, 5), 0)
        # self.hc = cv2.Canny(self.hm, 75, 200)
        self.can1 = Canvas(self.root, width=2000, height=2000)
        self.can1.place(x=870, y=80)
        self.mn = cv2.imread(self.anh)
        self.hx = cv2.cvtColor(self.mn, cv2.COLOR_BGR2GRAY)
        self.hm = cv2.GaussianBlur(self.hx, (5, 5), 0)
        self.hc = cv2.Canny(self.hm, 75, 200)
        self.khungbt = cv2.findContours(self.hc.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)
        self.khungbt = imutils.grab_contours(self.khungbt)
        self.khung = None
        self.khung = self.timkhung(self.khungbt)
        self.baithi = four_point_transform(self.mn, self.khung.reshape(4, 2))
        #------
        self.khungtln = self.timkhungcham(self.baithi)
        self.khung1n = self.timo1(self.khungtln, 0)
        self.baithi = four_point_transform(self.baithi, self.khung1n.reshape(4, 2))
        #---
        self.baithi = self.baithi[20:self.baithi.shape[0] - 20, 20:self.baithi.shape[1] - 20]

        # ROTATE THE ANSWER SHEET (slice indices must be ints, hence //)
        self.phantren = self.baithi[0:self.baithi.shape[0] // 34, 0:self.baithi.shape[1]]
        self.row, self.col, self.cha = self.baithi.shape
        self.dem1 = 0
        for i in range(self.phantren.shape[0]):
            for j in range(self.phantren.shape[1]):
                if self.phantren[i, j, 0] < 100:
                    self.dem1 = self.dem1 + 1
        #print(self.dem1)
        if self.dem1 < 1000:
            self.dem2 = 0
            self.benphai = self.baithi[0:self.baithi.shape[0], 0:self.baithi.shape[1] // 34]
            for i in range(self.benphai.shape[0]):
                for j in range(self.benphai.shape[1]):
                    if self.benphai[i, j, 0] < 150:
                        self.dem2 = self.dem2 + 1
            #print(self.dem2)
            if self.dem2 < 1000:
                self.dem3 = 0
                self.bentrai = self.baithi[0:self.baithi.shape[0],
                                           self.baithi.shape[1] - self.baithi.shape[1] // 34:self.baithi.shape[1]]
                for i in range(self.bentrai.shape[0]):
                    for j in range(self.bentrai.shape[1]):
                        if self.bentrai[i, j, 0] < 50:
                            self.dem3 = self.dem3 + 1
                #print(self.dem3)
                if self.dem3 < 1000:
                    self.r = cv2.getRotationMatrix2D((self.col / 2, self.row / 2), 180, 1)
                    self.baithi = cv2.warpAffine(self.baithi, self.r, (self.col, self.row))
                else:
                    self.r = cv2.getRotationMatrix2D((self.col / 2, self.row / 1.488), 90, 1)
                    self.baithi = cv2.warpAffine(self.baithi, self.r, (self.row, self.col))
            else:
                self.r = cv2.getRotationMatrix2D((self.col / 2.525, self.row / 2), -90, 1)
                self.baithi = cv2.warpAffine(self.baithi, self.r, (self.row, self.col))

        # DETECT THE NUMBER-OF-QUESTIONS MARKER
        self.nhandang = self.baithi[1:self.row // 20, 0:self.col]
        self.khungdang = self.chuyendoi(self.nhandang)
        self.khungnhandang = self.timkhung(self.khungdang)
        self.nhandang = four_point_transform(self.nhandang, self.khungnhandang.reshape(4, 2))
        self.nhandang = self.nhandang[6:self.nhandang.shape[0] - 6, 6:self.nhandang.shape[1] - 6]
        self.phunhandang = cv2.cvtColor(self.nhandang, cv2.COLOR_BGR2GRAY)
        self.thresh0 = cv2.threshold(self.phunhandang, 0, 255,
                                     cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        self.tlkhung0 = cv2.findContours(self.thresh0.copy(), cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)
        self.tlkhung0 = imutils.grab_contours(self.tlkhung0)
        self.n = self.tam(self.tlkhung0, self.thresh0)
        p = 0
        if self.n == 1:
            p = 10
        elif self.n == 2:
            p = 15
        else:
            p = 20

        dung = 0
        socauhoi = 0
        #ANSWER_KEY = {0: 0, 1: 1, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 0, 8: 0, 9: 2, 10: 0, 11: 0, 12: 1, 13: 2, 14: 2, 15: 1, 16: 1, 17: 1, 18: 1, 19: 1}
        self.khungtl = self.timkhungcham(self.baithi)
        self.khung1 = self.timo1(self.khungtl, 0)
        self.khung2 = self.timo1(self.khungtl, 2)
        #self.o1 = four_point_transform(self.baithi, self.khung1.reshape(4, 2))
        self.toado1 = self.khung1[0][0]
        self.d1 = (self.toado1[0] + self.toado1[1]) / 2
        for o in range(4):
            if (self.khung1[o][0][0] + self.khung1[o][0][1]) / 2 < self.d1:
                self.toado1 = self.khung1[o][0]
                self.d1 = (self.toado1[0] + self.toado1[1]) / 2
        self.toado2 = self.khung2[0][0]
        self.d2 = (self.toado2[0] + self.toado2[1]) / 2
        for o in range(4):
            if (self.khung2[o][0][0] + self.khung2[o][0][1]) / 2 < self.d2:
                self.toado2 = self.khung2[o][0]
                self.d2 = (self.toado2[0] + self.toado2[1]) / 2
        if self.d1 > self.d2:
            self.tamp1 = self.khung1
            self.tamp2 = self.khung2
            self.toado = self.toado1
            self.toado1 = self.toado2
            self.toado2 = self.toado
        else:
            self.tamp1 = self.khung2
            self.tamp2 = self.khung1

        # GRADE THE FIRST ANSWER BLOCK
        self.o1 = four_point_transform(self.baithi, self.tamp2.reshape(4, 2))
        self.o1 = self.o1[5:self.o1.shape[0] - 5, 5:self.o1.shape[1] - 5]
        self.phukhung1 = cv2.cvtColor(self.o1, cv2.COLOR_BGR2GRAY)
        self.thresh1 = cv2.threshold(self.phukhung1, 0, 255,
                                     cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        self.tlkhung1 = cv2.findContours(self.thresh1.copy(), cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)
        self.tlkhung1 = imutils.grab_contours(self.tlkhung1)
        cautraloi1 = []
        for f in self.tlkhung1:
            (x, y, w, h) = cv2.boundingRect(f)
            ar = w / float(h)
            if w >= 20 and h >= 20 and 0.9 <= ar <= 1.1:
                cautraloi1.append(f)
        cautraloi1 = contours.sort_contours(cautraloi1, method="top-to-bottom")[0]
        for (q, i) in enumerate(np.arange(0, len(cautraloi1), 4)):
            cautraloisx1 = contours.sort_contours(cautraloi1[i:i + 4])[0]
            bubbled1 = None
            for (j, c) in enumerate(cautraloisx1):
                matna1 = np.zeros(self.thresh1.shape, dtype="uint8")
                cv2.drawContours(matna1, [c], -1, 255, -1)
                matna1 = cv2.bitwise_and(self.thresh1, self.thresh1, mask=matna1)
                tong1 = cv2.countNonZero(matna1)
                if bubbled1 is None or tong1 > bubbled1[0]:
                    bubbled1 = (tong1, j)
            color = (255, 0, 0)
            k = self.ANSWER_KEY[q]
            if k == bubbled1[1]:
                color = (0, 255, 0)
                dung += 1
            cv2.drawContours(self.o1, [cautraloisx1[k]], -1, color, 3)
        self.baithi[self.toado1[1] + 5:self.toado1[1] + self.o1.shape[0] + 5,
                    self.toado1[0] + 5:self.toado1[0] + self.o1.shape[1] + 5] = self.o1

        # GRADE THE SECOND ANSWER BLOCK
        self.o2 = four_point_transform(self.baithi, self.tamp1.reshape(4, 2))
        self.o2 = self.o2[5:self.o2.shape[0] - 5, 5:self.o2.shape[1] - 5]
        self.phukhung2 = cv2.cvtColor(self.o2, cv2.COLOR_BGR2GRAY)
        self.thresh2 = cv2.threshold(self.phukhung2, 0, 255,
                                     cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        self.tlkhung2 = cv2.findContours(self.thresh2.copy(), cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)
        self.tlkhung2 = imutils.grab_contours(self.tlkhung2)
        cautraloi2 = []
        for f in self.tlkhung2:
            (x, y, w, h) = cv2.boundingRect(f)
            ar = w / float(h)
            if w >= 20 and h >= 20 and 0.9 <= ar <= 1.1:
                cautraloi2.append(f)
        cautraloi2 = contours.sort_contours(cautraloi2, method="top-to-bottom")[0]
        for (q, i) in enumerate(np.arange(0, len(cautraloi2), 4)):
            cautraloisx2 = contours.sort_contours(cautraloi2[i:i + 4])[0]
            bubbled2 = None
            for (j, c) in enumerate(cautraloisx2):
                matna2 = np.zeros(self.thresh2.shape, dtype="uint8")
                cv2.drawContours(matna2, [c], -1, 255, -1)
                matna2 = cv2.bitwise_and(self.thresh2, self.thresh2, mask=matna2)
                tong2 = cv2.countNonZero(matna2)
                if bubbled2 is None or tong2 > bubbled2[0]:
                    bubbled2 = (tong2, j)
            color = (255, 0, 0)
            k = self.ANSWER_KEY[p + q]
            if k == bubbled2[1]:
                color = (0, 255, 0)
                dung += 1
            cv2.drawContours(self.o2, [cautraloisx2[k]], -1, color, 3)
        self.baithi[self.toado2[1] + 5:self.toado2[1] + self.o2.shape[0] + 5,
                    self.toado2[0] + 5:self.toado2[0] + self.o2.shape[1] + 5] = self.o2
        #print(dung)

        # READ THE STUDENT ID (MSSV)
        self.khung3 = self.timo1(self.khungtl, 8)
        self.o3 = four_point_transform(self.baithi, self.khung3.reshape(4, 2))
        p_rot = cv2.getRotationMatrix2D((self.o3.shape[0] / 2, self.o3.shape[1] / 1.52), -90, 1)
        self.o3 = cv2.warpAffine(self.o3, p_rot, (self.o3.shape[0], self.o3.shape[1]))
        self.o3 = self.o3[5:self.o3.shape[0] - 5, 5:self.o3.shape[1] - 5]
        self.phukhung3 = cv2.cvtColor(self.o3, cv2.COLOR_BGR2GRAY)
        self.thresh3 = cv2.threshold(self.phukhung3, 0, 255,
                                     cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        self.tlkhung3 = cv2.findContours(self.thresh3.copy(), cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)
        self.tlkhung3 = imutils.grab_contours(self.tlkhung3)
        cautraloi3 = []
        for f in self.tlkhung3:
            (x, y, w, h) = cv2.boundingRect(f)
            ar = w / float(h)
            if w >= 20 and h >= 20 and 0.9 <= ar <= 1.1:
                cautraloi3.append(f)
        cautraloi3 = contours.sort_contours(cautraloi3, method="top-to-bottom")[0]
        mssv = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
        for (q, i) in enumerate(np.arange(0, len(cautraloi3), 10)):
            cautraloisx3 = contours.sort_contours(cautraloi3[i:i + 10])[0]
            bubbled3 = None
            for (j, c) in enumerate(cautraloisx3):
                matna3 = np.zeros(self.thresh3.shape, dtype="uint8")
                cv2.drawContours(matna3, [c], -1, 255, -1)
                matna3 = cv2.bitwise_and(self.thresh3, self.thresh3, mask=matna3)
                tong3 = cv2.countNonZero(matna3)
                if bubbled3 is None or tong3 > bubbled3[0]:
                    bubbled3 = (tong3, j)
            mssv[q] = 9 - bubbled3[1]
        self.msv = mssv[0]
        for i in range(1, 7):
            self.msv = (self.msv * 10) + mssv[i]
        #print(self.msv)

        # COMPUTE AND OVERLAY THE SCORE
        if self.n == 1:
            socauhoi = 20
        elif self.n == 2:
            socauhoi = 30
        else:
            socauhoi = 40
        self.diem = float(dung * 10) / socauhoi
        #print(self.diem)
        cv2.putText(self.baithi, "{:.2f}".format(self.diem),
                    (int(self.baithi.shape[0] / 2.5), int(self.baithi.shape[1] / 2.5)),
                    cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 6)
        cv2.putText(self.baithi, "{:}".format(self.msv),
                    (int(self.baithi.shape[0] / 4.5), int(self.baithi.shape[1] / 5.2)),
                    cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 3)
        #self.nhandang = imutils.resize(self.nhandang, height=50)
        self.baithi = imutils.resize(self.baithi, height=800)
        #self.o2 = imutils.resize(self.o2, height=800)
        self.im = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(self.baithi))
        self.can1.create_image(5, 5, anchor=NW, image=self.im)
        self.luubaikt(self.baithi, self.msv)
        f = open("/home/doan/Desktop/Ketqua/result.txt", "a")
        f.write(str(self.msv) + " " + str(self.diem) + " " +
                str(self.date.strftime("%Y%m%d %H:%M:%S") + "\n"))
        f.close()
    self.done = Label(self.root, text="Done!").place(x=20, y=405)
import cv2
import numpy as np

cap = cv2.VideoCapture(0)

while True:
    _, frame = cap.read()
    laplacian = cv2.Laplacian(frame, cv2.CV_64F)
    sobelx = cv2.Sobel(frame, cv2.CV_64F, 1, 0, ksize=5)
    sobely = cv2.Sobel(frame, cv2.CV_64F, 0, 1, ksize=5)
    edges = cv2.Canny(frame, 150, 150)

    cv2.imshow('org', frame)
    cv2.imshow('lap', laplacian)
    cv2.imshow('sobelx', sobelx)
    cv2.imshow('sobely', sobely)
    cv2.imshow('canny', edges)

    k = cv2.waitKey(5) & 0xFF
    if k == 27:  # Esc quits
        break

cap.release()
cv2.destroyAllWindows()
def canny(image):
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    canny = cv2.Canny(blur, 50, 150)
    return canny
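# A quick usage sketch for the helper above; the file name is illustrative:
frame = cv2.imread('road.jpg')
edges = canny(frame)
cv2.imshow('edges', edges)
cv2.waitKey(0)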
def threadJHLineTracer(cam_obj):
    MAX_VEL = 0.15
    f_list = [(-1, -1)]
    s_list = [(999, 999)]
    frame_count_w = frame_count_y = last_error = 0
    t = threading.currentThread()
    while getattr(t, "run", True):
        frame_count_w += 1
        frame_count_y += 1
        ret, orig_frame = cam_obj.get_line_frame()
        if not ret:
            break
        draw_temp = orig_frame.copy()
        cutting_img = draw_temp[360:, :]
        y_roi = draw_temp[360:, :250]
        w_roi = draw_temp[360:, 310:]

        M = np.ones(y_roi.shape, dtype="uint8") * 90
        y_roi = cv2.subtract(y_roi, M)
        # lower/upper LAB bounds for the yellow line (the original assigned
        # min_lab twice and passed it as both bounds, which matches nothing)
        min_lab = np.array([84, 110, 128])
        max_lab = np.array([195, 140, 220])
        # Convert the BGR image to other color spaces
        img_lab = cv2.cvtColor(y_roi, cv2.COLOR_BGR2LAB)
        img_lab = cv2.GaussianBlur(img_lab, (5, 5), 0)
        mask_lab_yellow = cv2.inRange(img_lab, min_lab, max_lab)
        mask_lab_yellow = cv2.erode(mask_lab_yellow, None, iterations=1)
        mask_lab_yellow = cv2.dilate(mask_lab_yellow, None, iterations=3)
        result_lab = cv2.bitwise_and(y_roi, y_roi, mask=mask_lab_yellow)
        edges = cv2.Canny(result_lab, 75, 150)
        # cv2.imshow('yellow edges', edges)

        # minLineLength must be passed by keyword: the fifth positional
        # argument of HoughLinesP is the optional output array
        lines = cv2.HoughLinesP(edges, 1, np.pi / 360, 50,
                                minLineLength=10, maxLineGap=150)
        if lines is not None:
            del f_list[:]
            for line in lines:
                x1, y1, x2, y2 = line[0]
                f_list.append((x1, y1))
                f_list.append((x2, y2))
            cv2.circle(y_roi, max(f_list), 10, (0, 0, 255), -1)
            cv2.line(y_roi, (x1, y1), (x2, y2), (0, 255, 0), 4)
        # if frame_count_y > 5:
        #     del f_list[:]
        #     f_list.append((0, 1))

        M1 = np.ones(w_roi.shape, dtype="uint8") * 90
        w_roi = cv2.subtract(w_roi, M1)
        # lower/upper LAB bounds for the white line
        min_lab = np.array([89, 112, 104])
        max_lab = np.array([198, 142, 137])
        img_lab = cv2.cvtColor(w_roi, cv2.COLOR_BGR2LAB)
        img_lab = cv2.GaussianBlur(img_lab, (5, 5), 0)
        mask_lab_white = cv2.inRange(img_lab, min_lab, max_lab)
        mask_lab_white = cv2.erode(mask_lab_white, None, iterations=1)
        mask_lab_white = cv2.dilate(mask_lab_white, None, iterations=3)
        result_lab1 = cv2.bitwise_and(w_roi, w_roi, mask=mask_lab_white)
        edges = cv2.Canny(result_lab1, 75, 150)
        # cv2.imshow('white edges', edges)
        lines = cv2.HoughLinesP(edges, 1, np.pi / 360, 50,
                                minLineLength=10, maxLineGap=150)
        if lines is not None:
            del s_list[:]
            for line in lines:
                x1, y1, x2, y2 = line[0]
                s_list.append((x1 + 310, y1))
                s_list.append((x2 + 310, y2))
            (c_x, c_y) = min(s_list)
            cv2.circle(w_roi, (c_x - 310, c_y), 10, (255, 255, 0), -1)
            cv2.line(w_roi, (x1, y1), (x2, y2), (0, 0, 255), 4)
        # if frame_count_w > 5:
        #     del s_list[:]
        #     s_list.append((640, 1))

        # cv2.line(frame, ((w_x1+y_x1)//2, (w_y1+y_y1)//2), ((w_x2+y_x2)//2, (y_y2+w_y2)//2), (0, 255, 255), 4)
        (x1, y1) = min(s_list)
        (x2, y2) = max(f_list)
        # if x1 >= x2:
        #     x2 = 120
        if (310 < x1 < 340) and (220 < x2 < 250):
            if y1 > y2:
                x1 = 550
            elif y1 < y2:
                x2 = 100
        # print('y;', x2)
        # print('w:', x1)
        ave = (x1 + x2) // 2
        cv2.circle(cutting_img, ((x1 + x2) // 2, (y1 + y2) // 2), 10, (200, 0, 25), -1)
        # cv2.imshow("frame", cutting_img)
        # cv2.imshow('yroi', y_roi)
        # cv2.imshow('wroi', w_roi)
        # print("s_list : ", min(s_list))

        # PD controller on the lateral error
        error = ave - 300
        ros_kp = 0.0055
        ros_kd = 0.0075
        angular_z = ros_kp * error + ros_kd * (error - last_error)
        last_error = error
        linear_x = min(MAX_VEL * ((1 - abs(error) / 500) ** 2.2), 0.2)
        # sign-flipped steering command
        angular_z = -min(angular_z, 2.0) if angular_z < 0 else -max(angular_z, -2.0)
        # pub_linear.publish(linear_x)
        # pub_angular.publish(angular_z)
        t_move(linear_x, angular_z)
        if chr(cv2.waitKey(1) & 255) == 'q':
            t_move(0, 0)
            break
    t_move(0, 0)
def auto_canny(image):
    # NOTE: despite the name, these thresholds are fixed rather than derived
    # from the image statistics
    lower = 100
    upper = 200
    edged = cv2.Canny(image, lower, upper)
    # return the edged image
    return edged
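# A sketch of the median-based variant the function's name suggests, assuming
# a tunable sigma (0.33 is the commonly used default, not from the original):
import numpy as np

def auto_canny_median(image, sigma=0.33):
    v = np.median(image)                      # image median intensity
    lower = int(max(0, (1.0 - sigma) * v))    # lower threshold below the median
    upper = int(min(255, (1.0 + sigma) * v))  # upper threshold above the median
    return cv2.Canny(image, lower, upper)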
Parado Sulca Yurgen
Rodriguez Manuelo Jhoelver
"""
import cv2
"""
Read the image from the project directory
"""
imagen = cv2.imread('figuras.png')
"""
Convert the image colors to grayscale
"""
grises = cv2.cvtColor(imagen, cv2.COLOR_BGR2GRAY)
"""
Binarize the image with the Canny function
"""
bordesAislados = cv2.Canny(grises, 100, 150)
"""
Apply dilation and erosion to the image to make identification easier
"""
bordesAislados = cv2.dilate(bordesAislados, None, iterations=1)
bordesAislados = cv2.erode(bordesAislados, None, iterations=1)
"""
Find the external contours of the binarized image with findContours
Parameters: image to process, retrieval mode, storage method
"""
contornos, _ = cv2.findContours(bordesAislados, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
"""
Start a loop to work with the pixels
"""
for i in contornos:
    # the loop body is missing from the original; a minimal plausible body
    # that outlines each detected contour:
    cv2.drawContours(imagen, [i], -1, (0, 255, 0), 2)
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np

img = cv.imread('images/sudoku.jpg')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
edges = cv.Canny(gray, 50, 150, apertureSize=3)

lines = cv.HoughLines(edges, 1, np.pi / 180, 200)
for line in lines:
    rho, theta = line[0]
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    # x1 stores the rounded off value of (r * cos(theta) - 1000 * sin(theta))
    x1 = int(x0 + 1000 * (-b))
    # y1 stores the rounded off value of (r * sin(theta) + 1000 * cos(theta))
    y1 = int(y0 + 1000 * (a))
    # x2 stores the rounded off value of (r * cos(theta) + 1000 * sin(theta))
    x2 = int(x0 - 1000 * (-b))
    # y2 stores the rounded off value of (r * sin(theta) - 1000 * cos(theta))
    y2 = int(y0 - 1000 * (a))
    cv.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
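# The snippet stops after drawing; a minimal follow-up to actually see the
# result (the window name is arbitrary):
cv.imshow('hough lines', img)
cv.waitKey(0)
cv.destroyAllWindows()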
def ImagProgressHSV(filename, flag, flag2):
    """
    Process an image and threshold it.
    :param filename: an image, obtained from the getImag function
    :param flag: "ball" processes the ball (Hough circle transform);
        "hole" processes the goal (bounding rectangle)
    :param flag2: 0 for images from the upper camera, 1 for the lower camera
    :return: a two-element array: for the ball, the x, y of the ball center;
        for the hole, the x, y of the bottom-center of the rectangle; plus a
        black-and-white frame for the pixel-traversal function
    """
    Image = filename
    # Image = cv.imread(filename)
    # Take the image, apply Gaussian blur + color conversion
    # cv.namedWindow('test', cv.WINDOW_NORMAL)
    Image_Gau = cv.GaussianBlur(Image, (9, 9), 0)
    Image_HSV = cv.cvtColor(Image_Gau, cv.COLOR_BGR2HSV)
    # print(Image_HSV[180, 180])

    # HSV threshold settings
    lowarrayball = np.array([150, 43, 46])
    higharrayball = np.array([180, 255, 255])
    # inverted logic: let everything except red through, so red becomes
    # black; this works well with the lower camera
    lowarrayhole = np.array([100, 43, 46])
    higharrayhole = np.array([124, 255, 255])  # cyan + blue
    # lowarraydark = np.array([0, 43, 46])
    # higharraydark = np.array([10, 255, 255])

    # Binarize
    dstball = cv.inRange(Image_HSV, lowarrayball, higharrayball)
    dsthole = cv.inRange(Image_HSV, lowarrayhole, higharrayhole)
    # dstdark = cv.inRange(Image_HSV, lowarraydark, higharraydark)

    # Median blur and opening to remove noise
    MedirImagball = cv.medianBlur(dstball, 3)
    MedirImaghole = cv.medianBlur(dsthole, 3)
    element = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
    MedirImagball = cv.morphologyEx(MedirImagball, cv.MORPH_OPEN, element)
    MedirImaghole = cv.morphologyEx(MedirImaghole, cv.MORPH_OPEN, element)
    cv.imshow('mediaball', MedirImagball)
    cv.imshow('mediahole', MedirImaghole)
    resultball = cv.Canny(MedirImagball, 50, 150)
    resulthole = cv.Canny(MedirImaghole, 50, 150)
    # cv.imshow('canny', result)
    # MedirImagdark = cv.medianBlur(dstdark, 9)
    # element = cv.getStructuringElement(cv.MORPH_RECT, (13, 13))
    # MedirImagdark = cv.morphologyEx(MedirImagdark, cv.MORPH_OPEN, element)
    # cv.imshow('testdark', MedirImag)
    # dark = cv.addWeighted(MedirImagdark, 0.5, MedirImag, 0.5, 0)
    # cv.imshow('add', dark)

    # Hough transform to detect the circle center
    # if flag == 'ball':
    #     circles = cv.HoughCircles(result, cv.HOUGH_GRADIENT, 1, 60, param1=1, param2=5, minRadius=1, maxRadius=20)
    #     # the 4th parameter is the minimum center distance; param2 is the
    #     # accumulator threshold of cv.HOUGH_GRADIENT: the smaller it is,
    #     # the more circles are detected
    #     i = 0
    #     for ci in circles[0]:
    #         i += 1
    #     if i != 1:
    #         for circle in circles[0]:
    #             if judgecircle(Image, circle):
    #                 circleflag = circle
    #     else:
    #         circleflag = circles[0]
    #     print(circleflag)
    #     x = int(circleflag[0][0])
    #     y = int(circleflag[0][1])
    #     r = int(circleflag[0][2])
    #     result = cv.circle(result, (x, y), r, (255, 255, 255), 1)
    #     result = cv.circle(result, (x, y), 2, (255, 255, 255), -1)
    #     data = [x, y]
    # if flag == 'hole':
    #     x, y, w, h = cv.boundingRect(result)
    #     cv.rectangle(result, (x, y), (x + w, y + h), (255, 255, 255), 2)
    #     data = [x + w / 2, y + h]
    # cv.imshow('test', result)

    # TODO: these try blocks keep the program from crashing when no circle
    # is detected; they should also return a value usable as an existence check
    try:
        circles = cv.HoughCircles(resultball, cv.HOUGH_GRADIENT, 1, 60,
                                  param1=1, param2=5, minRadius=10, maxRadius=50)
        # the 4th parameter is the minimum center distance; param2 is the
        # accumulator threshold: the smaller it is, the more circles are found
        for circle in circles[0]:
            x = int(circle[0])
            y = int(circle[1])
            r = int(circle[2])
            Imag1 = cv.circle(Image, (x, y), r, (0, 255, 0), 1)
            result = cv.circle(Imag1, (x, y), 2, (0, 255, 0), -1)
            databall = [x, y]
    except TypeError:
        print('NULL NULL NULL NULL NULL NULL NULL')
    try:
        x, y, w, h = cv.boundingRect(resulthole)
        cv.rectangle(Image, (x, y), (x + w, y + h), (0, 255, 0), 2)
        result = Image
        datahole = [x + w / 2, y + h]
    except TypeError:
        print('NULL NULL NULL NULL NULL NULL NULL')
    # print(data)
    cv.imshow('test', result)
    try:
        data = [databall, datahole]
        return data, MedirImagball
    except UnboundLocalError:
        print('NULL NULL NULL NULL NULL NULL NULL')
        return [[0, 0], [0, 0]], MedirImagball
"""
cv2.HoughLines():
Parameter 1: a binary image, e.g. the output of Canny edge detection.
Parameter 2: distance resolution (rho).
Parameter 3: angle resolution (theta, in radians).
Parameter 4: accumulator threshold.
"""
import cv2
import numpy as np

img = cv2.imread("./image/form.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)
edges = cv2.Canny(blur, 50, 150, apertureSize=3)

lines = cv2.HoughLines(edges, 1, np.pi / 180, 200)
for i in range(len(lines)):  # mark every detected line
    for rho, theta in lines[i]:
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        x1 = int(x0 + 1000 * (-b))
        y1 = int(y0 + 1000 * (a))
        x2 = int(x0 - 1000 * (-b))
        y2 = int(y0 - 1000 * (a))
        cv2.line(img, (x1, y1), (x2, y2), (255, 0, 0), 3)
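# For comparison with the standard transform documented above, the
# probabilistic variant returns finite segments directly, so no rho/theta
# unpacking is needed; a minimal sketch reusing `edges` from above
# (the threshold/length values here are illustrative):
segments = cv2.HoughLinesP(edges, 1, np.pi / 180, 100,
                           minLineLength=50, maxLineGap=10)
if segments is not None:
    for x1, y1, x2, y2 in segments.reshape(-1, 4):
        cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)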
# the usual imports for this script (not shown in the excerpt)
import argparse
import imutils
import cv2

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path to the input image")
args = vars(ap.parse_args())

# define the answer key which maps the question number
# to the correct answer
ANSWER_KEY = {0: 1, 1: 4, 2: 0, 3: 3, 4: 1}

# load the image, convert it to grayscale, blur it
# slightly, then find edges
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(blurred, 75, 200)

# find contours in the edge map, then initialize
# the contour that corresponds to the document
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
docCnt = None

# ensure that at least one contour was found
if len(cnts) > 0:
    # sort the contours according to their size in
    # descending order
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    # loop over the sorted contours
import cv2
import numpy as np
from matplotlib import pyplot as plt

img = cv2.imread('c.jpg', 0)
edges = cv2.Canny(img, 100, 200)

plt.subplot(121), plt.imshow(img, cmap='gray')
plt.title('Original Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(edges, cmap='gray')
plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
plt.show()
import cv2
import numpy as np
import matplotlib.pyplot as plt

def draw_the_lines(img, lines):
    img = np.copy(img)
    blank_image = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(blank_image, (x1, y1), (x2, y2), (0, 255, 255), thickness=4)
    img = cv2.addWeighted(img, 0.8, blank_image, 1.0, 0.0)
    return img

image = cv2.imread("road_main.png")
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
print(image.shape)
height = image.shape[0]
width = image.shape[1]
region_of_interest_vertices = [
    (0, height),
    (width / 2, height / 2),
    (width, height),
]
gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
canny_image = cv2.Canny(gray_image, 100, 200)
# region_of_interest() (a polygon-masking helper) is defined elsewhere in this project
cropped_image = region_of_interest(canny_image,
                                   np.array([region_of_interest_vertices], np.int32))
lines = cv2.HoughLinesP(cropped_image, rho=6, theta=np.pi / 60, threshold=160,
                        lines=np.array([]), minLineLength=40, maxLineGap=25)
image_with_lines = draw_the_lines(image, lines)
plt.imshow(image_with_lines)
plt.show()
def _save_images(img, boxes, output_dir, prefix=""):
    # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # expected_width = 128
    # expected_height = 128
    # print(len(boxes))
    for i, bbox in enumerate(boxes):
        # crop the chip for this box and grayscale it
        l, t, r, b = bbox
        chip = img[t:b, l:r]
        chipgray = cv2.cvtColor(chip, cv2.COLOR_BGR2GRAY)
        # cv2.imshow("chip", chip)
        # cv2.imshow("chipgray", chipgray)
        # cv2.waitKey(0)

        # blur, run Canny, then find contours on the edge map
        chipblur = cv2.GaussianBlur(chipgray, (5, 5), 1)
        imgCanny = cv2.Canny(chipblur, 10, 250)
        # img_invert = cv2.bitwise_not(thresh)
        # cv2.imshow("threshimage", img_invert)
        cnts, hierarchies = cv2.findContours(imgCanny, cv2.RETR_EXTERNAL,
                                             cv2.CHAIN_APPROX_SIMPLE)
        for c in cnts:
            x, y, w, h = cv2.boundingRect(c)
            if w > 10 and h > 10:
                # pad the box by 5 px, resize to 128x128, binarize (inverted,
                # so the image is flipped when saved), dilate, and write out
                new_img = chip[y - 5:y + h + 5, x - 5:x + w + 5]
                img_resize = cv2.resize(new_img, (128, 128))
                gray_image = cv2.cvtColor(img_resize, cv2.COLOR_BGR2GRAY)
                ret, thresh = cv2.threshold(gray_image, 150, 255, cv2.THRESH_BINARY_INV)
                # img_invert = cv2.bitwise_not(gray_image)
                kernel = np.ones((3, 3), np.uint8)
                img_dilated = cv2.dilate(thresh, kernel, iterations=1)
                cv2.imwrite(os.path.join(output_dir, prefix + "_" + str(i) + ".jpg"),
                            img_dilated)
import cv2
import numpy as np

img = cv2.imread('hammer.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 120)

minLineLength = 20
maxLineGap = 5
# pass the length/gap limits by keyword: positionally the fifth argument of
# HoughLinesP is the optional output array, not minLineLength
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 20,
                        minLineLength=minLineLength, maxLineGap=maxLineGap)
# note: lines[0] holds only the first detected segment; loop over `lines`
# to draw them all
for x1, y1, x2, y2 in lines[0]:
    cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

cv2.imshow("edges", edges)
cv2.imshow("lines", img)
cv2.waitKey()
cv2.destroyAllWindows()
# white to black there is a slope change which is
# detected with Laplacian, and then this gradient
# value is mapped to an OpenCV-suitable image
# value (absolute, since image pixel values can only be +ve)
lap = cv.Laplacian(gray, cv.CV_64F)
lap = np.uint8(np.absolute(lap))

# Sobel
# Sobel computes gradients in 2 directions
sobelx = cv.Sobel(gray, cv.CV_64F, 1, 0)
sobely = cv.Sobel(gray, cv.CV_64F, 0, 1)
cv.imshow('Sobel X', sobelx)
cv.imshow('Sobel Y', sobely)

# Combine the Sobel images
combined_sobel = cv.bitwise_or(sobelx, sobely)
cv.imshow('Combine', combined_sobel)

# Canny
# A more advanced algorithm; it uses Sobel internally
canny = cv.Canny(gray, 150, 175)
cv.imshow("canny", canny)
cv.waitKey(0)
def forward(self, x, return_att=False):
    x_size = x.size()

    # Encoder
    conv1 = self.conv1(x)
    conv2 = self.conv2t(self.conv2(conv1))
    conv3 = self.conv3t(self.conv3(conv2))
    conv4 = self.conv4t(self.conv4(conv3))
    conv5 = self.conv5(conv4)

    # Shape Stream
    ss = F.interpolate(self.d0(conv2), x_size[2:], mode='bilinear', align_corners=True)
    ss = self.res1(ss)
    c3 = F.interpolate(self.c3(conv3), x_size[2:], mode='bilinear', align_corners=True)
    ss = self.d1(ss)
    ss, g1 = self.gate1(ss, c3)
    ss = self.res2(ss)
    ss = self.d2(ss)
    c4 = F.interpolate(self.c4(conv4), x_size[2:], mode='bilinear', align_corners=True)
    ss, g2 = self.gate2(ss, c4)
    ss = self.res3(ss)
    ss = self.d3(ss)
    c5 = F.interpolate(self.c5(conv5), x_size[2:], mode='bilinear', align_corners=True)
    ss, g3 = self.gate3(ss, c5)
    ss = self.fuse(ss)
    ss = F.interpolate(ss, x_size[2:], mode='bilinear', align_corners=True)
    edge_out = self.sigmoid(ss)

    ### Canny Edge
    im_arr = np.mean(x.cpu().numpy(), axis=1).astype(np.uint8)
    canny = np.zeros((x_size[0], 1, x_size[2], x_size[3]))
    for i in range(x_size[0]):
        canny[i] = cv2.Canny(im_arr[i], 10, 100)
    canny = torch.from_numpy(canny).cuda().float()
    ### End Canny Edge

    cat = torch.cat([edge_out, canny], dim=1)
    acts = self.cw(cat)
    acts = self.sigmoid(acts)
    edge = self.expand(acts)

    # Decoder
    conv2 = F.interpolate(conv2, scale_factor=2, mode='bilinear', align_corners=True)
    conv3 = F.interpolate(conv3, scale_factor=2, mode='bilinear', align_corners=True)
    conv4 = F.interpolate(conv4, scale_factor=2, mode='bilinear', align_corners=True)
    center = self.center(self.pool(conv5))
    dec5, att5 = self.dec5([center, conv5])
    dec4, att4 = self.dec4([dec5, conv4])
    dec3, att3 = self.dec3([dec4, conv3])
    dec2, att2 = self.dec2([dec3, conv2])
    dec1 = self.dec1(dec2)
    dec0 = self.dec0(torch.cat([dec1, edge], dim=1))
    x_out = self.final(dec0)

    att2 = F.interpolate(att2, scale_factor=2, mode='bilinear', align_corners=True)
    att3 = F.interpolate(att3, scale_factor=4, mode='bilinear', align_corners=True)
    att4 = F.interpolate(att4, scale_factor=8, mode='bilinear', align_corners=True)
    att5 = F.interpolate(att5, scale_factor=16, mode='bilinear', align_corners=True)

    if return_att:
        return x_out, edge_out, [att2, att3, att4, att5, g1, g2, g3]
    return x_out, edge_out
import cv2
import numpy as np
import imutils
import easyocr
import pymongo
from matplotlib import pyplot as plt

client = pymongo.MongoClient("mongodb+srv://archita:[email protected]/myFirstDatabase?retryWrites=true&w=majority")
db = client['number_plate_project']

img = cv2.imread('temp.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
plt.imshow(cv2.cvtColor(gray, cv2.COLOR_BGR2RGB))

bfilter = cv2.bilateralFilter(gray, 11, 17, 17)  # Noise reduction
edged = cv2.Canny(bfilter, 30, 200)  # Edge detection
plt.imshow(cv2.cvtColor(edged, cv2.COLOR_BGR2RGB))

keypoints = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(keypoints)
contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]

location = None
for contour in contours:
    approx = cv2.approxPolyDP(contour, 10, True)
    if len(approx) == 4:
        location = approx
        break
#location

mask = np.zeros(gray.shape, np.uint8)
new_image = cv2.drawContours(mask, [location], 0, 255, -1)
new_image = cv2.bitwise_and(img, img, mask=mask)
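# easyocr is imported above but the excerpt stops before it is used; a minimal
# continuation that crops the masked plate region and reads its text (the
# variable names introduced here are illustrative):
(x, y) = np.where(mask == 255)
(x1, y1) = (np.min(x), np.min(y))
(x2, y2) = (np.max(x), np.max(y))
cropped = gray[x1:x2 + 1, y1:y2 + 1]
reader = easyocr.Reader(['en'])
result = reader.readtext(cropped)  # list of (bbox, text, confidence) tuples
print(result)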
import cv2
import numpy as np
from matplotlib import pyplot as plt
import math

pdf_path = './pictures/ellispe.png'
img = cv2.imread(pdf_path)
# img = cv2.blur(img, (1, 1))
# Canny edge detection; the aperture size must be passed by keyword (the
# fourth positional argument is the optional output array); thresholds are tunable
imgray = cv2.Canny(img, 600, 100, apertureSize=3)
# cv2.imshow("0", imgray)
ret, thresh = cv2.threshold(imgray, 127, 255, cv2.THRESH_BINARY)
# findContours returns (contours, hierarchy) in OpenCV 4; contours is the
# contour set, from which length, area, etc. can be computed
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    if len(cnt) > 50:
        S1 = cv2.contourArea(cnt)
        ell = cv2.fitEllipse(cnt)
        S2 = math.pi * ell[1][0] * ell[1][1]
        if (S1 / S2) > 0.2:  # area ratio; tune per dataset
            img = cv2.ellipse(img, ell, (0, 255, 0), 2)
            print(str(S1) + "    " + str(S2) + " " + str(ell[0][0]) + " " + str(ell[0][1]))
cv2.imshow("0", img)
cv2.waitKey(0)