height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
fps = cap.get(cv2.CAP_PROP_FPS)
fourcc = cv2.VideoWriter_fourcc(*'DIVX')  # define the codec
out = cv2.VideoWriter('out.avi', fourcc, fps, (int(width), int(height)))  # define the VideoWriter object

while cap.isOpened():
    ret, frame = cap.read()
    top_frame = frame[:470, :, :]
    vmid_frame = frame[470:662, :, :]
    bot_frame = frame[662:, :, :]
    gray = cv2.cvtColor(vmid_frame, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 200, 300)
    lines = cv2.HoughLines(edges, 1, np.pi / 180, 100)  # the last argument is the accumulator threshold
    for i in range(len(lines)):
        for rho, theta in lines[i]:
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a * rho
            y0 = b * rho
            x1 = int(x0 + 1000 * (-b))
            y1 = int(y0 + 1000 * (a))
            x2 = int(x0 - 1000 * (-b))
            y2 = int(y0 - 1000 * (a))
            cv2.line(vmid_frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
    result = np.vstack((top_frame, vmid_frame, bot_frame))
import time

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
cap.set(3, 640)
cap.set(4, 480)
# http://opencv-code.com/tutorials/automatic-perspective-correction-for-quadrilateral-objects/

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 100, 200, apertureSize=3)
    # Note: minLineLength/maxLineGap belong to HoughLinesP; HoughLines only takes an accumulator threshold.
    lines = cv2.HoughLines(edges, 1, np.pi / 180, 130)
    if lines is None:
        continue
    for rho, theta in lines[0]:
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        extend = 1000
        x1 = int(x0 + extend * (-b))
        y1 = int(y0 + extend * (a))
        x2 = int(x0 - extend * (-b))
        y2 = int(y0 - extend * (a))
        cv2.line(gray, (x1, y1), (x2, y2), (0, 0, 255), 2)
# Image difference (assumes objects are stationary / in slow motion)
resized = resized.astype("int16")
resized0 = resized0.astype("int16")
frame1 = np.absolute(resized - resized0)
frame1 = frame1.astype("uint8")

# Non-maximum suppression (replace with thresholding)
# frame1[frame1 < 15] = 0
# cv2.imshow('Frame', frame1)

# Hough transform
edges = cv2.Canny(frame1, 150, 230, apertureSize=3)
# cv2.imshow('edges', edges)
lines = cv2.HoughLines(edges, 1, np.pi / 180, 22)  # Or try HoughLinesP
if lines is not None:
    temp = [0, 0, 0, 0]
    for rho, theta in lines[:, 0, :]:
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        temp[0] += (x0 + 1000 * (-b))
        temp[1] += (y0 + 1000 * (a))
        temp[2] += (x0 - 1000 * (-b))
        temp[3] += (y0 - 1000 * (a))
    no_lines = len(lines[:, 0, :])
    x1 = int((temp[0] / no_lines) * 8)
    y1 = int((temp[1] / no_lines) * 8) + 10  # adjustment (shift)
    x2 = int((temp[2] / no_lines) * 8)
def align(filename): im = cv2.imread(filename) gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) gray = cv2.GaussianBlur(gray, (5, 5), 0) cv2.imshow("gray", R(gray)) edges = cv2.Canny(gray, 220, 300, apertureSize=3) cv2.imshow("edges", R(edges)) cv2.waitKey(0) #pic5 200 else 60 lines = cv2.HoughLines(edges, 1, np.pi / 360, 200) print("lines: ", len(lines)) def merge_lines(lines): nlines = [line[0] for line in lines] km = KMeans(4) km.fit(nlines) return km.cluster_centers_ lines = merge_lines(lines) print(lines) result = im.copy() h, w, c = result.shape for line in lines: # line: xcos(t) + ysin(y) - p = 0 rho, theta = line _cos = np.cos(theta) _sin = np.sin(theta) if _cos != 0: pt1 = (int(rho / _cos), 0) pt2 = (int((rho - h * _sin) / _cos), h) else: #horizontal pt1 = (0, rho) pt2 = (w, rho) cv2.line(result, pt1, pt2, (0, 0, 255), 3) # 求交点 def get_intersection(a, b): #a: (rho1, theta1) #b: (rho2, theta2) _cos0 = np.cos(a[1]) _sin0 = np.sin(a[1]) _cos1 = np.cos(b[1]) _sin1 = np.sin(b[1]) if _sin1 != 0: t = _sin0 / _sin1 x = (a[0] - t * b[0]) / (_cos0 - _cos1 * t) y = (b[0] - x * _cos1) / _sin1 return np.array([x, y]) x = b[0] / _cos1 y = (a[0] - x * _cos0) / _sin0 return np.array([x, y]) tts = np.vstack([get_intersection(lines[3], lines[j]) for j in range(3)]) rec = np.sum(np.multiply(tts, tts), 1) ai = np.argmax(rec) #lines[3] 和 lines[ai] 几乎平行 pts = [] for j in range(3): if j != ai: pts.append(tts[j]) pts.extend( [get_intersection(lines[ai], lines[j]) for j in range(3) if j != ai]) for pt in pts: xy = (int(pt[0]), int(pt[1])) cv2.circle(result, xy, 32, (255, 0, 0), 32) cnts = [0] * 4 for i in range(4): for j in range(4): if pts[i][0] > pts[j][0]: cnts[i] += 1 if pts[i][1] > pts[j][1]: cnts[i] += 1 lt = np.argmin(cnts) rb = np.argmax(cnts) # 计算右上角 maxdx = -np.inf rt = -1 for i in range(4): if i != lt and i != rb: dx = pts[i][0] - pts[lt][0] if dx > maxdx: maxdx = dx rt = i # 计算左下角 for i in range(4): if i != lt and i != rb and i != rt: lb = i paper_p = np.array([(0, 0), (PAPER_WIDTH, 0), (PAPER_WIDTH, PAPER_HEIGHT), (0, PAPER_HEIGHT)]).astype(np.float32) ''' pts = [pts[i].tolist() for i in range(4)] pts.sort() [lt, rt, rb, lb] = [1,3,2,0] sp = np.array([pts[i] for i in [lt,rt,rb,lb]]).astype(np.float32) #print (sp) ''' sp = np.array([pts[i] for i in [lt, rt, rb, lb]]).astype(np.float32) M = cv2.getPerspectiveTransform(sp, paper_p) paper = cv2.warpPerspective(im, M, (int(PAPER_WIDTH), int(PAPER_HEIGHT))) result = cv2.resize(result, (1080, 720)) cv2.imshow("source", R(im)) cv2.waitKey(0) cv2.imshow("lines", R(result)) cv2.waitKey(0) cv2.imshow("paper", R(paper)) cv2.waitKey(0) return paper
verticalsize = rows // 30  # 30 is the original value; it chooses the resolution
# Create structure element for extracting vertical lines through morphology operations
verticalStructure = cv2.getStructuringElement(cv2.MORPH_RECT, (1, verticalsize))
# Apply morphology operations
vertical = cv2.erode(vertical, verticalStructure)
vertical = cv2.dilate(vertical, verticalStructure)
# print(len(vertical[0]))
# implt(vertical)

gray = cv2.addWeighted(horizontal, 0.5, vertical, 0.5, 0)
# gray = cv2.addWeighted(horizontal, 0.5, vertical, 0.5, 0)
implt(gray, 'gray')
cv2.imwrite('../data/pages/separated_lines.jpg', gray)

horizontal_lines = cv2.HoughLines(horizontal, 1, np.pi / 180, 700)  # h800
horizontal_boundary = []
print(len(horizontal_lines))
# lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100, 100, 10)
for i in range(len(horizontal_lines)):
    for rho, theta in horizontal_lines[i]:
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        x1 = int(x0 + 1000 * (-b))
        y1 = int(y0 + 1000 * (a))
        x2 = int(x0 - 1000 * (-b))
        y2 = int(y0 - 1000 * (a))
        # print(x1, x2, y1, y2)
        if abs(y1 - y2) <= 2:
def identifica_cor(frame):
    '''
    Segments the largest object whose color is similar to cor_h (HUE of the color, in HSV space).
    '''
    hsv1_M = np.array([0, 0, 0], dtype=np.uint8)
    hsv2_M = np.array([0, 0, 255], dtype=np.uint8)  # placeholders

    valores_esq = {"a_esq": [], "b_esq": [], "rho_esq": [], "aMed_esq": 1.0, "bMed_esq": 1.0, "rhoMed_esq": 1.0}
    valores_dir = {"a_dir": [], "b_dir": [], "rho_dir": [], "aMed_dir": 1.0, "bMed_dir": 1.0, "rhoMed_dir": 1.0}
    # aMed_esq = 1
    # bMed_esq = 1
    # rhoMed_esq = 1
    # aMed_dir = 1
    # bMed_dir = 1
    # rhoMed_dir = 1
    min_length = 50  # Improve the mask and increase min_len
    lista_ab = []
    # a_esq = []
    # b_esq = []
    # rho_esq = []
    # a_dir = []
    # b_dir = []
    # rho_dir = []

    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask1 = cv2.inRange(hsv, hsv1_M, hsv2_M)
    seg = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, np.ones((1, 1)))
    selecao = cv2.bitwise_and(frame, frame, mask=seg)
    blur = cv2.GaussianBlur(selecao, (5, 5), 0)

    min_contrast = 50
    max_contrast = 250
    linhas = cv2.Canny(blur, min_contrast, max_contrast)
    bordas_color = cv2.cvtColor(linhas, cv2.COLOR_GRAY2BGR)  # Canny output is single-channel, so GRAY2BGR

    lines = cv2.HoughLines(linhas, 1, np.pi / 180, min_length)
    if lines is not None:
        for line in lines:
            rho, theta = line[0]
            a = np.cos(theta)
            b = np.sin(theta)
            lista_ab.append([a, b, rho])

    for abrho in lista_ab:
        if -18 < abrho[0] < -0.1:
            valores_esq["a_esq"].append(abrho[0])
            valores_esq["b_esq"].append(abrho[1])
            valores_esq["rho_esq"].append(abrho[2])
        elif 18 > abrho[0] > 0.1:
            valores_dir["a_dir"].append(abrho[0])
            valores_dir["b_dir"].append(abrho[1])
            valores_dir["rho_dir"].append(abrho[2])

    if (len(valores_esq["a_esq"]) & len(valores_esq["b_esq"]) & len(valores_esq["rho_esq"])) != 0:
        valores_esq["aMed_esq"] = sum(valores_esq["a_esq"]) / len(valores_esq["a_esq"])
        valores_esq["bMed_esq"] = sum(valores_esq["b_esq"]) / len(valores_esq["b_esq"])
        valores_esq["rhoMed_esq"] = sum(valores_esq["rho_esq"]) / len(valores_esq["rho_esq"])
    if (len(valores_dir["a_dir"]) & len(valores_dir["b_dir"]) & len(valores_dir["rho_dir"])) != 0:
        valores_dir["aMed_dir"] = sum(valores_dir["a_dir"]) / len(valores_dir["a_dir"])
        valores_dir["bMed_dir"] = sum(valores_dir["b_dir"]) / len(valores_dir["b_dir"])
        valores_dir["rhoMed_dir"] = sum(valores_dir["rho_dir"]) / len(valores_dir["rho_dir"])

    desenhar_reta_media(frame, valores_esq["aMed_esq"], valores_esq["bMed_esq"], valores_esq["rhoMed_esq"])
    desenhar_reta_media(frame, valores_dir["aMed_dir"], valores_dir["bMed_dir"], valores_dir["rhoMed_dir"])

    x_ponto, y_ponto = interseccao(frame,
                                   valores_esq["aMed_esq"], valores_esq["bMed_esq"], valores_esq["rhoMed_esq"],
                                   valores_dir["aMed_dir"], valores_dir["bMed_dir"], valores_dir["rhoMed_dir"])
    media = (x_ponto, y_ponto)

    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(mask1, 'Press q to quit', (0, 50), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.imshow('Frame', frame)

    centro = (frame.shape[1] // 2, frame.shape[0] // 2)
    return media, centro
dst = cv2.Canny(src, 50, 200)  # apply the Canny edge detector to the src image
cdst = cv2.cvtColor(dst, cv2.COLOR_GRAY2BGR)  # convert the image to BGR so colored lines can be drawn

if True:  # HoughLinesP
    lines = cv2.HoughLinesP(dst, 10, math.pi / 180.0, 100, np.array([]), 5, 5)
    print("Used Probabilistic Hough Transform")
    print("The probabilistic Hough transform returns the end points of the detected lines")
    a, b, c = lines.shape
    print("Value of a:", a, "value of lines.shape:", lines.shape)
    for i in range(a):
        # Draw a line joining the start point to the end point, in red (BGR)
        cv2.line(cdst, (lines[i][0][0], lines[i][0][1]), (lines[i][0][2], lines[i][0][3]),
                 (0, 0, 255), 3, cv2.LINE_AA)
else:  # HoughLines
    # Hopefully we never fall into this branch
    lines = cv2.HoughLines(dst, 1, math.pi / 180.0, 50, np.array([]), 0, 0)
    a, b, c = lines.shape
    for i in range(a):
        rho = lines[i][0][0]
        theta = lines[i][0][1]
        a = math.cos(theta)
        b = math.sin(theta)
        x0, y0 = a * rho, b * rho
        pt1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * (a)))
        pt2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * (a)))
        cv2.line(cdst, pt1, pt2, (0, 0, 255), 3, cv2.LINE_AA)
    print("Used old vanilla Hough transform")
    print("Returned points will be radii and angles")

cv2.imshow("source", src)
cv2.imshow("detected lines", cdst)
def line_detection_low(image):
    gray = cv.cvtColor(image, cv.COLOR_RGB2GRAY)
    edges = cv.Canny(gray, 50, 310)  # the apertureSize parameter actually defaults to 3  # 50 310
    # cv.imshow("edges", edges)
    edge = Image.fromarray(edges)
    edge.save("edge.jpeg")
    lines = cv.HoughLines(edges, 1, np.pi / 180, 30)  # 68
    # l1 = lines[:, 0, :]
    # print(l1)
    mink = float('inf')
    maxk = -float('inf')
    for line in lines:
        rho, theta = line[0]  # line[0] holds the polar radius and angle of the line; the angle is in radians
        a = np.cos(theta)  # theta is in radians
        b = np.sin(theta)
        x0 = a * rho  # x = r * cos(theta)
        y0 = b * rho  # y = r * sin(theta)
        x1 = int(x0 + 1000 * (-b))  # x coordinate of the line's start point
        y1 = int(y0 + 1000 * a)  # y coordinate of the start point
        x2 = int(x0 - 1000 * (-b))  # x coordinate of the line's end point
        y2 = int(y0 - 1000 * a)  # y coordinate of the end point; note: the value 1000 sets how long the drawn segment is -- smaller values give shorter segments, larger values longer ones
        print("x1: %s, y1:%s, x2:%s, y2:%s" % (x1, y1, x2, y2))
        k = (y2 - y1) / (x2 - x1)
        if k > maxk:
            maxk = k
            xmax1 = x1
            ymax1 = y1
            xmax2 = x2
            ymax2 = y2
            lineMax = line
        if k < mink:
            mink = k
            xmin1 = x1
            ymin1 = y1
            xmin2 = x2
            ymin2 = y2
            lineMin = line
    cv.line(image, (xmax1, ymax1), (xmax2, ymax2), (255, 0, 0), 2)  # point coordinates must be tuples, not lists
    cv.line(image, (xmin1, ymin1), (xmin2, ymin2), (255, 0, 0), 2)  # point coordinates must be tuples, not lists
    crossX = int((maxk * xmax1 - ymax1 - mink * xmin1 + ymin1) / (maxk - mink))
    crossY = int((maxk * mink * (xmax1 - xmin1) + maxk * ymin1 - mink * ymax1) / (maxk - mink))
    print(crossX, 250 - crossY)
    height = 250 - crossY
    print("Apex height: " + str(int(height)))
    x1 = (-height) / mink + crossX
    x2 = (-height) / maxk + crossX
    print("Intersections with the x axis: %f, %f" % (x1, x2))
    # length of the base
    xl = abs(x1 - x2)
    cv.circle(image, (crossX, crossY), 3, (0, 255, 0), -1)  # intersection of the two lines
    cv.circle(image, (xmax2, ymax2), 3, (0, 0, 255), -1)
    cv.circle(image, (xmin1, ymin1), 3, (0, 0, 255), -1)
    vector1 = np.array([xmax2 - crossX, ymax2 - crossY])
    vector2 = np.array([xmin1 - crossX, ymin1 - crossY])
    L1 = np.sqrt(vector1.dot(vector1))
    L2 = np.sqrt(vector2.dot(vector2))
    cos_angle = vector1.dot(vector2) / (L1 * L2)
    angle = np.arccos(cos_angle)
    angle2 = angle * 360 / 2 / np.pi
    print(angle2)
    # cv.imshow("image-lines", image)
    im = Image.fromarray(image)
    # im.save(image_line)
    cv.waitKey(0)
    return angle2, edge, image, height, xl
# Codes inspired by:
# Github.com/imvinod/
# Official Documentation
#==============================================================================
import cv2
import numpy as np

# HOUGHLINES LINE DETECTION
image = cv2.imread('imgs/demo1.jpg')
cv2.imshow('Original', image)

gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 100, 170)

rho_accuracy = 1
theta_accuracy = np.pi / 180
threshold = 210
lines = cv2.HoughLines(edges, rho_accuracy, theta_accuracy, threshold)

for line in lines:
    rho, theta = line[0]
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 1000 * (-b))
    y1 = int(y0 + 1000 * (a))
    x2 = int(x0 - 1000 * (-b))
    y2 = int(y0 - 1000 * (a))
    cv2.line(image, (x1, y1), (x2, y2), (255, 255, 0), 2)

# cv2.imwrite('houghlines.jpg', image)
cv2.imshow('Hough Lines', image)
def get_hough_lines(self, edges): kernel = np.ones((11, 11), np.uint8) edges = cv2.dilate(edges, kernel, iterations=1) # cv2.imshow('dilated edges in hough', edges) # cv2.waitKey(0) # cv2.destroyAllWindows() # kernel = np.ones((2,2),np.uint8) # dilated_edges = cv2.dilate(edges,kernel,iterations = 1) minLength = 80 maxLineGap = 50 lines = cv2.HoughLines(edges, 1, np.pi / 180, 80) # lines = np.squeeze(lines) # lines = cv2.HoughLinesP(edges,1,np.pi/120,10, minLength, maxLineGap) if (lines is not None): lines = np.squeeze(lines) print("houghlines shape =", lines.shape) edges3CH = np.dstack((edges, edges, edges)) new_lines = [] rect_line = np.zeros((1, 2)) for rho, theta in lines: a = np.cos(np.float(theta)) b = np.sin(np.float(theta)) ##### convert line from polar coordinates to cartesian coordinattes slope = -a / b intercept = rho / b # print("intercept, slope = ",intercept,slope) rect_line = np.vstack((rect_line, np.array([intercept, slope]))) x0 = a * rho y0 = b * rho x1 = int(x0 + 1000 * (-b)) y1 = int(y0 + 1000 * (a)) x2 = int(x0 - 1000 * (-b)) y2 = int(y0 - 1000 * (a)) cv2.line(edges3CH, (x1, y1), (x2, y2), (0, 0, 255), 2) # new_lines.append([rh,th]) # cv2.imshow('all houg_lines', edges3CH) # cv2.waitKey(0) # cv2.destroyAllWindows() print("new_lines = ", new_lines) np_lines = np.array(new_lines) status = False line_clusters = self.get_line_clusters(np_lines, 25, 0.4) print("lines clusters = ", line_clusters) if (line_clusters.shape[0] >= 4): mod_line_cluster = self.augment_lines(line_clusters) # print ("mod_line_cluster",mod_line_cluster) rect_line = np.zeros((1, 2)) # print line_clusters # for rho,theta in mod_line_cluster: for rho, theta in new_lines: # rho = rho+1 a = np.cos(np.float(theta)) b = np.sin(np.float(theta)) ##### convert line from polar coordinates to cartesian coordinattes slope = -a / b intercept = rho / b # print("intercept, slope = ",intercept,slope) rect_line = np.vstack( (rect_line, np.array([intercept, slope]))) x0 = a * rho y0 = b * rho x1 = int(x0 + 1000 * (-b)) y1 = int(y0 + 1000 * (a)) x2 = int(x0 - 1000 * (-b)) y2 = int(y0 - 1000 * (a)) cv2.line(edges3CH, (x1, y1), (x2, y2), (0, 0, 255), 2) rect_line = rect_line[1:, :] # print ("rect_line",rect_line) # cv2.imshow('clustered hough lines', edges3CH) # cv2.waitKey(0) # cv2.destroyAllWindows() status = True return status, rect_line else: # print('no hough lines') status = False return status, lines else: status = False return status, lines
# plt.title(titles[i])
# plt.xticks([]), plt.yticks([])
# plt.show()

## Detect line segments
# input_img = canny_img
# lines = cv2.HoughLinesP(input_img, 1, np.pi / 180, 100, 100, 10)
# for x1, y1, x2, y2 in lines[0]:
#     cv2.line(input_img, (x1, y1), (x2, y2), (0, 255, 0), 100)
# win4 = cv2.namedWindow('xianduan', flags=0)
# cv2.imshow('xianduan', input_img)

### Hough transform
# The last argument says how many points are needed to decide a line
input_img = canny_img1
lines = cv2.HoughLines(input_img, 1, np.pi / 10, 40)  # an empirical value is used for the last parameter
lines1 = lines[:, 0, :]  # extract as a 2D array
for rho, theta in lines1[:]:
    print(rho, theta)
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 1000 * (-b))
    y1 = int(y0 + 1000 * (a))
    x2 = int(x0 - 1000 * (-b))
    y2 = int(y0 - 1000 * (a))
    # print(x1, y1, x2, y2)
    input_img = cv2.line(input_img, (x1, y1), (x2, y2), (255, 0, 0), 2)
win5 = cv2.namedWindow('HF', flags=0)
import cv2
import numpy as np

"""The Hough transform is a popular technique to detect any shape, as long as you can
represent that shape in a mathematical form. It can detect the shape even if it is
broken or distorted a little bit."""

# Steps:
# Edge detection
# Mapping of edge points to the Hough space and storing them in an accumulator
# Interpretation of the accumulator to yield lines of infinite length
#   (the interpretation is done by thresholding and other possible constraints)
# Conversion of infinite lines to finite lines

img = cv2.imread(r'data/sudoku.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edge = cv2.Canny(gray, 50, 150, apertureSize=3)

lines = cv2.HoughLines(edge, 1, np.pi / 180, 200)
for line in lines:
    rho, theta = line[0]
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 1000 * (-b))
    y1 = int(y0 + 1000 * (a))
    x2 = int(x0 - 1000 * (-b))
    y2 = int(y0 - 1000 * (a))
    cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
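# The last step listed above, converting infinite lines into finite segments, is what
# cv2.HoughLinesP does directly. A minimal sketch, assuming the same 'data/sudoku.png'
# image and Canny parameters as the snippet above; the _p variable names are illustrative only.
import cv2
import numpy as np

img_p = cv2.imread(r'data/sudoku.png')
gray_p = cv2.cvtColor(img_p, cv2.COLOR_BGR2GRAY)
edge_p = cv2.Canny(gray_p, 50, 150, apertureSize=3)

# HoughLinesP returns segment endpoints (x1, y1, x2, y2) instead of (rho, theta),
# so no polar-to-Cartesian conversion is needed before drawing.
segments = cv2.HoughLinesP(edge_p, 1, np.pi / 180, 100, minLineLength=100, maxLineGap=10)
if segments is not None:
    for x1, y1, x2, y2 in segments[:, 0]:
        cv2.line(img_p, (x1, y1), (x2, y2), (0, 255, 0), 2)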
print("Horitzontal lines: ", len(horitzontalLines)) for i in range(0, len(verticalLines)): #print VERTICAL in GREEN cv2.line(cdstP, (verticalLines[i][0][0], verticalLines[i][0][1]), (verticalLines[i][0][2], verticalLines[i][0][3]), (0, 255, 0), 3, cv2.LINE_AA) print("LINE ", i, ": ") print("--------------") print(verticalLines[i]) print("Vertical lines: ", len(verticalLines)) elif method == 2: lines = cv2.HoughLines(dst, 1, np.pi / 180, 100, None, 0, 0) #linesP = cv2.HoughLinesP(dst, 1, np.pi / 180, 50, None, 50, 10) # SOURCE size = 140 lines -- ANotacio utilitzada per veure si anava millorant la detecció de linies # mathematicalLines es un array que intenta expressar les linies d'una manera més humana # amb punts d'inici, final, graus, distàncies.... valid_lines = [] for line in lines: rho, theta = line[0] if theta > math.pi: theta = theta - math.pi if theta > math.pi / 2 + math.pi / 4: theta = theta - math.pi assigned = False
def hough(self, picture):
    img = cv2.cvtColor(picture, cv2.COLOR_RGB2GRAY)
    # uu = rgb2gray(picture)
    # cv2.imshow("gray", img)
    # gradX = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
    # gradY = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=0, dy=1, ksize=-1)
    # cv2.imshow("gradX", gradX)
    # x = cv2.waitKey(1) & 0xFF
    # cv2.imshow("gradY", gradY)
    # x = cv2.waitKey(1)
    # subtract the y-gradient from the x-gradient
    # gradient = cv2.subtract(gradX, gradY)
    # img = cv2.convertScaleAbs(gradient)
    img = cv2.blur(img, (3, 3))
    edges = cv2.Canny(img, 50, 150, apertureSize=3)
    # cv2.imshow('cannyuu', edges)
    lines = cv2.HoughLines(edges, 1, np.pi / 180, 118)  # an empirical value is used for the last parameter
    if lines is not None:
        for line in lines[0]:
            line_pix = line[0]
    else:
        line_pix = -1
    result = img.copy()
    shuipingx = []
    if lines is not None:
        for lop in range(int(lines.size / 2)):
            for line in lines[lop]:
                rho = line[0]  # the first element is the distance rho
                theta = line[1]  # the second element is the angle theta
                # print(rho)
                # print(theta)
                if (theta < (np.pi / 4.)) or (theta > (3. * np.pi / 4.0)):  # vertical line
                    # intersection of this line with the first row
                    pt1 = (int(rho / np.cos(theta)), 0)
                    # intersection of this line with the last row
                    pt2 = (int((rho - result.shape[0] * np.sin(theta)) / np.cos(theta)), result.shape[0])
                    # draw a white line
                    cv2.line(result, pt1, pt2, 0, 1)
                else:  # horizontal line
                    # intersection of this line with the first column
                    pt1 = (0, int(rho / np.sin(theta)))
                    # intersection of this line with the last column
                    pt2 = (result.shape[1], int((rho - result.shape[1] * np.cos(theta)) / np.sin(theta)))
                    # draw a line
                    if (pt2[0] - pt1[0] > 0):
                        cv2.line(result, pt1, pt2, 0, 5)
                        shuipingx.append(pt1[1])
                    # print(('pt2=', pt2, 'pt1 =', pt1))
            # if shuipingx > 0:
            #     break
    nowiq = datetime.datetime.now()
    if not os.path.isdir('hough'):
        os.makedirs('hough')
        print('create dir picture')
    cv2.imwrite('hough/%s_%s_%s_%s_%s_%s.jpg' % (nowiq.year, nowiq.month, nowiq.day,
                                                 nowiq.hour, nowiq.minute, nowiq.second), result)
    return result, shuipingx, line_pix
def detectTurn(self): ### Params for region of interest bot_left = [0, 480] bot_right = [640, 480] apex_right = [640, 170] apex_left = [0, 170] v = [np.array([bot_left, bot_right, apex_right, apex_left], dtype=np.int32)] cropped_raw_image = self.region_of_interest(cf.img_rgb_raw, v) # cropped_raw_image = cf.img_rgb_raw[self.crop_top:self.crop_bottom, :] ### Run canny edge dection and mask region of interest # gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) hsv = cv2.cvtColor(cropped_raw_image, cv2.COLOR_BGR2HSV) lower_white = np.array([0,0,255], dtype=np.uint8) upper_white = np.array([179,255,255], dtype=np.uint8) mask = cv2.inRange(hsv, lower_white, upper_white) dilation = cv2.dilate(mask, self.kernel, iterations=1) closing = cv2.morphologyEx(dilation, cv2.MORPH_GRADIENT, self.kernel) closing = cv2.morphologyEx(dilation, cv2.MORPH_CLOSE, self.kernel) blur = cv2.GaussianBlur(closing, (9,9), 0) edge = cv2.Canny(blur, 150,255) cropped_image = self.region_of_interest(edge, v) # cropped_image = edge[self.crop_top:self.crop_bottom, :] # blank_image = np.zeros(cropped_raw_image.shape) # turnSignal = False lines = cv2.HoughLines(cropped_image, rho=0.2, theta=np.pi/80, threshold=70) if lines is not None: # print('lines', len(lines)) for line in lines: for rho,theta in line: a = np.cos(theta) b = np.sin(theta) x0 = a*rho y0 = b*rho x1 = int(x0 + 1000*(-b)) y1 = int(y0 + 1000*(a)) x2 = int(x0 - 1000*(-b)) y2 = int(y0 - 1000*(a)) cv2.line(cropped_raw_image, (x1,y1), (x2,y2), cf.listColor[0], 2) # cv2.line(blank_image, (x1,y1), (x2,y2), cf.listColor[0], 2) if abs(y1-y2) < 40: # turnSignal = True # break return True # cv2.imshow('hsv', hsv) # cv2.imshow('closing', closing) # cv2.imshow('cropped_image', cropped_image) # cv2.imshow('cropped_raw_image', cropped_raw_image) # cv2.imshow('blank_image', blank_image) return False
gray = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)
low_threshold, high_threshold = 100, 500
edges = cv2.Canny(gray, low_threshold, high_threshold)
cv2.imshow('edges', gray)
cv2.waitKey(0)

rho = 3  # distance resolution in pixels of the Hough grid
theta = np.pi / 180  # angular resolution in radians of the Hough grid
threshold = 200  # minimum number of votes (intersections in a Hough grid cell)
# min_line_length = 10  # minimum number of pixels making up a line
# max_line_gap = 5  # maximum gap in pixels between connectable line segments
line_image = np.copy(img) * 0  # creating a blank to draw lines on

lines = cv2.HoughLines(edges, rho, theta, threshold)
print(len(lines))
for line in lines:
    rho, theta = line[0]
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 1000 * (-b))
    y1 = int(y0 + 1000 * (a))
    x2 = int(x0 - 1000 * (-b))
    y2 = int(y0 - 1000 * (a))
    cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 1)
def main(argv): ## [load] default_file = 'paashaas.jpg' maxlen = 7 maxhoek = np.pi / 2 # in radialen offset_y = 200 + 200 offset_x = 180 filename = argv[0] if len(argv) > 0 else default_file # Loads an image src = cv.imread(cv.samples.findFile(filename), 0) # cv.IMREAD_GRAYSCALE) # Check if image is loaded fine if src is None: print('Error opening image!') print('Usage: hough_lines.py [image_name -- default ' + default_file + '] \n') return -1 ## [load] ## [edge_detection] # Edge detection dst = cv.Canny(src, 50, 200, None, 3) ## [edge_detection] # Copy edges to the images that will display the results in BGR cdst = cv.cvtColor(dst, cv.COLOR_GRAY2BGR) cdstP = np.copy(cdst) ## [hough_lines] # Standard Hough Line Transform lines = cv.HoughLines(dst, 1, np.pi / 180, 150, None, 0, 0) ## [hough_lines] ## [draw_lines] # Draw the lines if lines is not None: for i in range(0, len(lines)): rho = lines[i][0][0] theta = lines[i][0][1] a = math.cos(theta) b = math.sin(theta) x0 = a * rho y0 = b * rho pt1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * (a))) pt2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * (a))) cv.line(cdst, pt1, pt2, (0, 0, 255), 3, cv.LINE_AA) ## [draw_lines] ## [hough_lines_p] # Probabilistic Line Transform linesP = cv.HoughLinesP(dst, 1, np.pi / 2, 1, None, 0, 0) ## [hough_lines_p] ## [draw_lines_p] # Draw the lines coorda = [] # array van coordinaten if linesP is not None: File1 = open("tabel2.txt", "w") for i in range(0, len(linesP)): l = linesP[i][0] cv.line(cdstP, (l[0], l[1]), (l[2], l[3]), (0, 0, 255), 3, cv.LINE_AA) coorda.append([[l[0] + offset_x, -l[1] + offset_y], [l[2] + offset_x, -l[3] + offset_y]]) File1.write("[" + str(l[0]) + " " + str(l[1]) + "]" + "\n") File1.close() coorda = sorted(coorda) # array of coordinates groepnr = 0 coordgroepen = [coorda[0]] # array of coordinates while len(coorda) > 0: afstanden = [] weg = [] for i in coorda: if len(coordgroepen[groepnr]) >= 3: afst = afstand(coordgroepen[groepnr][-3], coordgroepen[groepnr][-1], i[1], maxlen, maxhoek) else: afst = afstand(coordgroepen[groepnr][1], coordgroepen[groepnr][-1], i[1], maxlen, maxhoek) if afst < maxlen: afstanden.append([afst, i]) if len(afstanden) == 0: if len(coorda) > 0: coordgroepen.extend([coorda[0]]) groepnr += 1 else: afstanden.sort() coordgroepen[groepnr].extend(afstanden[0][1]) coorda.remove(afstanden[0][1]) print(groepnr) plt.axis('off') plt.axis('equal') for i in coordgroepen: xs = [x[0] for x in i] ys = [x[1] for x in i] plt.plot(xs, ys) # plt.savefig('dobot\\dobot_min' + minlen.__str__() + '_max' + maxlen.__str__() + '_' + default_file) plt.show() ## [draw_lines_p] ## [imshow] # Show results # cv.imshow("Source", src) # cv.imshow("Detected Lines (in red) - Standard Hough Line Transform", cdst) cv.imshow("Detected Lines (in red) - Probabilistic Line Transform", cdstP) ## [imshow] ## [exit] # Wait and Exit cv.waitKey() return 0
def find_road_boundary(img): img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) img_blur = cv2.GaussianBlur(img_gray, (3, 3), 0) contour = cv2.Canny(img_blur, 20, 60) lines = cv2.HoughLines(contour, 1, np.pi / 180, 100, 100, 5) lines_direction = [] lines_intercept = [] lines_dataset2 = [] lines1 = lines[:, 0, :] #for line in lines1: # [x1,y1,x2,y2] = line #print(x1) #print(x2) #print(y1) #print(y2) # if((x1 in range(0,480)) and (x2 in range(0,480)) and (y1 in range(0,360)) and (y2 in range(0,360))): # dx,dy = x2-x1,y2-y1 # angle = np.arctan2(dy,dx) * (180/np.pi) # if (abs(angle)>10 and abs(angle) < 75): # print("%d %d %d %d"% (x1,y1,x2,y2)) # direction = dy/(dx+0.000001) # intercept = y1 - direction*x1 # if(abs(direction)>=40): # direction = 50 # intercept = 100000 # print(direction) # print(intercept) # lines_direction.append(direction) # lines_intercept.append(intercept) # cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2) # lines_dataset2.append(line) # plt.figure() # plt.scatter(lines_direction,lines_intercept) # plt.show() # lines_dataset = np.array([[lines_direction[i],lines_intercept[i]] for i in range(len(lines_direction))]) # print(lines_dataset) #K = range(1,4) #for k in K: # k = 2 # kmeans = KMeans(n_clusters=k) # kmeans.fit(lines_dataset) # print(kmeans.cluster_centers_) # boundaries = [] # for i in range (kmeans.cluster_centers_.shape[0]): # boundary = [] # for j in range(360): # x = int((j - kmeans.cluster_centers_[i][1])/kmeans.cluster_centers_[i][0]) #cv2.circle (img,(x,j),2,(0,0,255),2) #meandistortion = sum lines_dataset = [] for rho, theta in (lines1[:]): print(theta * 180 / np.pi) if ((theta * 180 / np.pi < 80) or (theta * 180 / np.pi > 105)): lines_dataset.append([rho, theta]) a = np.cos(theta) b = np.sin(theta) x0 = a * rho y0 = b * rho x1 = int(x0 + 1000 * (-b)) y1 = int(y0 + 1000 * (a)) x2 = int(x0 - 1000 * (-b)) y2 = int(y0 - 1000 * (a)) #if() cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 1) #cv2.imshow("line_detector",img) #cv2.imshow('Canny',contour) #cv2.waitKey(50000) lines_dataset = np.array(lines_dataset) k = 3 kmeans = KMeans(n_clusters=k) kmeans.fit(lines_dataset) print(kmeans.cluster_centers_) boundaries = [] for i in range(kmeans.cluster_centers_.shape[0]): boundary = [] for j in range(360): x = int((j - kmeans.cluster_centers_[i][1]) / kmeans.cluster_centers_[i][0]) cv2.circle(img, (x, j), 2, (0, 255, 0), 2) cv2.imshow("line_detector", img) #cv2.imshow('Canny',contour) cv2.waitKey(50000)
clip_img = new_img[left_top_w:right_bottle_w, left_top_l:right_bottle_l, :]  # result of cropping the image
# cv.imwrite('pic/clip.jpg', clip_img)
# cv.imshow('clip_img', clip_img)
gray_img = cv.cvtColor(clip_img, cv.COLOR_BGR2GRAY)  # convert to grayscale
edges_img = cv.Canny(gray_img, 50, 150, apertureSize=3)  # edge detection
ret, binary = cv.threshold(gray_img, 0, 255, cv.THRESH_BINARY | cv.THRESH_TRIANGLE)
# cv.imshow('gray_img', gray_img)
# cv.imshow('edges', edges_img)
# cv.imshow('thresh', binary)
lines = cv.HoughLines(edges_img, 1, np.pi / 180, 150)  # HoughLines straight-line detection
# print(lines.shape)
ls = []  # store the points on the lines
for line in lines:
    rho, theta = line[0]
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 1000 * (-b))
    y1 = int(y0 + 1000 * (a))
    x2 = int(x0 - 1000 * (-b))
    y2 = int(y0 - 1000 * (a))
cv2.imshow('original', img)
b_channel = np.array(img[:, :, 0]).astype('float')
g_channel = np.array(img[:, :, 1]).astype('float')
r_channel = np.array(img[:, :, 2]).astype('float')
# cv2.imshow('b_chan', b_channel)
# cv2.imshow('g_chan', g_channel)
# cv2.imshow('r_chan', r_channel)
bgr_channel = np.add((np.add(b_channel, g_channel)), r_channel)
img_rec_red = np.divide(r_channel, bgr_channel)
img_rec_red = img_rec_red * 255
img_rec_red = np.floor(img_rec_red).astype('uint8')

# gray = cv2.cvtColor(img_rec_red, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(img_rec_red, 50, 150, apertureSize=3)
lines = cv2.HoughLines(edges, 1, np.pi / 40, 40)
print("raw lines:")
print(lines)

# convert the grayscale image to a binary image
ret, thresh = cv2.threshold(img_rec_red, 127, 255, 0)
cv2.imshow('thresh', thresh)

# calculate moments of the binary image
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
try:
    for c in contours:
        # calculate moments for each contour
        M = cv2.moments(c)
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
imlow = cv2.cvtColor(imlow1, cv2.COLOR_BGR2GRAY)
imlow = cv2.normalize(imlow.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)
## laplacian = cv2.Laplacian(imlow, cv2.CV_64F)
## laplacian1 = np.absolute(laplacian)
## laplacian2 = np.uint8(laplacian1)
kernel = np.array([[-1, 0, 1]])
dst = cv2.filter2D(imlow, -1, kernel)
dst[dst < 0] = 0  # where values are low
dst[dst > 1] = 1  # where values are high
dst = dst * 255
dst = np.uint8(dst)
ret, th3 = cv2.threshold(dst, 20, 255, cv2.THRESH_BINARY)

lines = cv2.HoughLines(th3, 1, np.pi / 180, 1)
condition11 = 1
condition12 = 1
if lines is not None:  # comparing a NumPy array to [] is ambiguous; check for None instead
    lines = np.double(lines)
    for rho, theta in lines[:, 0, :]:
        if (theta < 80 * npi) & (theta > 0 * npi) & (condition11 == 1):
            thetaL = theta
            rhoL = rho
            condition11 = 0
        if (theta > 120 * npi) & (theta < 180 * npi) & (condition12 == 1):
            thetaR = theta
            rhoR = rho
            condition12 = 0
def hough_line(edges, min_line_length=100, max_line_gap=10):
    # cv2.HoughLines has no minLineLength/maxLineGap parameters (those belong to
    # cv2.HoughLinesP), so they are not forwarded here.
    lines = cv2.HoughLines(edges, 1, np.pi / 180, 125)
    lines = np.reshape(lines, (-1, 2))
    return lines
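# If segment length and gap really are the intended controls, the probabilistic variant
# accepts them directly. A minimal sketch under that assumption; the helper name
# hough_line_p is illustrative, not from the original code.
import cv2
import numpy as np

def hough_line_p(edges, min_line_length=100, max_line_gap=10):
    # Returns an (N, 4) array of segment endpoints (x1, y1, x2, y2), or an empty array.
    segments = cv2.HoughLinesP(edges, 1, np.pi / 180, 125,
                               minLineLength=min_line_length, maxLineGap=max_line_gap)
    if segments is None:
        return np.empty((0, 4), dtype=int)
    return np.reshape(segments, (-1, 4))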
def main():
    # for eachArg in sys.argv:
    #     print(eachArg)
    filename = sys.argv[1]
    image = cv2.imread(filename)
    if image is None:
        print('Unable to open file ', filename)
        return
    rows = image.shape[0]
    cols = image.shape[1]
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 150, 250, apertureSize=3)
    # Arguments are distance resolution, angle resolution, threshold.
    # A large distance resolution yields larger bins, so more lines meet the
    # threshold. A larger angular resolution yields fewer lines, with similar
    # lines counting as the same line.
    lines = cv2.HoughLines(edges, 2, 2 * np.pi / 180, 100)
    if lines is None:
        print('No lines found')
        return
    for line in lines:
        for rho, theta in line:
            if theta != 0:  # ignore verticals
                a = np.cos(theta)
                b = np.sin(theta)
                x0 = a * rho
                y0 = b * rho
                seglength = 1000
                x1 = int(x0 + seglength * (-b))
                y1 = int(y0 + seglength * (a))
                x2 = int(x0 - seglength * (-b))
                y2 = int(y0 - seglength * (a))
                cv2.line(image, (x1, y1), (x2, y2), (0, 0, 255), 2)
    xtotal = 0
    xcount = 0
    ytotal = 0
    ycount = 0
    for line1, line2 in combinations(lines, 2):
        rho1 = line1[0][0]
        theta1 = line1[0][1]
        rho2 = line2[0][0]
        theta2 = line2[0][1]
        # print(rho1, theta1, rho2, theta2)
        if theta1 != 0 and theta2 != 0:  # ignore verticals
            x0, y0 = intersection(rho1, theta1, rho2, theta2)
            xtotal += x0
            xcount += 1
            ytotal += y0
            ycount += 1
            cv2.circle(image, (x0, y0), 3, (255, 0, 0), -1)
    cv2.circle(image, (int(xtotal / xcount), int(ytotal / ycount)), 50, (0, 255, 255), 3)
    cv2.namedWindow('Hall with Line', cv2.WINDOW_NORMAL)
    cv2.imshow('Hall with Line', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
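# A quick way to check the resolution trade-off described in the comment inside main()
# above: run the same edge map through HoughLines at two distance/angle resolutions and
# compare how many lines clear the same threshold. A minimal sketch; 'edges' is assumed
# to be any Canny output such as the one computed in main(), and count_lines is an
# illustrative helper name.
import cv2
import numpy as np

def count_lines(edges, rho_res, theta_res, threshold=100):
    # Returns the number of detected lines for the given Hough grid resolution.
    lines = cv2.HoughLines(edges, rho_res, theta_res, threshold)
    return 0 if lines is None else len(lines)

# Example comparison (fine bins vs. the coarser bins used in main()):
# print(count_lines(edges, 1, np.pi / 180), count_lines(edges, 2, 2 * np.pi / 180))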
def enderezar(entrada, salida):
    # Read the image
    imagen = cv2.imread(entrada)
    # Convert it to grayscale and detect edges
    gray = cv2.cvtColor(imagen, cv2.COLOR_BGR2GRAY)
    binaria = cv2.Canny(gray, 50, 150, apertureSize=3)
    # cv2.imshow('Grayscale', imagen)  # , binaria)

    # Use the Hough transform to find lines in the binarized image, with a
    # resolution of half a degree (pi/720), keeping only the lines that reach
    # a score of 1000 or more (which will be the longest ones)
    lineas = cv2.HoughLines(binaria, 1, np.pi / 720, 100)

    # Collect the angles that the Hough transform found for each detected line
    angulos = []
    try:
        for linea in lineas:
            rho, theta = linea[0]
            if rho < 0:
                theta = -theta
            # Keep only the lines close to horizontal (within +-10 degrees)
            if not estan_cercanos(theta, np.pi / 2, np.deg2rad(20)):
                continue
            angulos.append(theta)

        from collections import Counter
        veces = Counter(angulos)
        # Keep the three most frequent cases
        frecuentes = veces.most_common(3)
        # And compute the average of those three cases
        suma = sum(angulo * repeticion for angulo, repeticion in frecuentes)
        repeticiones = sum(repeticion for angulo, repeticion in frecuentes)
        angulo = suma / repeticiones
        angulo = np.rad2deg(angulo - np.pi / 2)
        print("[INFO] angulo: {:.5f}".format(angulo))

        W = 1200.
        height, width, depth = imagen.shape
        imgScale = W / width
        newX, newY = imagen.shape[1] * imgScale, imagen.shape[0] * imgScale

        # Now straighten the image by rotating it by the detected angle
        (h, w) = imagen.shape[:2]
        centro = (w // 2, h // 2)
        M = cv2.getRotationMatrix2D(centro, angulo, 1.0)
        girada = cv2.warpAffine(imagen, M, (w, h), flags=cv2.INTER_CUBIC,
                                borderMode=cv2.BORDER_REPLICATE)
        girada = cv2.resize(girada, (int(newX), int(newY)), interpolation=cv2.INTER_AREA)
        # And write the result to disk
        cv2.imwrite(salida, girada)
    except:
        W = 1200.
        height, width, depth = imagen.shape
        imgScale = W / width
        newX, newY = imagen.shape[1] * imgScale, imagen.shape[0] * imgScale
        print("Resize only")
        girada = cv2.resize(imagen, (int(newX), int(newY)), interpolation=cv2.INTER_AREA)
        cv2.imwrite(salida, girada)
import cv2
import numpy as np

img = cv2.imread('test.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('grey', gray)
cv2.waitKey(0)

edges = cv2.Canny(gray, 50, 150, apertureSize=3)
cv2.imshow('edges', edges)
cv2.waitKey(0)

lines = cv2.HoughLines(edges, 1, np.pi / 180, 120)
print(len(lines))
print(len(lines[0]))
print(len(lines[0][0]))

for i in lines:
    rho = i[0][0]
    theta = i[0][1]
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 1000 * (-b))
    y1 = int(y0 + 1000 * (a))
    x2 = int(x0 - 1000 * (-b))
    y2 = int(y0 - 1000 * (a))
    cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)

cv2.imshow('lines', img)
cv2.waitKey(0)
cv2.imwrite('houghlines3.jpg', img)
def applyHough(image, original): img = cv2.imread(image) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #edges = cv2.Canny(gray,50,150,apertureSize = 3) #23,55 edges = cv2.Canny(gray, 23, 60, apertureSize=3) lines = cv2.HoughLines(edges, 1, np.pi / 720, 120) #150 #90 if lines is not None: # number = 0; # x1_mean = y1_mean = x2_mean = y2_mean = 0; x1_list = [] x2_list = [] y1_list = [] y2_list = [] for line in lines: rho, theta = line[0] a = np.cos(theta) b = np.sin(theta) x0 = a * rho y0 = b * rho x1 = int(x0 + 1000 * (-b)) y1 = int(y0 + 1000 * (a)) x2 = int(x0 - 1000 * (-b)) y2 = int(y0 - 1000 * (a)) if (y1 <= y2): x1_list.append(x1) x2_list.append(x2) y1_list.append(y1) y2_list.append(y2) else: x1_list.append(x2) x2_list.append(x1) y1_list.append(y2) y2_list.append(y1) # number = number + 1; # x1_mean = (x1_mean)+(x1-x1_mean)/number # y1_mean = (y1_mean)+(y1-y1_mean)/number # x2_mean = (x2_mean)+(x2-x2_mean)/number # y2_mean = (y2_mean)+(y2-y2_mean)/number x1_median = int(np.ma.median(x1_list)) x2_median = int(np.ma.median(x2_list)) y1_median = int(np.ma.median(y1_list)) y2_median = int(np.ma.median(y2_list)) # cv2.line(original,(int(x1_mean),int(y1_mean)),(int(x2_mean),int(y2_mean)),(0,0,255),2) lines = cv2.HoughLinesP(edges, 0.5, np.pi / 720, 75, 70, 8) l = 0 xmin = ymin = 100000 xmax = ymax = -100000 if lines is not None: for line in lines: x1, y1, x2, y2 = line[0] if (y1 < ymin): ymin = y1 if (x1 < xmin): xmin = x1 if (x2 > xmax): xmax = x2 if (y2 > ymax): ymax = y2 if (y2 < ymin): ymin = y2 if (x2 < xmin): xmin = x2 if (x1 > xmax): xmax = x1 if (y1 > ymax): ymax = y1 # cv2.line(original,(x1,y1),(x2,y2),(0,0,255),2) # l = math.sqrt((xmin-xmax)**2 + (ymin-ymax)**2) # cv2.line(original,(int(x1_mean)+int(x2_mean)-xmin,int(y2_mean)+int(y1_mean)-ymin),(int(xmin),ymin),(0,0,255),2) # slope_mean = abs((y2_mean - y1_mean)/(x2_mean-x1_mean)) # slope_median = abs((y2_median - y1_median)/(x2_median-x1_median)) # print(slope_mean-slope_median); # cv2.line(original,(int(x1_mean),int(y1_mean)),(int(x2_mean),int(y2_mean)),(0,0,255),2) global cutoff, prev_x1, prev_x2, prev_y1, prev_y2, last5_slopes if (x2_median - x1_median is not 0): slope = (y2_median - y1_median) / (x2_median - x1_median) else: slope = last5_slopes[len(last5_slopes) - 1] prev_slope = slope use_this = True all_neg = True all_pos = True for x in last5_slopes: if (x > 0): all_neg = False elif (x < 0): all_pos = False if (len(last5_slopes) > 4) and ( (all_pos and slope < 0 and last5_slopes[len(last5_slopes) - 1] < 200) or (all_neg and slope > 0 and last5_slopes[len(last5_slopes) - 1] > -200)): use_this = False else: last5_slopes.append(slope) while (len(last5_slopes) > 5): last5_slopes.pop(0) # print(str(prev_slope) + " " + str(slope)); # if(prev_slope != 0): # if(slope - prev_slope > epsilon or slope -prev_slope < epsilon): # slope = prev_slope; x_lim_up = (0 - y1_median) / slope + x1_median if (cutoff == 0): cutoff = ymax elif (cutoff > 0 and ymax > 0): cutoff = 0.9 * cutoff + 0.1 * ymax # print(cutoff) x_lim_down = (cutoff - y1_median) / slope + x1_median # print(str(x1_median) + " " + str(y1_median) + " " + str(x2_median) + " " + str(y2_median)) # cv2.line(original,(int(x1_median),int(y1_median)),(int(x2_median),int(y2_median)),(0,255,255),3) if use_this: cv2.line(original, (int(x_lim_up), int(0)), (int(x_lim_down), int(cutoff)), (0, 0, 255), 3) else: cv2.line(original, (int(prev_x1), int(prev_y1)), (int(prev_x2), int(prev_y2)), (0, 0, 255), 3) prev_x1 = x_lim_up prev_y1 = 0 prev_x2 = x_lim_down prev_y2 = cutoff # 
cv2.imshow('asd',img) elif persist: cv2.line(original, (int(prev_x1), int(prev_y1)), (int(prev_x2), int(prev_y2)), (0, 0, 255), 3) # prev_slope = 0; return original
# Applying Hough lines
minLineLength = 70
maxLineGap = 0.1
# minLineLength and maxLineGap must be passed by keyword; positionally they would
# land on the unused 'lines' output argument.
lines = cv2.HoughLinesP(total_gradient, 1, np.pi / 180, 5,
                        minLineLength=minLineLength, maxLineGap=maxLineGap)
for x1, y1, x2, y2 in lines[0]:
    cv2.line(image_1, (x1, y1), (x2, y2), (0, 255, 0), thickness=2, lineType=8, shift=0)

lines = cv2.HoughLines(total_gradient, 1, np.pi / 180, 200)
for rho, theta in lines[0]:
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 1000 * (-b))
    y1 = int(y0 + 1000 * (a))
    x2 = int(x0 - 1000 * (-b))
    y2 = int(y0 - 1000 * (a))
    cv2.line(image, (x1, y1), (x2, y2), (0, 0, 255), 1)

# for var in range(-135, 150, 45):
cv2.imshow("Figure: Sobel Edges", total_gradient)
def analyzeGameBoard(image, debug=False): """ Determine the current state of the game board. Return a 2d array specifiying the contents of each of the nine spaces, one of {'X', 'O', ' '} :param image: Image of the game board on a blank background :param debug: If true, displays step-by-step visuals for debugging :return: A 2d array specifiying the contents of each of the nine spaces, one of {'X', 'O', ''} """ image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) image = cv2.GaussianBlur(image, (5, 5), 0) binary = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1] edges = cv2.Canny(binary, 1, 254) if debug: cv2.imshow("Image", image) cv2.imshow("Binary", binary) cv2.imshow("Edges", edges) cv2.waitKey(0) cv2.destroyAllWindows() # Find contours based on edges contours = cv2.findContours(edges.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) # Normalize format of contours between different versions of OpenCV contours = imutils.grab_contours(contours) # Find the contour with the largest area, which should be the game board board2 = max(contours, key=cv2.contourArea) #contours.remove(board2) contours = sorted(contours, key=cv2.contourArea, reverse=True) contours = contours[1:] board = max(contours, key=cv2.contourArea) contours = sorted(contours, key=cv2.contourArea, reverse=True) contours = contours[1:] #contours.remove(board) mask = np.zeros_like(binary) cv2.drawContours(mask, [board], 0, 255, -1) out = np.full_like(binary, 255) out[mask == 255] = binary[mask == 255] for contour in contours: mask = np.zeros_like(binary) cv2.drawContours(mask, [contour], 0, 255, -1) out[mask == 255] = 255 if debug: cv2.imshow('t', mask) cv2.imshow('h', out) cv2.waitKey(0) cv2.destroyAllWindows() if debug: cv2.imshow('Original', binary) cv2.imshow('Mask', mask) cv2.imshow('Output', out) cv2.waitKey(0) cv2.destroyAllWindows() boardEdges = cv2.Canny(out, 1, 254) lines = cv2.HoughLines(boardEdges, 2, np.pi / 90, 100) lines = mergeLines(lines) vLines, hLines = findExtremeLines(lines) lines = vLines + hLines if debug: for line in lines: for rho, theta in line: a = np.cos(theta) b = np.sin(theta) x0 = a * rho y0 = b * rho x1 = int(x0 + 1000 * (-b)) y1 = int(y0 + 1000 * (a)) x2 = int(x0 - 1000 * (-b)) y2 = int(y0 - 1000 * (a)) cv2.line(image, (x1, y1), (x2, y2), (0, 0, 255), 2) cv2.imshow("i", image) cv2.waitKey(0) cv2.destroyAllWindows() # Remove the game board from the image binary[out == 0] = 255 if debug: cv2.imshow('mask', mask) cv2.imshow('out', out) cv2.imshow('binary', binary) cv2.waitKey(0) cv2.destroyAllWindows() tlPoint, trPoint, blPoint, brPoint = getAllIntersections(vLines, hLines) upperMiddle = int((tlPoint[0] + trPoint[0]) / 2) middleLeft = int((tlPoint[1] + blPoint[1]) / 2) middleRight = int((trPoint[1] + brPoint[1]) / 2) lowerMiddle = int((blPoint[0] + brPoint[0]) / 2) yMax = binary.shape[0] - 1 xMax = binary.shape[1] - 1 spaces = np.empty((3, 3), dtype=object) if debug: image[tlPoint[0], tlPoint[1]] = 255 image[trPoint[0], trPoint[1]] = 255 image[blPoint[0], blPoint[1]] = 255 image[brPoint[0], brPoint[1]] = 255 cv2.imshow('h', image) cv2.waitKey(0) cv2.destroyAllWindows() spaces[0][0] = binary[0:tlPoint[0], 0:tlPoint[1]] spaces[0][1] = binary[0:upperMiddle, tlPoint[1]:trPoint[1]] spaces[0][2] = binary[0:trPoint[0], trPoint[1]:xMax] spaces[1][0] = binary[tlPoint[0]:blPoint[0], 0:middleLeft] spaces[1][1] = binary[upperMiddle:lowerMiddle, middleLeft:middleRight] spaces[1][2] = binary[trPoint[0]:brPoint[0], middleRight:xMax] spaces[2][0] = binary[blPoint[0]:yMax, 0:blPoint[1]] spaces[2][1] = 
binary[lowerMiddle:yMax, blPoint[1]:brPoint[1]] spaces[2][2] = binary[brPoint[0]:yMax, brPoint[1]:xMax] gameState = np.full((3, 3), ' ') for i in range(3): for j in range(3): gameState[i][j] = analyzeSpace(spaces[i][j], debug) return gameState
def run(self): if self.active: #pub.sendMessage("transectline", message = [1, 2, 3, 4, 5, 6]) #pub_to_manager("transectline", message = [1, 2, 3, 4, 5, 6]) #print("active: ",self.active) #print("show: ", self.show) if self.captureON == False: if self.simulation==False: self.cap = cv2.VideoCapture(0) self.captureON = True ''' cv2.namedWindow('Sliders') cv2.createTrackbar('H','Sliders',0,255,nothing) cv2.createTrackbar('H_Range','Sliders',0,255,nothing) cv2.createTrackbar('S','Sliders',0,255,nothing) cv2.createTrackbar('S_Range','Sliders',0,255,nothing) cv2.createTrackbar('V','Sliders',0,255,nothing) cv2.createTrackbar('V_Range','Sliders',0,255,nothing) ''' ret, frame = self.cap.read() height = frame.shape[0] width = frame.shape[1] origin = (0, 0) center = (width//2, height//2) process = np.zeros([height,width,1],dtype=np.uint8) process.fill(255) hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) H = cv2.getTrackbarPos('H','Sliders') H = 102 H_Range = cv2.getTrackbarPos('H_Range','Sliders') H_Range = 51 S = cv2.getTrackbarPos('S','Sliders') S = 51 S_Range = cv2.getTrackbarPos('S_Range','Sliders') S_Range = 128 V = cv2.getTrackbarPos('V','Sliders') V = 102 V_Range = cv2.getTrackbarPos('V_Range','Sliders') V_Range = 71 lower_blue = np.array([H,S,V]) #110-130 upper_blue = np.array([SliderLimit(H, H_Range),SliderLimit(S, S_Range),SliderLimit(V, V_Range)]) mask = cv2.inRange(hsv, lower_blue, upper_blue) res = cv2.bitwise_and(frame,frame, mask= mask) median = cv2.medianBlur(res,5) #cv2.imshow('res',res) grayscaled = cv2.cvtColor(median,cv2.COLOR_BGR2GRAY) #cv2.imshow('grayscaled',grayscaled) ''' kernel = np.ones((10,10),np.uint8) erosion = cv2.erode(grayscaled,kernel,iterations = 1) cv2.imshow('erosion',erosion) ''' #retval, threshold = cv2.threshold(grayscaled, 10, 255, cv2.THRESH_BINARY) #cv2.imshow('gray', grayscaled) # The bitwise and of the frame and mask is done so # that only the blue coloured objects are highlighted # and stored in res #res = cv2.bitwise_and(frame,frame, mask= mask) #Filter out all colours except for a range of blue #gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY) #cv2.imshow('gray',gray) edges = cv2.Canny(grayscaled,50,150,apertureSize = 3) #cv2.imshow('edges',edges) ''' theta = np.pi / 180 rho = 50 threshold = 15 # minimum number of votes (intersections in Hough grid cell) min_line_length = 50 # minimum number of pixels making up a line max_line_gap = 20 lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]),min_line_length, max_line_gap) ''' point1_x, point1_y, point2_x, point2_y = width//2, height, width//2, 0 lines = cv2.HoughLines(edges,10,np.pi/180, 200, 0) line_amount = 4 coordinates = [] lines_seen = line_amount #Lines = [line_attr() for i in range(line_amount)] Lines = [] pos_length = 0 pos_len_count = 1 neg_length = 0 neg_len_count = 1 for i in range(0, line_amount): try: for r,theta in lines[i]: # Stores the value of cos(theta) in a a = np.cos(theta) # Stores the value of sin(theta) in b b = np.sin(theta) # x0 stores the value rcos(theta) x0 = a*r # y0 stores the value rsin(theta) y0 = b*r # x1 stores the rounded off value of (rcos(theta)-1000sin(theta)) x1 = int(x0 + 1000*(-b)) # y1 stores the rounded off value of (rsin(theta)+1000cos(theta)) y1 = int(y0 + 1000*(a)) # x2 stores the rounded off value of (rcos(theta)+1000sin(theta)) x2 = int(x0 - 1000*(-b)) # y2 stores the rounded off value of (rsin(theta)-1000cos(theta)) y2 = int(y0 - 1000*(a)) ### #y - y2 = s(x - x2) #y = sx - sx2 + y2 s = get_gradient((x1,y1), (x2, y2)) t = -x2*s+y2 ### cv2.line(process, 
(x1, y1), (x2,y2), (0, 0, 255), 1) m = -1/s k = -m*center[0] + center[1] ### #y - y2 = s(x - x2) #y = sx - sx2 + y2 #y = sx + t #y - centery = m(x - centerx) #y = mx - mcenterx + centery #y = mx + k #sx + t = mx + k #x(m-s) = t-k #x = (t-k)/(m-s) x_intersect = round((t-k)/(m-s)) y_intersect = round(s*x_intersect + t) intersect = (x_intersect, y_intersect) remove = False for coordinate in coordinates: if within_range(x_intersect, coordinate[0]-50, coordinate[0]+50) and within_range(y_intersect, coordinate[1]-50, coordinate[1]+50): line_amount -= 1 lines_seen -= 1 remove = True continue if remove == True: continue coordinates.append(intersect) ### cv2.line(process,(center),(intersect),(0,0,255),1) cv2.circle(process, (intersect), 2, (0, 0, 255), 2) cv2.circle(process, (center), 2, (0, 0, 255), 2) if x_intersect < center[0]: length = -1*get_length((center), (intersect)) neg_length = neg_length + length neg_len_count +=1 else: length = get_length((center), (intersect)) pos_length = pos_length + length pos_len_count += 1 abs_length = abs(length) angle = np.arctan(self.relative(intersect, center)[1]/self.relative(intersect, center)[0]) #print(angle) if self.relative(intersect, center)[1]>0 and self.relative(intersect, center)[0] < 0: angle = np.pi + angle if self.relative(intersect, center)[1]<0 and self.relative(intersect, center)[0] < 0: angle = angle + np.pi if self.relative(intersect, center)[1]<0 and self.relative(intersect, center)[0] > 0: angle = 2*np.pi - angle*-1 ### #y = sx + t #0 = sx + t #-t = sx #x = -t/s top_intersect_x = -t/s top_intersect = (top_intersect_x, 0) ### #Lines[i].set((length, angle, top_intersect_x)) Lines = Lines + [line_attr()] Lines[-1].set((length, angle, top_intersect_x)) except: lines_seen = lines_seen - 1 if lines_seen > 0: neg_length = neg_length/neg_len_count pos_length = pos_length/pos_len_count Length_Deviation = pos_length + neg_length if pos_length == 0 or neg_length == 0: #If one side gone pos_length = pos_length *2 neg_length = neg_length *2 Line_Distance = (abs(neg_length) + pos_length)/2 Total_Top = 0 Top_pos = [0, 0] #value, amount Top_neg = [0, 0] Total_Angle = 0 Angles = lines_seen #Total_Acute_Angle = 0 #Total_Obtuse_Angle = 0 #Total_Right_Angle = 0 #Angle_Counter = [1, 1, 1, 3] #Acute, Obtuse, Right, Types for i in range(line_amount): try: if Lines[i].top_intersect_x > center[0]: Top_pos[0] = Top_pos[0] + Lines[i].top_intersect_x Top_pos[1] = Top_pos[1] + 1 elif Lines[i].top_intersect_x < center[0]: Top_neg[0] = Top_neg[0] + Lines[i].top_intersect_x Top_neg[1] = Top_neg[1] + 1 if Lines[i].angle < np.pi/2: Angle = Lines[i].angle + np.pi/2 elif Lines[i].angle > np.pi/2 and Lines[i].angle < np.pi: Angle = Lines[i].angle - np.pi/2 elif Lines[i].angle> np.pi and Lines[i].angle< np.pi*3/2: Angle = Lines[i].angle - np.pi/2 elif Lines[i].angle> np.pi*3/2: Angle = Lines[i].angle - np.pi*3/2 else: Angle = 0 #print(Angle) Total_Angle = Total_Angle + Angle ''' if Angle < np.pi/2: Total_Acute_Angle = Total_Acute_Angle + Angle Angle_Counter[0] += 1 elif Angle > np.pi/2: Total_Obtuse_Angle = Total_Obtuse_Angle + Angle Angle_Counter[1] += 1 elif Angle == np.pi/2: Total_Right_Angle = Total_Right_Angle + Angle Angle_Counter[2] += 1 ''' except: pass Total_Angle = Total_Angle/Angles if Top_pos[1] != 0 and Top_neg[1] != 0: Total_Top = (Top_pos[0]/Top_pos[1] + Top_neg[0]/Top_neg[1])/2 elif Top_pos[1] == 0: Total_Top = (Top_neg[0]/Top_neg[1]) elif Top_neg[1] == 0: Total_Top = (Top_pos[0]/Top_pos[1]) ''' Angles.append(Total_Acute_Angle/Angle_Counter[0]) 
Angles.append(Total_Obtuse_Angle/Angle_Counter[1]) Angles.append(Total_Right_Angle/Angle_Counter[2]) for i in range(3): if Angle_Counter[i] == 1: Angle_Counter[3] -= 1 else: pass Total_Angle = Total_Angle + Angles[i] Total_Angle = Total_Angle/Angle_Counter[3] ''' cv2.circle(process, (round(Total_Top), 0), 2, (0, 0, 255), 2) else: Total_Top = center[0] Line_Distance = (Updown_Deadzone[0]+Updown_Deadzone[1])/2 Total_Angle = np.pi/2 Length_Deviation = 0 ''' if Total_Top <center[0] - 30: print('Move Right') elif Total_Top > center[0] - 30 and Total_Top<center[0] + 30: print("Don't Move") else: print('Move Left') if Total_Angle > np.pi/2 - 0.2 and Total_Angle < np.pi/2 + 0.2: print("Don't turn") elif Total_Angle > np.pi/2 + 0.2: print('Turn Left') elif Total_Angle < np.pi/2 - 0.2: print('Turn Right') if Line_Distance > 100: print("Go Higher") elif Line_Distance<70: print("Go Lower") else: print("Height OK") print('\n\n\n\n\n\n') ''' ''' #Strafe_Power = Length_Deviation/(width/2) Yaw_Power =(Total_Top-width//2)/(width//2) Updown_Power = 0 if Line_Distance > 150: Updown_Power = (150-Line_Distance)/100 if Updown_Power > 1: Updown_Power = 1 elif Line_Distance < 100: Updown_Power = (100-Line_Distance)/100 Message = (Strafe_Power,Drive_Power,Yaw_Power,Updown_Power,0,0) #Strafe, drive, yaw, updown, 0, 0 ''' if Total_Top > center[0] - Strafe_Deadzone and Total_Top<center[0] + Strafe_Deadzone: Total_Top = width/2 Strafe_Power = (Total_Top-width/2)/(width/2) if Strafe_Power > 1: Strafe_Power = 1 elif Strafe_Power < -1: Strafe_Power = -1 if Total_Angle > np.pi/2 - Yaw_Deadzone and Total_Angle < np.pi/2 + Yaw_Deadzone: Total_Angle = np.pi/2 Yaw_Power = (-Total_Angle+np.pi/2)/(np.pi/2) if Line_Distance < Updown_Deadzone[1] and Line_Distance>Updown_Deadzone[0]: Line_Distance = max_line_distance/2 elif Line_Distance<=Updown_Deadzone[0]: Line_Distance = Line_Distance*((max_line_distance/2)/Updown_Deadzone[0]) elif Line_Distance >= Updown_Deadzone[1]: Line_Distance = ((Line_Distance - Updown_Deadzone[1])*((max_line_distance/2)/(max_line_distance-Updown_Deadzone[1])))+max_line_distance/2 Updown_Power = (Line_Distance-max_line_distance/2)/(max_line_distance/2) if Updown_Power > 1: Updown_Power = 1 elif Updown_Power < -1: Updown_Power = -1 #Value Modifiers Drive_Power = self.drive_power #0-1 Strafe_Power = PowerFunction(Strafe_Power, self.strafe_mod) Yaw_Power = PowerFunction(Yaw_Power, self.yaw_mod) Updown_Power = PowerFunction(Updown_Power, self.updown_mod) if self.show: cv2.imshow('frame',frame) cv2.imshow('process', process) Powers = [Strafe_Power,Drive_Power,Yaw_Power,Updown_Power,0,0] #Strafe, drive, yaw, updown, 0, 0 #print(Powers) cv2.waitKey(1) #pub.sendMessage("transectline", message = Powers) #a = [1, 2, 3, 4, 5, 6] #pub_to_manager("control-movement", message = ("transectline", Powers)) pub.sendMessage("control-movement", message = ("transectline", Powers)) else: if self.captureON == True: self.cap.release() cv2.destroyAllWindows() self.captureON = False
for line in lines:
    x1, y1, x2, y2 = line[0]
    cv.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv.imshow("result", img)
cv.waitKey(0)

# HoughLines code
img = cv.imread('hallway.jpg')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
edges = cv.Canny(gray, 50, 150)
lines = cv.HoughLines(edges, 1, np.pi / 180, 200)
for line in lines:
    rho, theta = line[0]
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 1000 * (-b))
    y1 = int(y0 + 1000 * (a))
    x2 = int(x0 - 1000 * (-b))
    y2 = int(y0 - 1000 * (a))
    cv.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv.imshow("result", img)
cv.waitKey(0)
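# The (rho, theta) -> endpoint conversion above is repeated in almost every snippet in
# this collection. A minimal sketch of a reusable helper, assuming the usual OpenCV
# convention that rho is the signed distance from the origin and theta the angle of the
# line's normal; the function name polar_to_segment is illustrative only.
import numpy as np

def polar_to_segment(rho, theta, length=1000):
    # Point on the line closest to the origin.
    a, b = np.cos(theta), np.sin(theta)
    x0, y0 = a * rho, b * rho
    # Walk 'length' pixels along the line direction (-b, a) in both directions.
    pt1 = (int(x0 + length * (-b)), int(y0 + length * a))
    pt2 = (int(x0 - length * (-b)), int(y0 - length * a))
    return pt1, pt2

# Usage, e.g. with the loop above:
#     pt1, pt2 = polar_to_segment(rho, theta)
#     cv.line(img, pt1, pt2, (0, 0, 255), 2)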