def testInvalidParameters():
    with pytest.raises(SyntaxError):
        ed.edge_detect("", "")
    with pytest.raises(SyntaxError):
        ed.edge_detect(fin, "")
    with pytest.raises(SyntaxError):
        ed.edge_detect("", fout)
def angle_detect(img):
    # Locate the panel borders, then sample the x-position of the left edge at
    # two heights: 20% and 80% of the way from the top border to the bottom one.
    up, down, left, right = edge_detect(img)
    up = up[0]
    down = down[0]
    left = left[1]
    right = right[1]
    center = []
    center.append(img.shape[0] / 2)
    center.append(img.shape[1] / 2)
    left1 = search_v_edge(center[1], 0, 0.8 * up + 0.2 * down, img)
    left2 = search_v_edge(center[1], 0, 0.2 * up + 0.8 * down, img)
    # The tilt angle is the arctangent of the horizontal offset between the two
    # samples divided by their signed vertical separation, 0.6 * (up - down).
    return math.atan((left1 - left2) / (0.6 * up - 0.6 * down))
def testNormalCase():
    # fout=tempfile.TemporaryFile()
    ed.edge_detect(fin, fout)
    assert os.path.isfile(fout)
    os.remove(fout)
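# testInvalidParameters and testNormalCase above assume module-level `fin` and
# `fout` plus the imports below. A minimal sketch of that setup, assuming the
# tests live next to a sample image; the file names here are hypothetical, not
# taken from the original test module:
import os

import pytest

import edge_detect as ed

fin = "test_input.bmp"    # hypothetical sample image shipped with the tests
fout = "test_output.bmp"  # hypothetical path the detector is expected to write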
with open('defects.txt','r') as f:
    for line in f.readlines():
        #find the coordinate of the defect
        m=re.match(r'(.*?)(a.*?)_(\d+)_(\d+)',line)
        #if it is the first time a defect appears on this picture, make a copy of the picture, rotate it and crop away the black margins
        if not os.path.isfile(m.group(2)+'marked.bmp'):
            shutil.copy(m.group(2)+'.bmp',m.group(2)+'marked.bmp')
            orgin=cv2.imread(m.group(2)+'marked.bmp',0)
            ang=angle_detect(orgin)
            #rotate the picture so that the backlight panel is put straight
            rows,cols=orgin.shape
            M0 = cv2.getRotationMatrix2D((cols/2,rows/2),-ang*180/math.pi,1)
            orgin = cv2.warpAffine(orgin,M0,(cols,rows))
            #detect the border of the backlight panel in the rotated picture
            up,down,left,right=edge_detect(orgin)
            #select the region of the backlight panel in the picture
            orgin=orgin[int(round(up[0])):int(round(down[0])),int(round(left[1])):int(round(right[1]))]
            orgin=cv2.cvtColor(orgin,cv2.COLOR_GRAY2BGR)
        else:
            orgin=cv2.imread(m.group(2)+'marked.bmp',1)
        #get the defect part
        if side*(int(m.group(3))+1)<=orgin.shape[0] and side*(int(m.group(4))+1)<=orgin.shape[1]:
            defect=orgin[side*int(m.group(3))+1:side*(int(m.group(3))+1)-1,side*int(m.group(4))+1:side*(int(m.group(4))+1)-1]
        elif side*int(m.group(3))<orgin.shape[0] and side*int(m.group(4))<orgin.shape[1]:
            defect=orgin[orgin.shape[0]-side+1:orgin.shape[0]-1,orgin.shape[1]-side+1:orgin.shape[1]-1]
        #add a red border to the defect part
        defect=cv2.copyMakeBorder(defect,1,1,1,1,cv2.BORDER_CONSTANT,value=RED)
        #put the defect part with its border back into the picture
        orgin[side*int(m.group(3)):side*(int(m.group(3))+1),side*int(m.group(4)):side*(int(m.group(4))+1)]=defect
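# The loop above assumes that `side` (the pixel size of one grid cell) and `RED`
# are already defined, and it only updates `orgin` in memory. A hedged sketch of
# that surrounding setup; the value of `side` is illustrative and the write-back
# step is presumed rather than shown in the original excerpt:
side = 100            # hypothetical cell size used to address defect cells
RED = (0, 0, 255)     # red in BGR order, used for the marker border
# ...and, inside the for loop once the marked cell has been put back:
#     cv2.imwrite(m.group(2) + 'marked.bmp', orgin)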
from time import perf_counter

import edge_detect
import torch
import cv2
import math

#Create 10000x10000 grid (100m samples)
size = 10000
circle = edge_detect.edge_detect(size)
torch.set_printoptions(precision=10)

#Edge detection
t1 = perf_counter()
result = circle.calc_fine()
pi = circle.calc_pi()
t4 = perf_counter()
print("Pi =", pi)
print("Error:", math.pi - pi)
print("Total time for pi using edge detection:", t4 - t1)

#Calculating pi without edge detection
t5 = perf_counter()
pi = circle.calc_raw()
t6 = perf_counter()
print("Pi =", pi)
print("Error:", math.pi - pi)
print("Total time for pi without edge detection:", t6 - t5)

#Draw the detected edge
#edge_detect.draw(result)
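# For context, the "without edge detection" baseline amounts to counting which
# grid points fall inside the unit circle and scaling by 4. A hedged,
# self-contained sketch of that idea in plain torch; this only illustrates the
# approach and is not the actual calc_raw implementation:
import torch

def grid_pi_estimate(size: int) -> float:
    # Sample the unit square on a size x size grid and count points with
    # x^2 + y^2 <= 1; the inside fraction approximates pi / 4.
    xs = torch.linspace(0.0, 1.0, size)
    ys = torch.linspace(0.0, 1.0, size)
    x, y = torch.meshgrid(xs, ys, indexing="ij")
    inside = (x * x + y * y <= 1.0).sum().item()
    return 4.0 * inside / (size * size)

print(grid_pi_estimate(2000))  # roughly 3.14 on a modest grid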
# Open a copy of the depth image
# to change the contrast on the full-sized image
img2 = cv2.imread(depth_im, -1)
img2 = util.normalize_depth(img2)
img2 = util.clahe(img2, iter=2)

# crops the image
img2 = img2[mouse_Y[0]:mouse_Y[1], mouse_X[0]:mouse_X[1]]
P["img2"] = img2

# *********************************** SECTION 1 *****************************************
# FIND DEPTH / CURVATURE DISCONTINUITIES.
curve_disc, depth_disc, edgelist = edge_detect(P)

#CREATES LINE SEGMENTS
seglist = line_seg(edgelist, tol=5)

if context.ShowEdgeListWnd:
    draw.draw_edge_list(seglist, P)

line_pairs = []
cntr_pairs = []
img_size = copy.deepcopy(P["img_size"])
height = img_size[0]
width = img_size[1]
blank_im = np.zeros((height, width, 3), np.uint8)
print("img size", img_size)
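# blank_im is created above but not used in this excerpt; presumably the line
# segments are rendered onto it further down. A hedged sketch of that step,
# assuming each entry of seglist is a sequence of (row, col) vertices (the exact
# seglist layout is an assumption, not confirmed by this excerpt):
for seg in seglist:
    for (r0, c0), (r1, c1) in zip(seg[:-1], seg[1:]):
        # cv2.line expects points as (x, y), i.e. (col, row)
        cv2.line(blank_im, (int(c0), int(r0)), (int(c1), int(r1)), (0, 255, 0), 1)
cv2.imshow("segments", blank_im)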
#find the coordinates of the defects
image2 = 'b' + re.match(r'^a(\S*?).bmp', image).group(1) + '.png'
img2 = cv2.imread(image2, 0)
defect = imdiff(img, img2)

#find the angle of the backlight panel with respect to the border of the picture
ang = angle_detect(img)

#rotate the picture so that the backlight panel is put straight
rows, cols = img.shape
M0 = cv2.getRotationMatrix2D((cols / 2, rows / 2), -ang * 180 / math.pi, 1)
img = cv2.warpAffine(img, M0, (cols, rows))

#detect the border of the backlight panel in the rotated picture
up, down, left, right = edge_detect(img)

#select the region of the backlight panel in the picture
img = img[int(round(up[0])):int(round(down[0])), int(round(left[1])):int(round(right[1]))]

#transformation matrix
tm = np.array([[
    math.cos(ang),
    math.sin(ang),
    (1 - math.cos(ang)) * cols / 2 - math.sin(ang) * rows / 2
], [
    -math.sin(ang),
    math.cos(ang),
    math.sin(ang) * cols / 2 + (1 - math.cos(ang)) * rows / 2
]])
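# Side note: tm above is exactly the 2x3 affine matrix OpenCV builds for a
# rotation of ang radians about the image centre, i.e. the inverse of the -ang
# rotation M0 applied earlier, presumably used to map coordinates between the
# original and the straightened frame. A standalone, hedged check of that
# equivalence (illustrative values only, not part of the pipeline above):
import math

import cv2
import numpy as np

def _check_rotation_matrix(a=0.05, rows=480, cols=640):
    manual = np.array([[math.cos(a), math.sin(a),
                        (1 - math.cos(a)) * cols / 2 - math.sin(a) * rows / 2],
                       [-math.sin(a), math.cos(a),
                        math.sin(a) * cols / 2 + (1 - math.cos(a)) * rows / 2]])
    opencv = cv2.getRotationMatrix2D((cols / 2, rows / 2), a * 180 / math.pi, 1)
    assert np.allclose(manual, opencv)  # same matrix, up to floating-point error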
from os import listdir, getcwd
from os.path import join, isfile

import edge_detect as edge
import cv2
import datetime as dt

images_path = join(getcwd(), "../images")
images = [f for f in listdir(images_path) if isfile(join(images_path, f))]

for img_path in images:
    img = cv2.imread(join(images_path, img_path), 0)
    start_time = dt.datetime.now()
    edges = edge.edge_detect(img)
    end_time = dt.datetime.now()
    # total_seconds() gives the full elapsed time; .microseconds alone would
    # wrap around for runs longer than one second
    time = (end_time - start_time).total_seconds() * 1e6
    print(img_path + ":", time)
# (tail of detect_motion: grid the frame-difference image d into M x N cells
#  and flag the cells whose fraction of changed pixels exceeds the threshold T)
    for i in range(M):
        for j in range(N):
            x = 0
            for i_prime in range(height // M):
                for j_prime in range(width // N):
                    if d[i * height // M + i_prime, j * width // N + j_prime] != 0:
                        x += 1
            if M * N * x / (height * width * 1.0) > T:
                motion[i, j] = 255
    return motion

# main loop
while cap.isOpened():
    ret, frame = cap.read()
    frame_filt = cv2.filter2D(frame, -1, kernel)
    frame_edge = edge.edge_detect(frame_filt)
    # cv2.imshow('normal', frame)  # normal video
    cv2.imshow('edge', frame_edge)  # edge detection video
    motion = detect_motion(frame_edge, last_frame_edge)
    cv2.imshow('motion', cv2.resize(motion, (640, 480)))  # motion detection video
    # cv2.imshow('difference', difference(frame_edge, last_frame_edge))

    # update frames
    last_frame_edge = frame_edge

    # listen for exit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
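# The main loop above assumes `cap`, `kernel` and `last_frame_edge` already
# exist. A hedged sketch of that setup; the kernel values and camera index are
# illustrative, not taken from the original script:
import cv2
import numpy as np

import edge_detect as edge

cap = cv2.VideoCapture(0)                   # default camera
kernel = np.ones((3, 3), np.float32) / 9.0  # simple smoothing kernel
ret, first_frame = cap.read()
last_frame_edge = edge.edge_detect(cv2.filter2D(first_frame, -1, kernel))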