def findContours(image, getPolygon):
    storage = cv.cvCreateMemStorage(0)
    polyContourArray = []
    polyStorage = cv.cvCreateMemStorage(0)
    nb_contours, contours = cv.cvFindContours(image, storage,
                                              cv.sizeof_CvContour,
                                              cv.CV_RETR_TREE,
                                              cv.CV_CHAIN_APPROX_SIMPLE,
                                              cv.cvPoint(0, 0))
    if contours is None:
        return None
    contoursList = list(contours.hrange())
    if not getPolygon:
        ret = contoursList
    else:
        for contour in contoursList:
            per = cv.cvContourPerimeter(contour)
            polyContourArray.append(
                cv.cvApproxPoly(contour, cv.sizeof_CvContour, storage,
                                cv.CV_POLY_APPROX_DP, per / PER_TOLERANCE, 0))
        ret = polyContourArray
    return ret
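# A hypothetical usage sketch for the findContours() helper above (not part of
# the original source). It assumes the old SWIG-based OpenCV bindings
# ("from opencv import cv, highgui"), an input file named "shapes.png", and
# that PER_TOLERANCE is a module-level constant; all of these names are assumptions.
from opencv import cv, highgui

PER_TOLERANCE = 50  # assumed value for the tolerance constant used above

image = highgui.cvLoadImage("shapes.png", highgui.CV_LOAD_IMAGE_GRAYSCALE)
# cvFindContours expects a binary image, so threshold first
cv.cvThreshold(image, image, 128, 255, cv.CV_THRESH_BINARY)
polygons = findContours(image, True)
if polygons is not None:
    for poly in polygons:
        print("polygon with %d vertices" % poly.total)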
def read(self):
    frame = self.input.read()
    cv_rs = cv.cvCreateImage(cv.cvSize(frame.width, frame.height), frame.depth, 1)
    cv.cvCvtColor(frame, cv_rs, cv.CV_RGB2GRAY)
    frame = cv_rs
    if self.enabled:
        # I think these functions are too specialized for transforms
        cv.cvSmooth(frame, frame, cv.CV_GAUSSIAN, 3, 0, 0, 0)
        cv.cvErode(frame, frame, None, 1)
        cv.cvDilate(frame, frame, None, 1)
        num_contours, contours = cv.cvFindContours(frame, self.storage,
                                                   cv.sizeof_CvContour, cv.CV_RETR_LIST,
                                                   cv.CV_CHAIN_APPROX_NONE, cv.cvPoint(0, 0))
        if contours is None:
            return []
        else:
            contours = cv.cvApproxPoly(contours, cv.sizeof_CvContour, self.storage,
                                       cv.CV_POLY_APPROX_DP, 3, 1)
            if contours is None:
                return []
            else:
                final_contours = []
                for c in contours.hrange():
                    area = abs(cv.cvContourArea(c))
                    #self.debug_print('Polygon Area: %f'%area)
                    if area >= self.min_area:
                        lst = []
                        for pt in c:
                            lst.append((pt.x, pt.y))
                        final_contours.append(lst)
                    contours = contours.h_next
                return final_contours
    return []
def read(self):
    frame = self.input.read()
    if self.debug:
        raw_frame = cv.cvCreateImage(cv.cvSize(frame.width, frame.height),
                                     frame.depth, frame.nChannels)
        cv.cvCopy(frame, raw_frame, None)
        self.raw_frame_surface = pygame.image.frombuffer(frame.imageData,
                                                         (frame.width, frame.height), 'RGB')
    if self.enabled:
        cv_rs = cv.cvCreateImage(cv.cvSize(frame.width, frame.height), frame.depth, 1)
        # convert color
        cv.cvCvtColor(frame, cv_rs, cv.CV_BGR2GRAY)
        # invert the image
        cv.cvSubRS(cv_rs, 255, cv_rs, None)
        # threshold the image
        frame = cv.cvCreateImage(cv.cvSize(frame.width, frame.height), frame.depth, 1)
        cv.cvThreshold(cv_rs, frame, self.threshold, 255, cv.CV_THRESH_BINARY)
        if self.debug:
            thresh_frame = cv.cvCreateImage(cv.cvSize(frame.width, frame.height),
                                            frame.depth, 3)
            cv.cvCvtColor(frame, thresh_frame, cv.CV_GRAY2RGB)
            self.thresh_frame_surface = pygame.image.frombuffer(thresh_frame.imageData,
                                                                (frame.width, frame.height), 'RGB')
        # I think these functions are too specialized for transforms
        cv.cvSmooth(frame, frame, cv.CV_GAUSSIAN, 3, 0, 0, 0)
        cv.cvErode(frame, frame, None, 1)
        cv.cvDilate(frame, frame, None, 1)
        num_contours, contours = cv.cvFindContours(frame, self.storage,
                                                   cv.sizeof_CvContour, cv.CV_RETR_LIST,
                                                   cv.CV_CHAIN_APPROX_NONE, cv.cvPoint(0, 0))
        if contours is None:
            return []
        else:
            contours = cv.cvApproxPoly(contours, cv.sizeof_CvContour, self.storage,
                                       cv.CV_POLY_APPROX_DP, 3, 1)
            if contours is None:
                return []
            else:
                final_contours = []
                for c in contours.hrange():
                    area = abs(cv.cvContourArea(c))
                    #self.debug_print('Polygon Area: %f'%area)
                    if area >= self.min_area:
                        lst = []
                        for pt in c:
                            lst.append((pt.x, pt.y))
                        final_contours.append(lst)
                    contours = contours.h_next
                return final_contours
    return []
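# A minimal, hedged usage sketch (not from the original source): read() above
# returns each contour as a plain list of (x, y) tuples, so the result can be
# drawn directly with pygame. The names `transform` and `screen` are assumed
# placeholders for an instance of the class above and an existing pygame
# display surface.
import pygame

contours = transform.read()
for points in contours:
    if len(points) >= 3:
        # draw each contour as a closed green polyline
        pygame.draw.lines(screen, (0, 255, 0), True, points, 2)
pygame.display.flip()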
def read(self):
    frame = self.input.read()
    if self.enabled:
        # split the frame into its three color channels
        cv_rs = [cv.cvCreateImage(cv.cvSize(frame.width, frame.height), frame.depth, 1)
                 for i in range(3)]
        cv.cvSplit(frame, cv_rs[0], cv_rs[1], cv_rs[2], None)
        channel_contours = []
        # process each channel image in turn
        for frame in cv_rs:
            # I think these functions are too specialized for transforms
            #cv.cvSmooth(frame,frame,cv.CV_GAUSSIAN,3, 0, 0, 0 )
            #cv.cvErode(frame, frame, None, 1)
            #cv.cvDilate(frame, frame, None, 1)
            num_contours, contours = cv.cvFindContours(frame, self.storage,
                                                       cv.sizeof_CvContour, cv.CV_RETR_LIST,
                                                       cv.CV_CHAIN_APPROX_NONE, cv.cvPoint(0, 0))
            if contours is None:
                channel_contours.append([])
            else:
                contours = cv.cvApproxPoly(contours, cv.sizeof_CvContour, self.storage,
                                           cv.CV_POLY_APPROX_DP, 3, 1)
                if contours is None:
                    channel_contours.append([])
                else:
                    final_contours = []
                    for c in contours.hrange():
                        area = abs(cv.cvContourArea(c))
                        #self.debug_print('Polygon Area: %f'%area)
                        if area >= self.min_area:
                            lst = []
                            for pt in c:
                                lst.append((pt.x, pt.y))
                            final_contours.append(lst)
                        contours = contours.h_next
                    channel_contours.append(final_contours)
        return channel_contours
def detect_squares(self, img_grey, img_orig):
    """ Find squares within the video stream and draw them """
    cv.cvClearMemStorage(self.faces_storage)
    N = 11
    thresh = 5
    sz = cv.cvSize(img_grey.width & -2, img_grey.height & -2)
    timg = cv.cvCloneImage(img_orig)
    pyr = cv.cvCreateImage(cv.cvSize(sz.width / 2, sz.height / 2), 8, 3)
    # create empty sequence that will contain points -
    # 4 points per square (the square's vertices)
    squares = cv.cvCreateSeq(0, cv.sizeof_CvSeq, cv.sizeof_CvPoint, self.squares_storage)
    squares = cv.CvSeq_CvPoint.cast(squares)
    # select the maximum ROI in the image
    # with the width and height divisible by 2
    subimage = cv.cvGetSubRect(timg, cv.cvRect(0, 0, sz.width, sz.height))
    cv.cvReleaseImage(timg)
    # down-scale and upscale the image to filter out the noise
    cv.cvPyrDown(subimage, pyr, 7)
    cv.cvPyrUp(pyr, subimage, 7)
    cv.cvReleaseImage(pyr)
    tgrey = cv.cvCreateImage(sz, 8, 1)
    # find squares in every color plane of the image
    for c in range(3):
        # extract the c-th color plane
        channels = [None, None, None]
        channels[c] = tgrey
        cv.cvSplit(subimage, channels[0], channels[1], channels[2], None)
        for l in range(N):
            # hack: use Canny instead of zero threshold level.
            # Canny helps to catch squares with gradient shading
            if l == 0:
                # apply Canny. Take the upper threshold from slider
                # and set the lower to 0 (which forces edges merging)
                cv.cvCanny(tgrey, img_grey, 0, thresh, 5)
                # dilate canny output to remove potential
                # holes between edge segments
                cv.cvDilate(img_grey, img_grey, None, 1)
            else:
                # apply threshold if l != 0:
                #   tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                cv.cvThreshold(tgrey, img_grey, (l + 1) * 255 / N, 255, cv.CV_THRESH_BINARY)
            # find contours and store them all as a list
            count, contours = cv.cvFindContours(img_grey, self.squares_storage,
                                                cv.sizeof_CvContour, cv.CV_RETR_LIST,
                                                cv.CV_CHAIN_APPROX_SIMPLE, cv.cvPoint(0, 0))
            if not contours:
                continue
            # test each contour
            for contour in contours.hrange():
                # approximate contour with accuracy proportional
                # to the contour perimeter
                result = cv.cvApproxPoly(contour, cv.sizeof_CvContour,
                                         self.squares_storage, cv.CV_POLY_APPROX_DP,
                                         cv.cvContourPerimeter(contours) * 0.02, 0)
                # square contours should have 4 vertices after approximation,
                # a relatively large area (to filter out noisy contours)
                # and be convex.
                # Note: the absolute value of the area is used because the
                # area may be positive or negative, in accordance with the
                # contour orientation
                if (result.total == 4 and
                        abs(cv.cvContourArea(result)) > 1000 and
                        cv.cvCheckContourConvexity(result)):
                    s = 0
                    for i in range(5):
                        # find the minimum angle between joint
                        # edges (maximum of cosine)
                        if i >= 2:
                            t = abs(self.squares_angle(result[i], result[i - 2], result[i - 1]))
                            if s < t:
                                s = t
                    # if the cosines of all angles are small
                    # (all angles are ~90 degrees) then write the quadrangle
                    # vertices to the resultant sequence
                    if s < 0.3:
                        for i in range(4):
                            squares.append(result[i])
    cv.cvReleaseImage(tgrey)
    return squares
def detect_squares(self, img):
    """ Find squares within the video stream and draw them """
    N = 11
    thresh = 5
    sz = cv.cvSize(img.width & -2, img.height & -2)
    timg = cv.cvCloneImage(img)
    gray = cv.cvCreateImage(sz, 8, 1)
    pyr = cv.cvCreateImage(cv.cvSize(sz.width / 2, sz.height / 2), 8, 3)
    # create empty sequence that will contain points -
    # 4 points per square (the square's vertices)
    squares = cv.cvCreateSeq(0, cv.sizeof_CvSeq, cv.sizeof_CvPoint, self.storage)
    squares = cv.CvSeq_CvPoint.cast(squares)
    # select the maximum ROI in the image
    # with the width and height divisible by 2
    subimage = cv.cvGetSubRect(timg, cv.cvRect(0, 0, sz.width, sz.height))
    # down-scale and upscale the image to filter out the noise
    cv.cvPyrDown(subimage, pyr, 7)
    cv.cvPyrUp(pyr, subimage, 7)
    tgray = cv.cvCreateImage(sz, 8, 1)
    # find squares in every color plane of the image
    for c in range(3):
        # extract the c-th color plane
        channels = [None, None, None]
        channels[c] = tgray
        cv.cvSplit(subimage, channels[0], channels[1], channels[2], None)
        for l in range(N):
            # hack: use Canny instead of zero threshold level.
            # Canny helps to catch squares with gradient shading
            if l == 0:
                # apply Canny. Take the upper threshold from slider
                # and set the lower to 0 (which forces edges merging)
                cv.cvCanny(tgray, gray, 0, thresh, 5)
                # dilate canny output to remove potential
                # holes between edge segments
                cv.cvDilate(gray, gray, None, 1)
            else:
                # apply threshold if l != 0:
                #   tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                cv.cvThreshold(tgray, gray, (l + 1) * 255 / N, 255, cv.CV_THRESH_BINARY)
            # find contours and store them all as a list
            count, contours = cv.cvFindContours(gray, self.storage,
                                                cv.sizeof_CvContour, cv.CV_RETR_LIST,
                                                cv.CV_CHAIN_APPROX_SIMPLE, cv.cvPoint(0, 0))
            if not contours:
                continue
            # test each contour
            for contour in contours.hrange():
                # approximate contour with accuracy proportional
                # to the contour perimeter
                result = cv.cvApproxPoly(contour, cv.sizeof_CvContour, self.storage,
                                         cv.CV_POLY_APPROX_DP,
                                         cv.cvContourPerimeter(contours) * 0.02, 0)
                # square contours should have 4 vertices after approximation,
                # a relatively large area (to filter out noisy contours)
                # and be convex.
                # Note: the absolute value of the area is used because the
                # area may be positive or negative, in accordance with the
                # contour orientation
                if (result.total == 4 and
                        abs(cv.cvContourArea(result)) > 1000 and
                        cv.cvCheckContourConvexity(result)):
                    s = 0
                    for i in range(5):
                        # find the minimum angle between joint
                        # edges (maximum of cosine)
                        if i >= 2:
                            t = abs(self.squares_angle(result[i], result[i - 2], result[i - 1]))
                            if s < t:
                                s = t
                    # if the cosines of all angles are small
                    # (all angles are ~90 degrees) then write the quadrangle
                    # vertices to the resultant sequence
                    if s < 0.3:
                        for i in range(4):
                            squares.append(result[i])
    # draw each detected square back onto the input image
    i = 0
    while i < squares.total:
        pt = []
        # read 4 vertices
        pt.append(squares[i])
        pt.append(squares[i + 1])
        pt.append(squares[i + 2])
        pt.append(squares[i + 3])
        # draw the square as a closed polyline
        cv.cvPolyLine(img, [pt], 1, cv.CV_RGB(0, 255, 0), 3, cv.CV_AA, 0)
        i += 4
    return img
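# A minimal sketch of the squares_angle() helper assumed by both detect_squares()
# methods above; the original implementation is not shown in this excerpt.
# Following the classic OpenCV squares sample, it returns the cosine of the angle
# between the vectors pt0->pt1 and pt0->pt2, so a value near 0 means an angle
# near 90 degrees.
from math import sqrt

def squares_angle(pt1, pt2, pt0):
    dx1 = pt1.x - pt0.x
    dy1 = pt1.y - pt0.y
    dx2 = pt2.x - pt0.x
    dy2 = pt2.y - pt0.y
    # cosine of the angle between the two edge vectors; the small epsilon
    # guards against division by zero for degenerate points
    return ((dx1 * dx2 + dy1 * dy2) /
            sqrt((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2) + 1e-10))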
highgui.cvNamedWindow ("image", 1) highgui.cvShowImage ("image", image) # create the storage area storage = cv.cvCreateMemStorage (0) # find the contours nb_contours, contours = cv.cvFindContours (image, storage, cv.sizeof_CvContour, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE, cv.cvPoint (0,0)) # comment this out if you do not want approximation contours = cv.cvApproxPoly (contours, cv.sizeof_CvContour, storage, cv.CV_POLY_APPROX_DP, 3, 1) # create the window for the contours highgui.cvNamedWindow ("contours", 1) # create the trackbar, to enable the change of the displayed level highgui.cvCreateTrackbar ("levels+3", "contours", 3, 7, on_trackbar) # call one time the callback, so we will have the 1st display done on_trackbar (_DEFAULT_LEVEL) # wait a key pressed to end highgui.cvWaitKey (0)
# create window and display the original picture in it
highgui.cvNamedWindow("image", 1)
highgui.cvShowImage("image", image)

# create the storage area
storage = cv.cvCreateMemStorage(0)

# find the contours
nb_contours, contours = cv.cvFindContours(image, storage,
                                          cv.sizeof_CvContour,
                                          cv.CV_RETR_TREE,
                                          cv.CV_CHAIN_APPROX_SIMPLE,
                                          cv.cvPoint(0, 0))

# comment this out if you do not want approximation
contours = cv.cvApproxPoly(contours, cv.sizeof_CvContour, storage,
                           cv.CV_POLY_APPROX_DP, 3, 1)

# create the window for the contours
highgui.cvNamedWindow("contours", 1)

# create the trackbar, to enable changing the displayed level
highgui.cvCreateTrackbar("levels+3", "contours", 3, 7, on_trackbar)

# call the callback once, so the first display is done
on_trackbar(_DEFAULT_LEVEL)

# wait for a key press to end
highgui.cvWaitKey(0)
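# A hedged sketch of the on_trackbar() callback assumed by the demo above; its
# definition is not part of this excerpt and would appear before the code that
# registers it. Based on the standard OpenCV contours sample, it redraws the
# contour tree into the "contours" window at the level selected on the trackbar.
# The drawing colors and the reuse of `image` for the output size are assumptions.
def on_trackbar(position):
    # create and clear the image the contours are drawn into
    contours_image = cv.cvCreateImage(cv.cvSize(image.width, image.height), 8, 3)
    cv.cvSetZero(contours_image)
    levels = position - 3
    # draw the contour tree: outer contours in red, holes in green
    cv.cvDrawContours(contours_image, contours,
                      cv.CV_RGB(255, 0, 0), cv.CV_RGB(0, 255, 0),
                      levels, 3, cv.CV_AA, cv.cvPoint(0, 0))
    highgui.cvShowImage("contours", contours_image)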
target_mod = []
target_c = []

# temporary storage
storage1 = cv.cvCreateMemStorage()
storage2 = cv.cvCreateMemStorage()

# load each pattern, binarize it and find its contours
for i in range(0, len(padroes)):
    target.append(hg.cvLoadImage(padroes[i], hg.CV_LOAD_IMAGE_GRAYSCALE))
    target_mod.append(cv.cvCreateImage((target[i].width, target[i].height),
                                       cv.IPL_DEPTH_8U, 1))
    cv.cvThreshold(target[i], target_mod[i], 200, 254, cv.CV_THRESH_BINARY)
    target_c.append(cv.cvFindContours(target_mod[i], storage1)[1])
    target_c[i] = cv.cvApproxPoly(target_c[i], cv.sizeof_CvContour, storage1,
                                  cv.CV_POLY_APPROX_DP, 1)

frame_mod = cv.cvCreateImage((hg.cvGetCaptureProperty(capture, hg.CV_CAP_PROP_FRAME_WIDTH),
                              hg.cvGetCaptureProperty(capture, hg.CV_CAP_PROP_FRAME_HEIGHT)),
                             cv.IPL_DEPTH_8U, 1)

hg.cvNamedWindow("Resultado")
hg.cvNamedWindow("Binarizado")
#strut = cv.cvCreateStructuringElementEx(3,3,0,0,cv.CV_SHAPE_CROSS)

while True:
    turn = -1
    # capture a frame, binarize it, find its contours and check them
    # against the known patterns
    frame = hg.cvQueryFrame(capture)
    cv.cvCvtColor(frame, frame_mod, cv.CV_RGB2GRAY)
    cv.cvThreshold(frame_mod, frame_mod, 120, 254, cv.CV_THRESH_BINARY)
    hg.cvShowImage("Binarizado", frame_mod)