def __findcurve(self, img):
    # find every contour in img and store its points (as myPoint objects) in self.allcurve
    storage = cv.cvCreateMemStorage(0)
    nb_contours, cont = cv.cvFindContours(img, storage, cv.sizeof_CvContour,
                                          cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_NONE,
                                          cv.cvPoint(0, 0))
    cidx = int(random.random() * len(color))
    if self.drawcontour:
        cv.cvDrawContours(self.drawimg, cont, _white, _white, 1, 1,
                          cv.CV_AA, cv.cvPoint(0, 0))
    idx = 0
    for c in cont.hrange():
        PointArray = cv.cvCreateMat(1, c.total, cv.CV_32SC2)
        PointArray2D32f = cv.cvCreateMat(1, c.total, cv.CV_32FC2)
        cv.cvCvtSeqToArray(c, PointArray, cv.cvSlice(0, cv.CV_WHOLE_SEQ_END_INDEX))
        fpoints = []
        for i in range(c.total):
            kp = myPoint()
            kp.x = cv.cvGet2D(PointArray, 0, i)[0]
            kp.y = cv.cvGet2D(PointArray, 0, i)[1]
            kp.index = idx
            idx += 1
            fpoints.append(kp)
        self.allcurve.append(fpoints)
    self.curvelength = idx
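# myPoint is not defined in these snippets; __findcurve() and __findContour()
# only need a small holder for x, y and a running index, so a minimal assumed
# definition could be:
class myPoint:
    def __init__(self):
        self.x = 0.0     # point x coordinate (filled from cvGet2D)
        self.y = 0.0     # point y coordinate
        self.index = -1  # running index of the point across all curves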
def read(self):
    frame = self.input.read()
    cv_rs = cv.cvCreateImage(cv.cvSize(frame.width, frame.height), frame.depth, 1)
    cv.cvCvtColor(frame, cv_rs, cv.CV_RGB2GRAY)
    frame = cv_rs
    if self.enabled:
        # I think these functions are too specialized for transforms
        cv.cvSmooth(frame, frame, cv.CV_GAUSSIAN, 3, 0, 0, 0)
        cv.cvErode(frame, frame, None, 1)
        cv.cvDilate(frame, frame, None, 1)
        num_contours, contours = cv.cvFindContours(frame, self.storage,
                                                   cv.sizeof_CvContour,
                                                   cv.CV_RETR_LIST,
                                                   cv.CV_CHAIN_APPROX_NONE,
                                                   cv.cvPoint(0, 0))
        if contours is None:
            return []
        else:
            contours = cv.cvApproxPoly(contours, cv.sizeof_CvContour, self.storage,
                                       cv.CV_POLY_APPROX_DP, 3, 1)
            if contours is None:
                return []
            else:
                final_contours = []
                for c in contours.hrange():
                    area = abs(cv.cvContourArea(c))
                    # self.debug_print('Polygon Area: %f' % area)
                    if area >= self.min_area:
                        lst = []
                        for pt in c:
                            lst.append((pt.x, pt.y))
                        final_contours.append(lst)
                    contours = contours.h_next
                return final_contours
    return []
def findContours(image, getPolygon):
    storage = cv.cvCreateMemStorage(0)
    polyContourArray = []
    polyStorage = cv.cvCreateMemStorage(0)
    nb_contours, contours = cv.cvFindContours(image, storage, cv.sizeof_CvContour,
                                              cv.CV_RETR_TREE,
                                              cv.CV_CHAIN_APPROX_SIMPLE,
                                              cv.cvPoint(0, 0))
    if contours is None:
        return None
    contoursList = list(contours.hrange())
    if not getPolygon:
        ret = contoursList
    else:
        for contour in contoursList:
            per = cvContourPerimeter(contour)
            polyContourArray.append(
                cv.cvApproxPoly(contour, cv.sizeof_CvContour, storage,
                                cv.CV_POLY_APPROX_DP, per / PER_TOLERANCE, 0))
        ret = polyContourArray
    return ret
def __findContour(self, filename):
    # find the contours of the image and save all their points in self.allcurve
    self.img = highgui.cvLoadImage(filename)
    self.grayimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8, 1)
    self.drawimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8, 3)
    cv.cvCvtColor(self.img, self.grayimg, cv.CV_BGR2GRAY)
    cv.cvSmooth(self.grayimg, self.grayimg, cv.CV_BLUR, 9)
    cv.cvSmooth(self.grayimg, self.grayimg, cv.CV_BLUR, 9)
    cv.cvSmooth(self.grayimg, self.grayimg, cv.CV_BLUR, 9)
    cv.cvThreshold(self.grayimg, self.grayimg, self.threshold, self.threshold + 100,
                   cv.CV_THRESH_BINARY)
    cv.cvZero(self.drawimg)
    storage = cv.cvCreateMemStorage(0)
    nb_contours, cont = cv.cvFindContours(self.grayimg, storage, cv.sizeof_CvContour,
                                          cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_NONE,
                                          cv.cvPoint(0, 0))
    cv.cvDrawContours(self.drawimg, cont, cv.cvScalar(255, 255, 255, 0),
                      cv.cvScalar(255, 255, 255, 0), 1, 1, cv.CV_AA, cv.cvPoint(0, 0))
    self.allcurve = []
    idx = 0
    for c in cont.hrange():
        PointArray = cv.cvCreateMat(1, c.total, cv.CV_32SC2)
        PointArray2D32f = cv.cvCreateMat(1, c.total, cv.CV_32FC2)
        cv.cvCvtSeqToArray(c, PointArray, cv.cvSlice(0, cv.CV_WHOLE_SEQ_END_INDEX))
        fpoints = []
        for i in range(c.total):
            kp = myPoint()
            kp.x = cv.cvGet2D(PointArray, 0, i)[0]
            kp.y = cv.cvGet2D(PointArray, 0, i)[1]
            kp.index = idx
            idx += 1
            fpoints.append(kp)
        self.allcurve.append(fpoints)
    self.curvelength = idx
def getData():
    frame = highgui.cvQueryFrame(capture)
    if frame is None:
        return None
    # keep only pixels that are strongly red and weakly green/blue (the laser dot)
    cv.cvSplit(frame, b_img, g_img, r_img, None)
    cv.cvInRangeS(r_img, 150, 255, r_img)
    cv.cvInRangeS(g_img, 0, 100, g_img)
    cv.cvInRangeS(b_img, 0, 100, b_img)
    cv.cvAnd(r_img, g_img, laser_img)
    cv.cvAnd(laser_img, b_img, laser_img)
    # clean up speckle noise before looking for contours
    cv.cvErode(laser_img, laser_img)   # ,0,2)
    cv.cvDilate(laser_img, laser_img)
    c_count, contours = cv.cvFindContours(laser_img, storage, cv.sizeof_CvContour,
                                          cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_NONE,
                                          cv.cvPoint(0, 0))
    if c_count:
        return returnEllipses(contours)
    else:
        return None
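# getData() relies on module-level state (capture, storage, the per-channel
# images and a returnEllipses() helper) created elsewhere. A minimal assumed
# setup and polling loop, using the same legacy cv/highgui API, might look like:
capture = highgui.cvCreateCameraCapture(0)
first = highgui.cvQueryFrame(capture)
size = cv.cvSize(first.width, first.height)
b_img = cv.cvCreateImage(size, cv.IPL_DEPTH_8U, 1)
g_img = cv.cvCreateImage(size, cv.IPL_DEPTH_8U, 1)
r_img = cv.cvCreateImage(size, cv.IPL_DEPTH_8U, 1)
laser_img = cv.cvCreateImage(size, cv.IPL_DEPTH_8U, 1)
storage = cv.cvCreateMemStorage(0)

while True:
    ellipses = getData()   # returnEllipses() itself is assumed to be defined elsewhere
    if ellipses is not None:
        print(ellipses)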
def read(self):
    frame = self.input.read()
    if self.debug:
        raw_frame = cv.cvCreateImage(cv.cvSize(frame.width, frame.height),
                                     frame.depth, frame.nChannels)
        cv.cvCopy(frame, raw_frame, None)
        self.raw_frame_surface = pygame.image.frombuffer(
            frame.imageData, (frame.width, frame.height), 'RGB')
    if self.enabled:
        cv_rs = cv.cvCreateImage(cv.cvSize(frame.width, frame.height), frame.depth, 1)
        # convert color
        cv.cvCvtColor(frame, cv_rs, cv.CV_BGR2GRAY)
        # invert the image
        cv.cvSubRS(cv_rs, 255, cv_rs, None)
        # threshold the image
        frame = cv.cvCreateImage(cv.cvSize(frame.width, frame.height), frame.depth, 1)
        cv.cvThreshold(cv_rs, frame, self.threshold, 255, cv.CV_THRESH_BINARY)
        if self.debug:
            thresh_frame = cv.cvCreateImage(cv.cvSize(frame.width, frame.height),
                                            frame.depth, 3)
            cv.cvCvtColor(frame, thresh_frame, cv.CV_GRAY2RGB)
            self.thresh_frame_surface = pygame.image.frombuffer(
                thresh_frame.imageData, (frame.width, frame.height), 'RGB')
        # I think these functions are too specialized for transforms
        cv.cvSmooth(frame, frame, cv.CV_GAUSSIAN, 3, 0, 0, 0)
        cv.cvErode(frame, frame, None, 1)
        cv.cvDilate(frame, frame, None, 1)
        num_contours, contours = cv.cvFindContours(frame, self.storage,
                                                   cv.sizeof_CvContour,
                                                   cv.CV_RETR_LIST,
                                                   cv.CV_CHAIN_APPROX_NONE,
                                                   cv.cvPoint(0, 0))
        if contours is None:
            return []
        else:
            contours = cv.cvApproxPoly(contours, cv.sizeof_CvContour, self.storage,
                                       cv.CV_POLY_APPROX_DP, 3, 1)
            if contours is None:
                return []
            else:
                final_contours = []
                for c in contours.hrange():
                    area = abs(cv.cvContourArea(c))
                    # self.debug_print('Polygon Area: %f' % area)
                    if area >= self.min_area:
                        lst = []
                        for pt in c:
                            lst.append((pt.x, pt.y))
                        final_contours.append(lst)
                    contours = contours.h_next
                return final_contours
    return []
def get_contours(self, image, threshold):
    storage = cv.cvCreateMemStorage(0)
    image = self.clone_image(image)
    num_contours, contours = cv.cvFindContours(image, storage, cv.sizeof_CvContour,
                                               cv.CV_RETR_LIST,
                                               cv.CV_CHAIN_APPROX_NONE,
                                               cv.cvPoint(0, 0))
    # one independent point list per contour ([[]] * n would alias the same list)
    contour_points = [[] for _ in range(num_contours)]
    for contour_index, contour in enumerate(contours.hrange()):
        for point in contour:
            contour_points[contour_index].append((point.x, point.y))
    cv.cvReleaseImage(image)
    cv.cvReleaseMemStorage(storage)
    return contour_points
def read(self):
    frame = self.input.read()
    if self.enabled:
        cv_rs = [cv.cvCreateImage(cv.cvSize(frame.width, frame.height),
                                  frame.depth, 1)
                 for i in range(3)]
        cv.cvSplit(frame, cv_rs[0], cv_rs[1], cv_rs[2], None)
        channel_contours = []
        # find contours in each colour channel separately
        for frame in cv_rs:
            # I think these functions are too specialized for transforms
            # cv.cvSmooth(frame, frame, cv.CV_GAUSSIAN, 3, 0, 0, 0)
            # cv.cvErode(frame, frame, None, 1)
            # cv.cvDilate(frame, frame, None, 1)
            num_contours, contours = cv.cvFindContours(frame, self.storage,
                                                       cv.sizeof_CvContour,
                                                       cv.CV_RETR_LIST,
                                                       cv.CV_CHAIN_APPROX_NONE,
                                                       cv.cvPoint(0, 0))
            if contours is None:
                channel_contours.append([])
            else:
                contours = cv.cvApproxPoly(contours, cv.sizeof_CvContour,
                                           self.storage, cv.CV_POLY_APPROX_DP, 3, 1)
                if contours is None:
                    channel_contours.append([])
                else:
                    final_contours = []
                    for c in contours.hrange():
                        area = abs(cv.cvContourArea(c))
                        # self.debug_print('Polygon Area: %f' % area)
                        if area >= self.min_area:
                            lst = []
                            for pt in c:
                                lst.append((pt.x, pt.y))
                            final_contours.append(lst)
                        contours = contours.h_next
                    channel_contours.append(final_contours)
        return channel_contours
def detect_squares(self, img_grey, img_orig):
    """ Find squares within the video stream and draw them """
    cv.cvClearMemStorage(self.faces_storage)
    N = 11
    thresh = 5
    sz = cv.cvSize(img_grey.width & -2, img_grey.height & -2)
    timg = cv.cvCloneImage(img_orig)
    pyr = cv.cvCreateImage(cv.cvSize(sz.width / 2, sz.height / 2), 8, 3)
    # create empty sequence that will contain points -
    # 4 points per square (the square's vertices)
    squares = cv.cvCreateSeq(0, cv.sizeof_CvSeq, cv.sizeof_CvPoint,
                             self.squares_storage)
    squares = cv.CvSeq_CvPoint.cast(squares)
    # select the maximum ROI in the image
    # with the width and height divisible by 2
    subimage = cv.cvGetSubRect(timg, cv.cvRect(0, 0, sz.width, sz.height))
    cv.cvReleaseImage(timg)
    # down-scale and upscale the image to filter out the noise
    cv.cvPyrDown(subimage, pyr, 7)
    cv.cvPyrUp(pyr, subimage, 7)
    cv.cvReleaseImage(pyr)
    tgrey = cv.cvCreateImage(sz, 8, 1)
    # find squares in every color plane of the image
    for c in range(3):
        # extract the c-th color plane
        channels = [None, None, None]
        channels[c] = tgrey
        cv.cvSplit(subimage, channels[0], channels[1], channels[2], None)
        for l in range(N):
            # hack: use Canny instead of zero threshold level.
            # Canny helps to catch squares with gradient shading
            if l == 0:
                # apply Canny. Take the upper threshold from slider
                # and set the lower to 0 (which forces edges merging)
                cv.cvCanny(tgrey, img_grey, 0, thresh, 5)
                # dilate canny output to remove potential
                # holes between edge segments
                cv.cvDilate(img_grey, img_grey, None, 1)
            else:
                # apply threshold if l != 0:
                # tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                cv.cvThreshold(tgrey, img_grey, (l + 1) * 255 / N, 255,
                               cv.CV_THRESH_BINARY)
            # find contours and store them all as a list
            count, contours = cv.cvFindContours(img_grey, self.squares_storage,
                                                cv.sizeof_CvContour, cv.CV_RETR_LIST,
                                                cv.CV_CHAIN_APPROX_SIMPLE,
                                                cv.cvPoint(0, 0))
            if not contours:
                continue
            # test each contour
            for contour in contours.hrange():
                # approximate contour with accuracy proportional
                # to the contour perimeter
                result = cv.cvApproxPoly(contour, cv.sizeof_CvContour,
                                         self.squares_storage, cv.CV_POLY_APPROX_DP,
                                         cv.cvContourPerimeter(contour) * 0.02, 0)
                # square contours should have 4 vertices after approximation,
                # a relatively large area (to filter out noisy contours)
                # and be convex.
                # Note: absolute value of the area is used because the
                # area may be positive or negative - in accordance with the
                # contour orientation
                if (result.total == 4 and
                        abs(cv.cvContourArea(result)) > 1000 and
                        cv.cvCheckContourConvexity(result)):
                    s = 0
                    for i in range(5):
                        # find minimum angle between joint
                        # edges (maximum of cosine)
                        if i >= 2:
                            t = abs(self.squares_angle(result[i], result[i - 2],
                                                       result[i - 1]))
                            if s < t:
                                s = t
                    # if cosines of all angles are small
                    # (all angles are ~90 degree) then write quadrangle
                    # vertices to the resultant sequence
                    if s < 0.3:
                        for i in range(4):
                            squares.append(result[i])
    cv.cvReleaseImage(tgrey)
    return squares
cv.cvEllipse(image, cv.cvPoint(dx + 27, dy + 100), cv.cvSize(20, 35),
             0, 0, 360, _white, -1, 8, 0)
cv.cvEllipse(image, cv.cvPoint(dx + 273, dy + 100), cv.cvSize(20, 35),
             0, 0, 360, _white, -1, 8, 0)

# create window and display the original picture in it
highgui.cvNamedWindow("image", 1)
highgui.cvShowImage("image", image)

# create the storage area
storage = cv.cvCreateMemStorage(0)

# find the contours
nb_contours, contours = cv.cvFindContours(image, storage, cv.sizeof_CvContour,
                                          cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE,
                                          cv.cvPoint(0, 0))

# comment this out if you do not want approximation
contours = cv.cvApproxPoly(contours, cv.sizeof_CvContour, storage,
                           cv.CV_POLY_APPROX_DP, 3, 1)

# create the window for the contours
highgui.cvNamedWindow("contours", 1)

# create the trackbar, to enable the change of the displayed level
highgui.cvCreateTrackbar("levels+3", "contours", 3, 7, on_trackbar)

# call one time the callback, so we will have the 1st display done
on_trackbar(_DEFAULT_LEVEL)
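# The fragment above expects an on_trackbar() callback and a _DEFAULT_LEVEL
# constant to be defined before that code runs. A compatible callback could
# look roughly like this; contours_image, _red and _green are assumed
# module-level names (a drawing canvas and two CvScalar colours):
_DEFAULT_LEVEL = 3  # assumed to match the trackbar's initial position

def on_trackbar(position):
    # map the trackbar position to a contour-tree level and redraw
    levels = position - 3
    cv.cvZero(contours_image)
    cv.cvDrawContours(contours_image, contours, _red, _green,
                      levels, 3, cv.CV_AA, cv.cvPoint(0, 0))
    highgui.cvShowImage("contours", contours_image)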
def detect_squares(self, img):
    """ Find squares within the video stream and draw them """
    N = 11
    thresh = 5
    sz = cv.cvSize(img.width & -2, img.height & -2)
    timg = cv.cvCloneImage(img)
    gray = cv.cvCreateImage(sz, 8, 1)
    pyr = cv.cvCreateImage(cv.cvSize(sz.width / 2, sz.height / 2), 8, 3)
    # create empty sequence that will contain points -
    # 4 points per square (the square's vertices)
    squares = cv.cvCreateSeq(0, cv.sizeof_CvSeq, cv.sizeof_CvPoint, self.storage)
    squares = cv.CvSeq_CvPoint.cast(squares)
    # select the maximum ROI in the image
    # with the width and height divisible by 2
    subimage = cv.cvGetSubRect(timg, cv.cvRect(0, 0, sz.width, sz.height))
    # down-scale and upscale the image to filter out the noise
    cv.cvPyrDown(subimage, pyr, 7)
    cv.cvPyrUp(pyr, subimage, 7)
    tgray = cv.cvCreateImage(sz, 8, 1)
    # find squares in every color plane of the image
    for c in range(3):
        # extract the c-th color plane
        channels = [None, None, None]
        channels[c] = tgray
        cv.cvSplit(subimage, channels[0], channels[1], channels[2], None)
        for l in range(N):
            # hack: use Canny instead of zero threshold level.
            # Canny helps to catch squares with gradient shading
            if l == 0:
                # apply Canny. Take the upper threshold from slider
                # and set the lower to 0 (which forces edges merging)
                cv.cvCanny(tgray, gray, 0, thresh, 5)
                # dilate canny output to remove potential
                # holes between edge segments
                cv.cvDilate(gray, gray, None, 1)
            else:
                # apply threshold if l != 0:
                # tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                cv.cvThreshold(tgray, gray, (l + 1) * 255 / N, 255,
                               cv.CV_THRESH_BINARY)
            # find contours and store them all as a list
            count, contours = cv.cvFindContours(gray, self.storage,
                                                cv.sizeof_CvContour, cv.CV_RETR_LIST,
                                                cv.CV_CHAIN_APPROX_SIMPLE,
                                                cv.cvPoint(0, 0))
            if not contours:
                continue
            # test each contour
            for contour in contours.hrange():
                # approximate contour with accuracy proportional
                # to the contour perimeter
                result = cv.cvApproxPoly(contour, cv.sizeof_CvContour, self.storage,
                                         cv.CV_POLY_APPROX_DP,
                                         cv.cvContourPerimeter(contour) * 0.02, 0)
                # square contours should have 4 vertices after approximation,
                # a relatively large area (to filter out noisy contours)
                # and be convex.
                # Note: absolute value of the area is used because the
                # area may be positive or negative - in accordance with the
                # contour orientation
                if (result.total == 4 and
                        abs(cv.cvContourArea(result)) > 1000 and
                        cv.cvCheckContourConvexity(result)):
                    s = 0
                    for i in range(5):
                        # find minimum angle between joint
                        # edges (maximum of cosine)
                        if i >= 2:
                            t = abs(self.squares_angle(result[i], result[i - 2],
                                                       result[i - 1]))
                            if s < t:
                                s = t
                    # if cosines of all angles are small
                    # (all angles are ~90 degree) then write quadrangle
                    # vertices to the resultant sequence
                    if s < 0.3:
                        for i in range(4):
                            squares.append(result[i])
    i = 0
    while i < squares.total:
        pt = []
        # read 4 vertices
        pt.append(squares[i])
        pt.append(squares[i + 1])
        pt.append(squares[i + 2])
        pt.append(squares[i + 3])
        # draw the square as a closed polyline
        cv.cvPolyLine(img, [pt], 1, cv.CV_RGB(0, 255, 0), 3, cv.CV_AA, 0)
        i += 4
    return img
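# Both detect_squares() variants call self.squares_angle(), which is not shown
# in these snippets. In the OpenCV squares demo the equivalent helper returns
# the cosine of the angle between the edges (pt0, pt1) and (pt0, pt2); an
# assumed method on the same class could be:
import math

def squares_angle(self, pt1, pt2, pt0):
    dx1 = pt1.x - pt0.x
    dy1 = pt1.y - pt0.y
    dx2 = pt2.x - pt0.x
    dy2 = pt2.y - pt0.y
    # cosine of the angle between the two edge vectors (the small epsilon
    # avoids division by zero for degenerate edges)
    return (dx1 * dx2 + dy1 * dy2) / math.sqrt(
        (dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2) + 1e-10)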
def process_image(slider_pos):
    """ Trackbar callback: find contours, draw them and approximate them by ellipses. """
    stor = cv.cvCreateMemStorage(0)
    # Threshold the source image. This is needed for cv.cvFindContours().
    cv.cvThreshold(image03, image02, slider_pos, 255, cv.CV_THRESH_BINARY)
    # Find all contours.
    nb_contours, cont = cv.cvFindContours(image02, stor, cv.sizeof_CvContour,
                                          cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_NONE,
                                          cv.cvPoint(0, 0))
    # Clear images. IPL use.
    cv.cvZero(image02)
    cv.cvZero(image04)
    # This loop draws all contours and approximates each by an ellipse.
    for c in cont.hrange():
        count = c.total  # number of points in the contour
        # cv.cvFitEllipse2 needs at least 6 points.
        if count < 6:
            continue
        # Alloc memory for the contour point set.
        PointArray = cv.cvCreateMat(1, count, cv.CV_32SC2)
        PointArray2D32f = cv.cvCreateMat(1, count, cv.CV_32FC2)
        # Get contour point set.
        cv.cvCvtSeqToArray(c, PointArray, cv.cvSlice(0, cv.CV_WHOLE_SEQ_END_INDEX))
        # Convert CvPoint set to CvBox2D32f set.
        cv.cvConvert(PointArray, PointArray2D32f)
        box = cv.CvBox2D()
        # Fit an ellipse to the current contour.
        box = cv.cvFitEllipse2(PointArray2D32f)
        # Draw the current contour.
        cv.cvDrawContours(image04, c, cv.CV_RGB(255, 255, 255),
                          cv.CV_RGB(255, 255, 255), 0, 1, 8, cv.cvPoint(0, 0))
        # Convert ellipse data from float to integer representation.
        center = cv.CvPoint()
        size = cv.CvSize()
        center.x = cv.cvRound(box.center.x)
        center.y = cv.cvRound(box.center.y)
        size.width = cv.cvRound(box.size.width * 0.5)
        size.height = cv.cvRound(box.size.height * 0.5)
        box.angle = -box.angle
        # Draw the ellipse.
        cv.cvEllipse(image04, center, size, box.angle, 0, 360,
                     cv.CV_RGB(0, 0, 255), 1, cv.CV_AA, 0)
    # Show the image. HighGUI use.
    highgui.cvShowImage("Result", image04)
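# process_image() is a trackbar callback; the images and window it uses
# (image02, image03, image04, "Result") must be created by the caller. A
# minimal assumed driver, following the usual highgui pattern, might be
# ("source.png" is a placeholder path):
image03 = highgui.cvLoadImage("source.png", highgui.CV_LOAD_IMAGE_GRAYSCALE)
image02 = cv.cvCloneImage(image03)   # working copy for thresholding
image04 = cv.cvCloneImage(image03)   # output image the ellipses are drawn on
highgui.cvNamedWindow("Result", 1)
highgui.cvCreateTrackbar("Threshold", "Result", 128, 255, process_image)
process_image(128)   # draw once with the assumed initial threshold
highgui.cvWaitKey(0)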
target = []
target_mod = []
target_c = []

# Temporary storage
storage1 = cv.cvCreateMemStorage()
storage2 = cv.cvCreateMemStorage()

# Load the patterns, binarize them and find their contours
for i in range(0, len(padroes)):
    target.append(hg.cvLoadImage(padroes[i], hg.CV_LOAD_IMAGE_GRAYSCALE))
    target_mod.append(cv.cvCreateImage((target[i].width, target[i].height),
                                       cv.IPL_DEPTH_8U, 1))
    cv.cvThreshold(target[i], target_mod[i], 200, 254, cv.CV_THRESH_BINARY)
    target_c.append(cv.cvFindContours(target_mod[i], storage1)[1])
    target_c[i] = cv.cvApproxPoly(target_c[i], cv.sizeof_CvContour, storage1,
                                  cv.CV_POLY_APPROX_DP, 1)

frame_mod = cv.cvCreateImage(
    (hg.cvGetCaptureProperty(capture, hg.CV_CAP_PROP_FRAME_WIDTH),
     hg.cvGetCaptureProperty(capture, hg.CV_CAP_PROP_FRAME_HEIGHT)),
    cv.IPL_DEPTH_8U, 1)

hg.cvNamedWindow("Resultado")
hg.cvNamedWindow("Binarizado")
# strut = cv.cvCreateStructuringElementEx(3, 3, 0, 0, cv.CV_SHAPE_CROSS)

while True:
    turn = -1
    # Capture a frame, binarize it, find its contours and check them
    # against the known patterns
    frame = hg.cvQueryFrame(capture)
    cv.cvCvtColor(frame, frame_mod, cv.CV_RGB2GRAY)
    cv.cvThreshold(frame_mod, frame_mod, 120, 254, cv.CV_THRESH_BINARY)