def read(self):
    frame = self.input.read()
    cv_rs = cv.cvCreateImage(cv.cvSize(frame.width, frame.height), frame.depth, 1)
    cv.cvCvtColor(frame, cv_rs, cv.CV_RGB2GRAY)
    frame = cv_rs
    if self.enabled:
        # I think these functions are too specialized for transforms
        cv.cvSmooth(frame, frame, cv.CV_GAUSSIAN, 3, 0, 0, 0)
        cv.cvErode(frame, frame, None, 1)
        cv.cvDilate(frame, frame, None, 1)
        num_contours, contours = cv.cvFindContours(frame, self.storage,
                                                   cv.sizeof_CvContour,
                                                   cv.CV_RETR_LIST,
                                                   cv.CV_CHAIN_APPROX_NONE,
                                                   cv.cvPoint(0, 0))
        if contours is None:
            return []
        contours = cv.cvApproxPoly(contours, cv.sizeof_CvContour, self.storage,
                                   cv.CV_POLY_APPROX_DP, 3, 1)
        if contours is None:
            return []
        final_contours = []
        for c in contours.hrange():
            area = abs(cv.cvContourArea(c))
            #self.debug_print('Polygon Area: %f' % area)
            if area >= self.min_area:
                lst = []
                for pt in c:
                    lst.append((pt.x, pt.y))
                final_contours.append(lst)
            contours = contours.h_next
        return final_contours
    return []
def getData():
    frame = highgui.cvQueryFrame(capture)
    if frame is None:
        return None
    # split the frame into single-channel planes
    cv.cvSplit(frame, b_img, g_img, r_img, None)
    # keep pixels that are strongly red and weak in green and blue
    cv.cvInRangeS(r_img, 150, 255, r_img)
    cv.cvInRangeS(g_img, 0, 100, g_img)
    cv.cvInRangeS(b_img, 0, 100, b_img)
    # AND the three masks together to isolate the laser dot
    cv.cvAnd(r_img, g_img, laser_img)
    cv.cvAnd(laser_img, b_img, laser_img)
    # clean up speckle noise (erode then dilate)
    cv.cvErode(laser_img, laser_img)  # ,0,2)
    cv.cvDilate(laser_img, laser_img)
    c_count, contours = cv.cvFindContours(laser_img, storage,
                                          cv.sizeof_CvContour,
                                          cv.CV_RETR_LIST,
                                          cv.CV_CHAIN_APPROX_NONE,
                                          cv.cvPoint(0, 0))
    if c_count:
        return returnEllipses(contours)
    else:
        return None
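# getData() relies on module-level state that is not shown in this snippet
# (capture, the per-channel scratch images, the contour storage, and
# returnEllipses). A minimal setup sketch under that assumption; the camera
# index is illustrative, and cvCreateMemStorage is assumed to be exposed by
# these bindings the same way the other cv* calls are:
capture = highgui.cvCreateCameraCapture(0)
frame = highgui.cvQueryFrame(capture)
size = cv.cvSize(frame.width, frame.height)
b_img = cv.cvCreateImage(size, 8, 1)      # per-channel scratch buffers
g_img = cv.cvCreateImage(size, 8, 1)
r_img = cv.cvCreateImage(size, 8, 1)
laser_img = cv.cvCreateImage(size, 8, 1)  # combined red-laser mask
storage = cv.cvCreateMemStorage(0)        # memory pool for cvFindContours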
def read(self):
    frame = self.input.read()
    if self.debug:
        raw_frame = cv.cvCreateImage(cv.cvSize(frame.width, frame.height),
                                     frame.depth, frame.nChannels)
        cv.cvCopy(frame, raw_frame, None)
        self.raw_frame_surface = pygame.image.frombuffer(frame.imageData,
                                                         (frame.width, frame.height),
                                                         'RGB')
    if self.enabled:
        cv_rs = cv.cvCreateImage(cv.cvSize(frame.width, frame.height), frame.depth, 1)
        # convert color
        cv.cvCvtColor(frame, cv_rs, cv.CV_BGR2GRAY)
        # invert the image
        cv.cvSubRS(cv_rs, 255, cv_rs, None)
        # threshold the image
        frame = cv.cvCreateImage(cv.cvSize(frame.width, frame.height), frame.depth, 1)
        cv.cvThreshold(cv_rs, frame, self.threshold, 255, cv.CV_THRESH_BINARY)
        if self.debug:
            thresh_frame = cv.cvCreateImage(cv.cvSize(frame.width, frame.height),
                                            frame.depth, 3)
            cv.cvCvtColor(frame, thresh_frame, cv.CV_GRAY2RGB)
            self.thresh_frame_surface = pygame.image.frombuffer(thresh_frame.imageData,
                                                                (frame.width, frame.height),
                                                                'RGB')
        # I think these functions are too specialized for transforms
        cv.cvSmooth(frame, frame, cv.CV_GAUSSIAN, 3, 0, 0, 0)
        cv.cvErode(frame, frame, None, 1)
        cv.cvDilate(frame, frame, None, 1)
        num_contours, contours = cv.cvFindContours(frame, self.storage,
                                                   cv.sizeof_CvContour,
                                                   cv.CV_RETR_LIST,
                                                   cv.CV_CHAIN_APPROX_NONE,
                                                   cv.cvPoint(0, 0))
        if contours is None:
            return []
        contours = cv.cvApproxPoly(contours, cv.sizeof_CvContour, self.storage,
                                   cv.CV_POLY_APPROX_DP, 3, 1)
        if contours is None:
            return []
        final_contours = []
        for c in contours.hrange():
            area = abs(cv.cvContourArea(c))
            #self.debug_print('Polygon Area: %f' % area)
            if area >= self.min_area:
                lst = []
                for pt in c:
                    lst.append((pt.x, pt.y))
                final_contours.append(lst)
            contours = contours.h_next
        return final_contours
    return []
def main():
    # ctrl+c to end
    global h, s, v, h2, v2, s2, d, e
    highgui.cvNamedWindow("Camera 1", 1)
    highgui.cvNamedWindow("Orig", 1)
    highgui.cvCreateTrackbar("H", "Camera 1", h, 256, tb_h)
    highgui.cvCreateTrackbar("S", "Camera 1", s, 256, tb_s)
    highgui.cvCreateTrackbar("V", "Camera 1", v, 256, tb_v)
    highgui.cvCreateTrackbar("H2", "Camera 1", h2, 256, tb_h2)
    highgui.cvCreateTrackbar("S2", "Camera 1", s2, 256, tb_s2)
    highgui.cvCreateTrackbar("V2", "Camera 1", v2, 256, tb_v2)
    highgui.cvCreateTrackbar("Dilate", "Camera 1", d, 30, tb_d)
    highgui.cvCreateTrackbar("Erode", "Camera 1", e, 30, tb_e)
    cap = highgui.cvCreateCameraCapture(1)
    highgui.cvSetCaptureProperty(cap, highgui.CV_CAP_PROP_FRAME_WIDTH, IMGW)
    highgui.cvSetCaptureProperty(cap, highgui.CV_CAP_PROP_FRAME_HEIGHT, IMGH)
    c = 0
    t1 = tdraw = time.clock()
    t = 1
    font = cv.cvInitFont(cv.CV_FONT_HERSHEY_PLAIN, 1, 1)
    while c != 0x27:
        image = highgui.cvQueryFrame(cap)
        if not image:
            print "capture failed"
            break
        # threshold the HSV image against the trackbar-selected range
        thresh = cv.cvCreateImage(cv.cvSize(IMGW, IMGH), 8, 1)
        cv.cvSetZero(thresh)
        cv.cvCvtColor(image, image, cv.CV_RGB2HSV)
        cv.cvInRangeS(image, (h, s, v, 0), (h2, s2, v2, 0), thresh)
        # copy only the in-range pixels into the result image
        result = cv.cvCreateImage(cv.cvSize(IMGW, IMGH), 8, 3)
        cv.cvSetZero(result)
        cv.cvOr(image, image, result, thresh)
        for i in range(1, e):
            cv.cvErode(result, result)
        for i in range(1, d):
            cv.cvDilate(result, result)
        # floodfill objects back in, allowing threshold differences outwards
        t2 = time.clock()
        if t2 > tdraw + 0.3:
            # re-sample the frame time roughly every 0.3 seconds
            t = t2 - t1
            tdraw = t2
        cv.cvPutText(result, "FPS: " + str(1 / (t)), (0, 25), font, (255, 255, 255))
        t1 = t2
        highgui.cvShowImage("Orig", image)
        highgui.cvShowImage("Camera 1", result)
        c = highgui.cvWaitKey(10)
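# The trackbar callbacks passed to cvCreateTrackbar above (tb_h, tb_s, ...)
# are not part of this snippet. A minimal sketch of what they presumably do,
# shown for two of them; each simply stores the new slider position in the
# matching global:
def tb_h(position):
    global h
    h = position

def tb_h2(position):
    global h2
    h2 = position
# ... and likewise for tb_s, tb_v, tb_s2, tb_v2, tb_d and tb_e.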
def read(self):
    frame = self.input.read()
    if self.debug:
        raw_frame = cv.cvCreateImage(cv.cvSize(frame.width, frame.height),
                                     frame.depth, frame.nChannels)
        cv.cvCopy(frame, raw_frame, None)
        self.raw_frame_surface = pygame.image.frombuffer(frame.imageData,
                                                         (frame.width, frame.height),
                                                         'RGB')
    if self.enabled:
        # pull out the red plane only
        cvt_red = cv.cvCreateImage(cv.cvSize(frame.width, frame.height), frame.depth, 1)
        cv.cvSplit(frame, None, None, cvt_red, None)
        if self.debug:
            red_frame = cv.cvCreateImage(cv.cvSize(cvt_red.width, cvt_red.height),
                                         cvt_red.depth, 3)
            cv.cvMerge(cvt_red, None, None, None, red_frame)
            self.red_frame_surface = pygame.image.frombuffer(red_frame.imageData,
                                                             (cvt_red.width, cvt_red.height),
                                                             'RGB')
        # I think these functions are too specialized for transforms
        cv.cvSmooth(cvt_red, cvt_red, cv.CV_GAUSSIAN, 3, 0, 0, 0)
        cv.cvErode(cvt_red, cvt_red, None, 1)
        cv.cvDilate(cvt_red, cvt_red, None, 1)
        if self.debug:
            thresh_frame = cv.cvCreateImage(cv.cvSize(cvt_red.width, cvt_red.height),
                                            cvt_red.depth, 3)
            cv.cvMerge(cvt_red, None, None, None, thresh_frame)
            # use the merged 3-channel buffer here; the single-channel image is
            # too small for an 'RGB' pygame surface
            self.thresh_frame_surface = pygame.image.frombuffer(thresh_frame.imageData,
                                                                (cvt_red.width, cvt_red.height),
                                                                'RGB')
        # the brightest point in the red plane is taken as the pointer location
        cvpt_min = cv.cvPoint(0, 0)
        cvpt_max = cv.cvPoint(0, 0)
        t = cv.cvMinMaxLoc(cvt_red, cvpt_min, cvpt_max)
        print t
        if cvpt_max.x == 0 and cvpt_max.y == 0:
            return []
        return [(cvpt_max.x, cvpt_max.y)]
def detect_squares(self, img_grey, img_orig):
    """ Find squares within the video stream and draw them """
    cv.cvClearMemStorage(self.faces_storage)
    N = 11
    thresh = 5
    sz = cv.cvSize(img_grey.width & -2, img_grey.height & -2)
    timg = cv.cvCloneImage(img_orig)
    pyr = cv.cvCreateImage(cv.cvSize(sz.width / 2, sz.height / 2), 8, 3)
    # create empty sequence that will contain points -
    # 4 points per square (the square's vertices)
    squares = cv.cvCreateSeq(0, cv.sizeof_CvSeq, cv.sizeof_CvPoint, self.squares_storage)
    squares = cv.CvSeq_CvPoint.cast(squares)
    # select the maximum ROI in the image
    # with the width and height divisible by 2
    subimage = cv.cvGetSubRect(timg, cv.cvRect(0, 0, sz.width, sz.height))
    cv.cvReleaseImage(timg)
    # down-scale and upscale the image to filter out the noise
    cv.cvPyrDown(subimage, pyr, 7)
    cv.cvPyrUp(pyr, subimage, 7)
    cv.cvReleaseImage(pyr)
    tgrey = cv.cvCreateImage(sz, 8, 1)
    # find squares in every color plane of the image
    for c in range(3):
        # extract the c-th color plane
        channels = [None, None, None]
        channels[c] = tgrey
        cv.cvSplit(subimage, channels[0], channels[1], channels[2], None)
        for l in range(N):
            # hack: use Canny instead of zero threshold level.
            # Canny helps to catch squares with gradient shading
            if l == 0:
                # apply Canny. Take the upper threshold from slider
                # and set the lower to 0 (which forces edges merging)
                cv.cvCanny(tgrey, img_grey, 0, thresh, 5)
                # dilate canny output to remove potential
                # holes between edge segments
                cv.cvDilate(img_grey, img_grey, None, 1)
            else:
                # apply threshold:
                # tgrey(x,y) = grey(x,y) < (l+1)*255/N ? 255 : 0
                cv.cvThreshold(tgrey, img_grey, (l + 1) * 255 / N, 255,
                               cv.CV_THRESH_BINARY)
            # find contours and store them all as a list
            count, contours = cv.cvFindContours(img_grey,
                                                self.squares_storage,
                                                cv.sizeof_CvContour,
                                                cv.CV_RETR_LIST,
                                                cv.CV_CHAIN_APPROX_SIMPLE,
                                                cv.cvPoint(0, 0))
            if not contours:
                continue
            # test each contour
            for contour in contours.hrange():
                # approximate contour with accuracy proportional
                # to the contour perimeter
                result = cv.cvApproxPoly(contour, cv.sizeof_CvContour,
                                         self.squares_storage, cv.CV_POLY_APPROX_DP,
                                         cv.cvContourPerimeter(contours) * 0.02, 0)
                # square contours should have 4 vertices after approximation,
                # a relatively large area (to filter out noisy contours)
                # and be convex.
                # Note: absolute value of an area is used because
                # area may be positive or negative - in accordance with the
                # contour orientation
                if (result.total == 4 and
                        abs(cv.cvContourArea(result)) > 1000 and
                        cv.cvCheckContourConvexity(result)):
                    s = 0
                    for i in range(5):
                        # find minimum angle between joint
                        # edges (maximum of cosine)
                        if i >= 2:
                            t = abs(self.squares_angle(result[i], result[i - 2],
                                                       result[i - 1]))
                            if s < t:
                                s = t
                    # if cosines of all angles are small
                    # (all angles are ~90 degrees) then write the quadrangle
                    # vertices to the resultant sequence
                    if s < 0.3:
                        for i in range(4):
                            squares.append(result[i])
    cv.cvReleaseImage(tgrey)
    return squares
def detect_squares(self, img):
    """ Find squares within the video stream and draw them """
    N = 11
    thresh = 5
    sz = cv.cvSize(img.width & -2, img.height & -2)
    timg = cv.cvCloneImage(img)
    gray = cv.cvCreateImage(sz, 8, 1)
    pyr = cv.cvCreateImage(cv.cvSize(sz.width / 2, sz.height / 2), 8, 3)
    # create empty sequence that will contain points -
    # 4 points per square (the square's vertices)
    squares = cv.cvCreateSeq(0, cv.sizeof_CvSeq, cv.sizeof_CvPoint, self.storage)
    squares = cv.CvSeq_CvPoint.cast(squares)
    # select the maximum ROI in the image
    # with the width and height divisible by 2
    subimage = cv.cvGetSubRect(timg, cv.cvRect(0, 0, sz.width, sz.height))
    # down-scale and upscale the image to filter out the noise
    cv.cvPyrDown(subimage, pyr, 7)
    cv.cvPyrUp(pyr, subimage, 7)
    tgray = cv.cvCreateImage(sz, 8, 1)
    # find squares in every color plane of the image
    for c in range(3):
        # extract the c-th color plane
        channels = [None, None, None]
        channels[c] = tgray
        cv.cvSplit(subimage, channels[0], channels[1], channels[2], None)
        for l in range(N):
            # hack: use Canny instead of zero threshold level.
            # Canny helps to catch squares with gradient shading
            if l == 0:
                # apply Canny. Take the upper threshold from slider
                # and set the lower to 0 (which forces edges merging)
                cv.cvCanny(tgray, gray, 0, thresh, 5)
                # dilate canny output to remove potential
                # holes between edge segments
                cv.cvDilate(gray, gray, None, 1)
            else:
                # apply threshold:
                # tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                cv.cvThreshold(tgray, gray, (l + 1) * 255 / N, 255,
                               cv.CV_THRESH_BINARY)
            # find contours and store them all as a list
            count, contours = cv.cvFindContours(gray, self.storage,
                                                cv.sizeof_CvContour,
                                                cv.CV_RETR_LIST,
                                                cv.CV_CHAIN_APPROX_SIMPLE,
                                                cv.cvPoint(0, 0))
            if not contours:
                continue
            # test each contour
            for contour in contours.hrange():
                # approximate contour with accuracy proportional
                # to the contour perimeter
                result = cv.cvApproxPoly(contour, cv.sizeof_CvContour, self.storage,
                                         cv.CV_POLY_APPROX_DP,
                                         cv.cvContourPerimeter(contours) * 0.02, 0)
                # square contours should have 4 vertices after approximation,
                # a relatively large area (to filter out noisy contours)
                # and be convex.
                # Note: absolute value of an area is used because
                # area may be positive or negative - in accordance with the
                # contour orientation
                if (result.total == 4 and
                        abs(cv.cvContourArea(result)) > 1000 and
                        cv.cvCheckContourConvexity(result)):
                    s = 0
                    for i in range(5):
                        # find minimum angle between joint
                        # edges (maximum of cosine)
                        if i >= 2:
                            t = abs(self.squares_angle(result[i], result[i - 2],
                                                       result[i - 1]))
                            if s < t:
                                s = t
                    # if cosines of all angles are small
                    # (all angles are ~90 degrees) then write the quadrangle
                    # vertices to the resultant sequence
                    if s < 0.3:
                        for i in range(4):
                            squares.append(result[i])
    # draw the detected squares back onto the input image
    i = 0
    while i < squares.total:
        pt = []
        # read 4 vertices
        pt.append(squares[i])
        pt.append(squares[i + 1])
        pt.append(squares[i + 2])
        pt.append(squares[i + 3])
        # draw the square as a closed polyline
        cv.cvPolyLine(img, [pt], 1, cv.CV_RGB(0, 255, 0), 3, cv.CV_AA, 0)
        i += 4
    return img
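# Assumed usage sketch for the routine above (the `detector` instance, window
# name and camera index are illustrative, not from the original code):
capture = highgui.cvCreateCameraCapture(0)
frame = highgui.cvQueryFrame(capture)
if frame is not None:
    drawn = detector.detect_squares(frame)   # detector owns self.storage
    highgui.cvShowImage("squares", drawn)
    highgui.cvWaitKey(0)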
def get_smoothed(self, image):
    cv.cvSmooth(image, image, cv.CV_GAUSSIAN, 3, 0, 0, 0)
    cv.cvErode(image, image, None, 1)
    cv.cvDilate(image, image, None, 1)
    return image
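# Note: the erode-then-dilate pair above is a morphological "opening", which
# removes small speckles while roughly preserving larger blobs. If these
# bindings also expose cvMorphologyEx (an assumption - it is not used in the
# original code), the same clean-up step could be written as:
def get_opened(image):
    # None selects the default 3x3 rectangular structuring element
    cv.cvMorphologyEx(image, image, None, None, cv.CV_MOP_OPEN, 1)
    return image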
def dilateImage(image):
    cv.cvDilate(image, image, None, 5)
    return image
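# Tiny usage sketch (the mask size is illustrative): dilateImage() grows the
# bright regions of a single-channel mask by 5 iterations of the default
# 3x3 kernel.
mask = cv.cvCreateImage(cv.cvSize(320, 240), 8, 1)
cv.cvSetZero(mask)
mask = dilateImage(mask)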