def getData():
    """Grab one camera frame and return laser-blob ellipses, or None.

    Relies on module-level state: the ``capture`` handle, the preallocated
    ``b_img``/``g_img``/``r_img``/``laser_img`` planes and ``storage``.
    """
    img = highgui.cvQueryFrame(capture)
    if img is None:
        # camera produced nothing this tick
        return None
    # keep only pixels that are strongly red and weakly green/blue
    cv.cvSplit(img, b_img, g_img, r_img, None)
    cv.cvInRangeS(b_img, 0, 100, b_img)
    cv.cvInRangeS(g_img, 0, 100, g_img)
    cv.cvInRangeS(r_img, 150, 255, r_img)
    cv.cvAnd(r_img, g_img, laser_img)
    cv.cvAnd(laser_img, b_img, laser_img)
    # knock out single-pixel noise, then restore blob size
    cv.cvErode(laser_img, laser_img)  # ,0,2)
    cv.cvDilate(laser_img, laser_img)
    c_count, contours = cv.cvFindContours(laser_img, storage,
                                          cv.sizeof_CvContour,
                                          cv.CV_RETR_LIST,
                                          cv.CV_CHAIN_APPROX_NONE,
                                          cv.cvPoint(0, 0))
    if not c_count:
        return None
    return returnEllipses(contours)
def read(self):
    """Return the upstream frame with only the selected channels kept.

    For a single-plane frame there is nothing to split, so the frame is
    returned untouched.  For a multi-plane frame the channels listed in
    ``self.channels`` are split out and merged back in place.
    """
    img = self.input.read()
    planes = [None] * 4
    if img.nChannels == 1:
        # single-plane input: mark the requested slots but pass through
        for ch in self.channels:
            planes[ch] = img
    else:
        for ch in self.channels:
            planes[ch] = cv.cvCreateImage(cv.cvSize(img.width, img.height), img.depth, 1)
        # split into the requested planes (unselected slots stay None),
        # then recombine them into the original frame buffer
        cv.cvSplit(img, planes[0], planes[1], planes[2], planes[3])
        cv.cvMerge(planes[0], planes[1], planes[2], planes[3], img)
    return img
def read(self):
    """Apply per-channel fixed thresholding to the upstream frame.

    When disabled, the frame passes through untouched.  Otherwise each
    channel in ``self.channels`` is split out, thresholded with the
    matching entries of ``self.thresholds``/``self.max_thresholds`` and
    ``self.type``, and merged back into the zeroed frame buffer.
    """
    frame = self.input.read()
    if not self.enabled:
        return frame
    planes = [None] * 4
    for ch in self.channels:
        planes[ch] = cv.cvCreateImage(cv.cvSize(frame.width, frame.height), frame.depth, 1)
    cv.cvSplit(frame, planes[0], planes[1], planes[2], planes[3])
    for ch in self.channels:
        cv.cvThreshold(planes[ch], planes[ch],
                       self.thresholds[ch], self.max_thresholds[ch], self.type)
    # clear the frame so unselected channels come back black
    cv.cvZero(frame)
    cv.cvMerge(planes[0], planes[1], planes[2], planes[3], frame)
    return frame
def getFilter(frameWidht, frameHeight): cvNamedWindow("Filtred") cvCreateTrackbar("hmax", "Filtred", getHlsFilter('hmax'), 180, trackBarChangeHmax) cvCreateTrackbar("hmin", "Filtred", getHlsFilter('hmin'), 180, trackBarChangeHmin) #cvCreateTrackbar("lmax", "Filtred", hlsFilter['lmax'], 255, trackBarChangeLmax) #cvCreateTrackbar("lmin", "Filtred", hlsFilter['lmin'], 255, trackBarChangeLmin) cvCreateTrackbar("smax", "Filtred", getHlsFilter('smax'), 255, trackBarChangeSmax) cvCreateTrackbar("smin", "Filtred", getHlsFilter('smin'), 255, trackBarChangeSmin) cvSetMouseCallback("Filtred", mouseClick, None) frame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3) hlsFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3) filtredFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3) mask = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1) hFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1) lFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1) sFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1) ThHFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1) ThLFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1) ThSFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1) key = -1 while key == -1: if not cvGrabFrame(CAM): print "Could not grab a frame" exit frame = cvQueryFrame(CAM) cvCvtColor(frame, hlsFrame, CV_BGR2HLS) cvSplit(hlsFrame, hFrame, lFrame, sFrame, None) pixelInRange(hFrame, getHlsFilter('hmin'), getHlsFilter('hmax'), 0, 180, ThHFrame) #pixelInRange(lFrame, getHlsFilter('lmin'), getHlsFilter('lmax'), 0, 255, ThLFrame) pixelInRange(sFrame, getHlsFilter('smin'), getHlsFilter('smax'), 0, 255, ThSFrame) cvSetZero(mask) cvAnd(ThHFrame, ThSFrame, mask) cvSetZero(filtredFrame) cvCopy(frame, filtredFrame, mask) cvShowImage("Filtred", filtredFrame) key = cvWaitKey(10) if key == 'r': key = -1 
resetHlsFilter() cvDestroyWindow("Filtred")
def read(self):
    """Return (src, thresh, red, (x, y)) for the current pipeline state.

    (x, y) is the location of the maximum of the first plane of the
    thresholded frame, found with cvMinMaxLoc.
    """
    src = self.camera.read()
    thresh = self.thresh2pg.read()
    red = self.red2pg.read()
    raw = self.thresh.read()
    # pull out just the first plane of the thresholded frame
    red_plane = cv.cvCreateImage(cv.cvSize(raw.width, raw.height), raw.depth, 1)
    cv.cvSplit(raw, red_plane, None, None, None)
    loc_min = cv.cvPoint(0, 0)
    loc_max = cv.cvPoint(0, 0)
    cv.cvMinMaxLoc(red_plane, loc_min, loc_max)
    return src, thresh, red, (loc_max.x, loc_max.y)
def read(self):
    """Return the brightest point of the thresholded frame as [(x, y)].

    An empty list is returned when the maximum sits at the origin, which
    this pipeline treats as "no detection".
    """
    raw = self.thresh.read()
    red_plane = cv.cvCreateImage(cv.cvSize(raw.width, raw.height), raw.depth, 1)
    cv.cvSplit(raw, red_plane, None, None, None)
    loc_min = cv.cvPoint(0, 0)
    loc_max = cv.cvPoint(0, 0)
    cv.cvMinMaxLoc(red_plane, loc_min, loc_max)
    peak = (loc_max.x, loc_max.y)
    return [peak] if peak != (0, 0) else []
def read(self):
    """Return a single colour plane (``self.channel``) of the upstream frame."""
    frame = self.input.read()
    if frame.nChannels == 1:
        # already single-plane; nothing to extract
        return frame
    # allocate only the plane we want; the rest stay None so cvSplit skips them
    planes = [None] * 4
    planes[self.channel] = cv.cvCreateImage(cv.cvSize(frame.width, frame.height),
                                            frame.depth, 1)
    cv.cvSplit(frame, planes[0], planes[1], planes[2], planes[3])
    return planes[self.channel]
def startChroma(background, frameWidht, frameHeight): #cvNamedWindow("Original") cvNamedWindow("Chroma") hlsFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3) transparency = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3) mask = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1) hFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1) lFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1) sFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1) ThHFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1) ThLFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1) ThSFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1) key = -1 while key == -1: if not cvGrabFrame(CAM): print "Could not grab a frame" exit frame = cvQueryFrame(CAM) cvCvtColor(frame, hlsFrame, CV_BGR2HLS) cvSplit(hlsFrame, hFrame, lFrame, sFrame, None) pixelInRange(hFrame, getHlsFilter('hmin'), getHlsFilter('hmax'), 0, 180, ThHFrame) #pixelInRange(lFrame, getHlsFilter('lmin'), getHlsFilter('lmax'), 0, 255, ThLFrame) pixelInRange(sFrame, getHlsFilter('smin'), getHlsFilter('smax'), 0, 255, ThSFrame) cvAnd(ThHFrame, ThSFrame, mask) cvCopy(background, frame, mask) cvShowImage("Chroma", frame) key = cvWaitKey(10) cvDestroyWindow("Chroma")
def read(self) :
    """Split the frame into three planes and collect polygon contours.

    Returns a list (one entry per processed plane) of lists of polygons,
    each polygon a list of (x, y) tuples, keeping only polygons whose
    contour area is at least ``self.min_area``.
    """
    frame=self.input.read()
    if self.enabled :
        cv_rs = [cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,1) for i in range(3)]
        cv.cvSplit(frame,cv_rs[0],cv_rs[1],cv_rs[2],None)
        channel_contours = []
        # NOTE(review): this iterates cv_rs[0] (a single plane), not cv_rs —
        # looks like it was meant to loop over all three planes; confirm
        # against callers before changing.  Also rebinds `frame`.
        for frame in cv_rs[0] :
            # I think these functions are too specialized for transforms
            #cv.cvSmooth(frame,frame,cv.CV_GAUSSIAN,3, 0, 0, 0 )
            #cv.cvErode(frame, frame, None, 1)
            #cv.cvDilate(frame, frame, None, 1)
            num_contours,contours=cv.cvFindContours(frame,self.storage,cv.sizeof_CvContour,cv.CV_RETR_LIST,cv.CV_CHAIN_APPROX_NONE,cv.cvPoint(0,0))
            if contours is None :
                channel_contours.append([])
            else :
                # simplify each contour before measuring it
                contours = cv.cvApproxPoly( contours, cv.sizeof_CvContour, self.storage, cv.CV_POLY_APPROX_DP, 3, 1 );
                if contours is None :
                    channel_contours.append([])
                else :
                    final_contours = []
                    for c in contours.hrange() :
                        area = abs(cv.cvContourArea(c))
                        #self.debug_print('Polygon Area: %f'%area)
                        if area >= self.min_area :
                            lst = []
                            for pt in c :
                                lst.append((pt.x,pt.y))
                            final_contours.append(lst)
                        # NOTE(review): `h_next` is not called here (no
                        # parentheses) and hrange() already walks the list —
                        # this assignment appears to have no effect; verify.
                        contours = contours.h_next
                    channel_contours.append(final_contours)
        # returns None when self.enabled is False (falls off the end) —
        # TODO confirm callers expect that rather than the frame or [].
        return channel_contours
def read(self):
    """Track the red-laser dot in the upstream frame.

    Returns [(x, y)] for the brightest point of the (smoothed, eroded,
    dilated) third plane, or [] when that maximum sits at the origin
    (treated as "no detection").  In debug mode, also publishes pygame
    surfaces of the raw, red-plane and post-filter images on self.
    """
    frame = self.input.read()
    if self.debug :
        # keep an untouched copy and expose it as a pygame surface
        raw_frame = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,frame.nChannels)
        cv.cvCopy(frame,raw_frame,None)
        self.raw_frame_surface=pygame.image.frombuffer(frame.imageData,(frame.width,frame.height),'RGB')
    if self.enabled :
        # extract the third plane only (presumably red — depends on the
        # upstream channel order; confirm against the capture source)
        cvt_red = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,1)
        cv.cvSplit(frame,None,None,cvt_red,None)
        if self.debug :
            # replicate the plane into an RGB image for display
            red_frame = cv.cvCreateImage(cv.cvSize(cvt_red.width,cvt_red.height),cvt_red.depth,3)
            cv.cvMerge(cvt_red,None,None,None,red_frame)
            self.red_frame_surface = pygame.image.frombuffer(red_frame.imageData,(cvt_red.width,cvt_red.height),'RGB')
        # I think these functions are too specialized for transforms
        cv.cvSmooth(cvt_red,cvt_red,cv.CV_GAUSSIAN,3, 0, 0, 0 )
        cv.cvErode(cvt_red, cvt_red, None, 1)
        cv.cvDilate(cvt_red, cvt_red, None, 1)
        if self.debug :
            thresh_frame = cv.cvCreateImage(cv.cvSize(cvt_red.width,cvt_red.height),cvt_red.depth,3)
            cv.cvMerge(cvt_red,None,None,None,thresh_frame)
            # NOTE(review): buffer comes from the 1-channel cvt_red, not
            # thresh_frame, despite the 'RGB' format string — verify.
            self.thresh_frame_surface = pygame.image.frombuffer(cvt_red.imageData,(cvt_red.width,cvt_red.height),'RGB')
        cvpt_min = cv.cvPoint(0,0)
        cvpt_max = cv.cvPoint(0,0)
        t = cv.cvMinMaxLoc(cvt_red,cvpt_min,cvpt_max)
        # leftover debug output
        print t
        if cvpt_max.x == 0 and cvpt_max.y == 0 :
            return []
        return [(cvpt_max.x,cvpt_max.y)]
def _get_frame_channel(self,frame=None,channel=0) :
    """Return one plane of *frame* as a single-channel image.

    Channels 0-3 select that plane via cvSplit; any other value falls back
    to a BGR->grayscale conversion.  When *frame* is None a fresh frame is
    pulled from the camera; a frame that is already single-channel is
    returned as-is.
    """
    if frame is None :
        frame = self._get_cv_frame()
    if frame.nChannels == 1 :
        # nothing to extract from a single-plane frame
        return frame
    plane = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,1)
    if channel in (0, 1, 2, 3) :
        # place the destination in the requested slot; cvSplit skips Nones
        targets = [None, None, None, None]
        targets[channel] = plane
        cv.cvSplit(frame, targets[0], targets[1], targets[2], targets[3])
    else :
        # out-of-range channel: collapse to grayscale instead
        cv.cvCvtColor(frame, plane, cv.CV_BGR2GRAY)
    return plane
def main(args): global capture global hmax, hmin highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE) highgui.cvNamedWindow('Hue', highgui.CV_WINDOW_AUTOSIZE) highgui.cvNamedWindow('Satuation', highgui.CV_WINDOW_AUTOSIZE) highgui.cvNamedWindow('Value', highgui.CV_WINDOW_AUTOSIZE) highgui.cvNamedWindow('Laser', highgui.CV_WINDOW_AUTOSIZE) highgui.cvMoveWindow('Camera', 0, 10) highgui.cvMoveWindow('Hue', 0, 350) highgui.cvMoveWindow('Satuation', 360, 10) highgui.cvMoveWindow('Value', 360, 350) highgui.cvMoveWindow('Laser', 700, 40) highgui.cvCreateTrackbar("Brightness Trackbar","Camera",0,255, change_brightness); highgui.cvCreateTrackbar("hmin Trackbar","Hue",hmin,180, change_hmin); highgui.cvCreateTrackbar("hmax Trackbar","Hue",hmax,180, change_hmax); highgui.cvCreateTrackbar("smin Trackbar","Satuation",smin,255, change_smin); highgui.cvCreateTrackbar("smax Trackbar","Satuation",smax,255, change_smax); highgui.cvCreateTrackbar("vmin Trackbar","Value",vmin,255, change_vmin); highgui.cvCreateTrackbar("vmax Trackbar","Value",vmax,255, change_vmax); print "grabbing camera" capture = highgui.cvCreateCameraCapture(0) print "found camera" highgui.cvSetCaptureProperty(capture,highgui.CV_CAP_PROP_FRAME_WIDTH, 320) highgui.cvSetCaptureProperty(capture,highgui.CV_CAP_PROP_FRAME_HEIGHT, 240) frame = highgui.cvQueryFrame(capture) frameSize = cv.cvGetSize(frame) hsv = cv.cvCreateImage(frameSize,8,3) mask = cv.cvCreateImage(frameSize,8,1) hue = cv.cvCreateImage(frameSize,8,1) satuation = cv.cvCreateImage(frameSize,8,1) value = cv.cvCreateImage(frameSize,8,1) laser = cv.cvCreateImage(frameSize,8,1) while 1: frame = highgui.cvQueryFrame(capture) cv.cvCvtColor(frame, hsv, cv.CV_BGR2HSV) #cv.cvInRangeS(hsv,hsv_min,hsv_max,mask) cv.cvSplit(hsv,hue,satuation,value,None) cv.cvInRangeS(hue,hmin,hmax,hue) cv.cvInRangeS(satuation,smin,smax,satuation) cv.cvInRangeS(value,vmin,vmax,value) #cv.cvInRangeS(hue,0,180,hue) cv.cvAnd(hue, value, laser) #cv.cvAnd(laser, value, laser) cenX,cenY 
= averageWhitePoints(laser) #print cenX,cenY draw_target(frame,cenX,cenY) #draw_target(frame,200,1) highgui.cvShowImage('Camera',frame) highgui.cvShowImage('Hue',hue) highgui.cvShowImage('Satuation',satuation) highgui.cvShowImage('Value',value) highgui.cvShowImage('Laser',laser) k = highgui.cvWaitKey(10) if k == " ": highgui.cvDestroyAllWindows() highgui.cvReleaseCapture (capture) sys.exit()
def main(args):
    """HSV laser tracker that drives a FuzzyController turret.

    Same pipeline as the plain viewer mains in this file (split HSV,
    threshold each plane, AND hue with value into a laser mask, find the
    white-pixel centroid) but feeds non-origin centroids to the turret.
    Quits on 'q'.
    """
    global capture
    global hmax, hmin
    highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Hue', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Satuation', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Value', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Laser', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvMoveWindow('Camera', 0, 10)
    highgui.cvMoveWindow('Hue', 0, 350)
    highgui.cvMoveWindow('Satuation', 360, 10)
    highgui.cvMoveWindow('Value', 360, 350)
    highgui.cvMoveWindow('Laser', 700, 40)
    # trackbars mutate the module-level threshold globals via callbacks
    highgui.cvCreateTrackbar("Brightness Trackbar","Camera",0,255, change_brightness);
    highgui.cvCreateTrackbar("hmin Trackbar","Hue",hmin,180, change_hmin);
    highgui.cvCreateTrackbar("hmax Trackbar","Hue",hmax,180, change_hmax);
    highgui.cvCreateTrackbar("smin Trackbar","Satuation",smin,255, change_smin);
    highgui.cvCreateTrackbar("smax Trackbar","Satuation",smax,255, change_smax);
    highgui.cvCreateTrackbar("vmin Trackbar","Value",vmin,255, change_vmin);
    highgui.cvCreateTrackbar("vmax Trackbar","Value",vmax,255, change_vmax);
    print "grabbing camera"
    capture = highgui.cvCreateCameraCapture(0)
    print "found camera"
    highgui.cvSetCaptureProperty(capture,highgui.CV_CAP_PROP_FRAME_WIDTH, 320)
    highgui.cvSetCaptureProperty(capture,highgui.CV_CAP_PROP_FRAME_HEIGHT, 240)
    # grab one frame just to size the scratch images
    frame = highgui.cvQueryFrame(capture)
    frameSize = cv.cvGetSize(frame)
    hsv = cv.cvCreateImage(frameSize,8,3)
    mask = cv.cvCreateImage(frameSize,8,1)
    hue = cv.cvCreateImage(frameSize,8,1)
    satuation = cv.cvCreateImage(frameSize,8,1)
    value = cv.cvCreateImage(frameSize,8,1)
    laser = cv.cvCreateImage(frameSize,8,1)
    turret = FuzzyController(frameSize.width,frameSize.height,True)
    # move_count is only referenced by the dead code below
    move_count = 0
    while 1:
        frame = highgui.cvQueryFrame(capture)
        cv.cvCvtColor(frame, hsv, cv.CV_BGR2HSV)
        #cv.cvInRangeS(hsv,hsv_min,hsv_max,mask)
        cv.cvSplit(hsv,hue,satuation,value,None)
        cv.cvInRangeS(hue,cv.cvScalar(hmin),cv.cvScalar(hmax),hue)
        cv.cvInRangeS(satuation,cv.cvScalar(smin),cv.cvScalar(smax),satuation)
        cv.cvInRangeS(value,cv.cvScalar(vmin),cv.cvScalar(vmax),value)
        #cv.cvInRangeS(hue,0,180,hue)
        cv.cvAnd(hue, value, laser)
        #cv.cvAnd(laser, value, laser)
        cenX,cenY = averageWhitePoints(laser)
        #print cenX,cenY
        draw_target(frame,cenX,cenY)
        # (0, 0) doubles as "no detection" here
        if(cenX != 0 and cenY != 0):# and move_count <= 0):
            turret.update(cenX,cenY,False)
            # dead code kept as a string literal (manual bang-bang steering,
            # superseded by turret.update above)
            """
            turret.reset()
            move_count = 3
            if(cenX < 100):
                turret.left(20)
            elif(cenX > 200):
                turret.right(20)
            if(cenY < 80):
                turret.up(40)
            elif(cenY > 170):
                print "DOWN please.."
                turret.down(40)
                print cenY
            """
            #move_count -= 1
        #draw_target(frame,200,1)
        highgui.cvShowImage('Camera',frame)
        highgui.cvShowImage('Hue',hue)
        highgui.cvShowImage('Satuation',satuation)
        highgui.cvShowImage('Value',value)
        highgui.cvShowImage('Laser',laser)
        k = highgui.cvWaitKey(10)
        if k == 'q':
            sys.exit()
def main(args): global capture global hmax, hmin highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE) highgui.cvNamedWindow('Hue', highgui.CV_WINDOW_AUTOSIZE) highgui.cvNamedWindow('Satuation', highgui.CV_WINDOW_AUTOSIZE) highgui.cvNamedWindow('Value', highgui.CV_WINDOW_AUTOSIZE) highgui.cvNamedWindow('Laser', highgui.CV_WINDOW_AUTOSIZE) highgui.cvMoveWindow('Camera', 0, 10) highgui.cvMoveWindow('Hue', 0, 350) highgui.cvMoveWindow('Satuation', 360, 10) highgui.cvMoveWindow('Value', 360, 350) highgui.cvMoveWindow('Laser', 700, 40) highgui.cvCreateTrackbar("Brightness Trackbar", "Camera", 0, 255, change_brightness) highgui.cvCreateTrackbar("hmin Trackbar", "Hue", hmin, 180, change_hmin) highgui.cvCreateTrackbar("hmax Trackbar", "Hue", hmax, 180, change_hmax) highgui.cvCreateTrackbar("smin Trackbar", "Satuation", smin, 255, change_smin) highgui.cvCreateTrackbar("smax Trackbar", "Satuation", smax, 255, change_smax) highgui.cvCreateTrackbar("vmin Trackbar", "Value", vmin, 255, change_vmin) highgui.cvCreateTrackbar("vmax Trackbar", "Value", vmax, 255, change_vmax) print "grabbing camera" capture = highgui.cvCreateCameraCapture(0) print "found camera" highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_WIDTH, 320) highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_HEIGHT, 240) frame = highgui.cvQueryFrame(capture) frameSize = cv.cvGetSize(frame) hsv = cv.cvCreateImage(frameSize, 8, 3) mask = cv.cvCreateImage(frameSize, 8, 1) hue = cv.cvCreateImage(frameSize, 8, 1) satuation = cv.cvCreateImage(frameSize, 8, 1) value = cv.cvCreateImage(frameSize, 8, 1) laser = cv.cvCreateImage(frameSize, 8, 1) while 1: frame = highgui.cvQueryFrame(capture) cv.cvCvtColor(frame, hsv, cv.CV_BGR2HSV) #cv.cvInRangeS(hsv,hsv_min,hsv_max,mask) cv.cvSplit(hsv, hue, satuation, value, None) cv.cvInRangeS(hue, hmin, hmax, hue) cv.cvInRangeS(satuation, smin, smax, satuation) cv.cvInRangeS(value, vmin, vmax, value) #cv.cvInRangeS(hue,0,180,hue) cv.cvAnd(hue, value, laser) 
#cv.cvAnd(laser, value, laser) cenX, cenY = averageWhitePoints(laser) #print cenX,cenY draw_target(frame, cenX, cenY) #draw_target(frame,200,1) highgui.cvShowImage('Camera', frame) highgui.cvShowImage('Hue', hue) highgui.cvShowImage('Satuation', satuation) highgui.cvShowImage('Value', value) highgui.cvShowImage('Laser', laser) k = highgui.cvWaitKey(10) if k == " ": highgui.cvDestroyAllWindows() highgui.cvReleaseCapture(capture) sys.exit()
frame = highgui.cvQueryFrame (capture) if frame is None: # no image captured... end the processing break # mirror the captured image cv.cvFlip (frame, None, 1) # compute the hsv version of the image cv.cvCvtColor (frame, hsv, cv.CV_BGR2HSV) # compute which pixels are in the wanted range cv.cvInRangeS (hsv, hsv_min, hsv_max, mask) # extract the hue from the hsv array cv.cvSplit (hsv, hue, None, None, None) # select the rectangle of interest in the hue/mask arrays hue_roi = cv.cvGetSubRect (hue, selection) mask_roi = cv.cvGetSubRect (mask, selection) # it's time to compute the histogram cv.cvCalcHist (hue_roi, hist, 0, mask_roi) # extract the min and max value of the histogram min_val, max_val = cv.cvGetMinMaxHistValue (hist, None, None) # compute the scale factor if max_val > 0: scale = 255. / max_val else:
frame = highgui.cvQueryFrame(capture) if frame is None: # no image captured... end the processing break # mirror the captured image #cv.cvFlip (frame, None, 1) # compute the hsv version of the image cv.cvCvtColor(frame, hsv, cv.CV_BGR2HSV) # compute which pixels are in the wanted range cv.cvInRangeS(hsv, hsv_min, hsv_max, mask) # extract the hue from the hsv array cv.cvSplit(hsv, hue, None, None, None) # select the rectangle of interest in the hue/mask arrays hue_roi = cv.cvGetSubRect(hue, selection) mask_roi = cv.cvGetSubRect(mask, selection) # it's time to compute the histogram cv.cvCalcHist(hue_roi, hist, 0, mask_roi) # extract the min and max value of the histogram min_val, max_val, min_idx, max_idx = cv.cvGetMinMaxHistValue(hist) # compute the scale factor if max_val > 0: scale = 255. / max_val else:
def detect_squares(self, img_grey, img_orig):
    """Find squares within the video stream and return them as a point seq.

    Classic OpenCV squares-sample pipeline: blur via pyramid down/up, then
    for each colour plane run Canny (level 0) or successive binary
    thresholds, find contours, approximate each to a polygon, and keep
    convex quadrilaterals with near-90-degree corners and area > 1000.
    Appends the 4 vertices of each accepted square to the returned CvSeq.

    Fix: ``timg`` was released immediately after ``cvGetSubRect`` even
    though ``subimage`` is a view into its pixel buffer and is used
    throughout the loop — a use-after-free.  The release now happens after
    the last use of ``subimage``.
    """
    cv.cvClearMemStorage(self.faces_storage)
    N = 11
    thresh = 5
    # width/height rounded down to even so the pyramid halving works
    sz = cv.cvSize(img_grey.width & -2, img_grey.height & -2)
    timg = cv.cvCloneImage(img_orig)
    pyr = cv.cvCreateImage(cv.cvSize(sz.width/2, sz.height/2), 8, 3)
    # create empty sequence that will contain points -
    # 4 points per square (the square's vertices)
    squares = cv.cvCreateSeq(0, cv.sizeof_CvSeq, cv.sizeof_CvPoint,
                             self.squares_storage)
    squares = cv.CvSeq_CvPoint.cast(squares)
    # select the maximum ROI in the image
    # with the width and height divisible by 2
    # NOTE: subimage shares timg's buffer — timg must outlive it
    subimage = cv.cvGetSubRect(timg, cv.cvRect(0, 0, sz.width, sz.height))
    # down-scale and upscale the image to filter out the noise
    cv.cvPyrDown(subimage, pyr, 7)
    cv.cvPyrUp(pyr, subimage, 7)
    cv.cvReleaseImage(pyr)
    tgrey = cv.cvCreateImage(sz, 8, 1)
    # find squares in every color plane of the image
    for c in range(3):
        # extract the c-th color plane
        channels = [None, None, None]
        channels[c] = tgrey
        cv.cvSplit(subimage, channels[0], channels[1], channels[2], None)
        for l in range(N):
            # hack: use Canny instead of zero threshold level.
            # Canny helps to catch squares with gradient shading
            if(l == 0):
                # apply Canny. Take the upper threshold from slider
                # and set the lower to 0 (which forces edges merging)
                cv.cvCanny(tgrey, img_grey, 0, thresh, 5)
                # dilate canny output to remove potential
                # holes between edge segments
                cv.cvDilate(img_grey, img_grey, None, 1)
            else:
                # apply threshold if l!=0:
                #   tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                cv.cvThreshold(tgrey, img_grey, (l+1)*255/N, 255,
                               cv.CV_THRESH_BINARY)
            # find contours and store them all as a list
            count, contours = cv.cvFindContours(img_grey,
                                                self.squares_storage,
                                                cv.sizeof_CvContour,
                                                cv.CV_RETR_LIST,
                                                cv.CV_CHAIN_APPROX_SIMPLE,
                                                cv.cvPoint(0,0))
            if not contours:
                continue
            # test each contour
            for contour in contours.hrange():
                # approximate contour with accuracy proportional
                # to the contour perimeter
                result = cv.cvApproxPoly(contour, cv.sizeof_CvContour,
                                         self.squares_storage,
                                         cv.CV_POLY_APPROX_DP,
                                         cv.cvContourPerimeter(contours)*0.02, 0)
                # square contours should have 4 vertices after approximation
                # relatively large area (to filter out noisy contours)
                # and be convex.
                # Note: absolute value of an area is used because
                # area may be positive or negative - in accordance with the
                # contour orientation
                if(result.total == 4 and
                        abs(cv.cvContourArea(result)) > 1000 and
                        cv.cvCheckContourConvexity(result)):
                    s = 0
                    for i in range(5):
                        # find minimum angle between joint
                        # edges (maximum of cosine)
                        if(i >= 2):
                            t = abs(self.squares_angle(result[i], result[i-2],
                                                       result[i-1]))
                            if s<t:
                                s = t
                    # if cosines of all angles are small
                    # (all angles are ~90 degree) then write quandrange
                    # vertices to resultant sequence
                    if(s < 0.3):
                        for i in range(4):
                            squares.append(result[i])
    cv.cvReleaseImage(tgrey)
    # safe to release now: subimage is no longer used
    cv.cvReleaseImage(timg)
    return squares
def main(args):
    """HSV laser tracker that also prints a distance estimate.

    Same window/trackbar pipeline as the other mains, sized by the
    module-level iwidth/iheight.  Each frame it thresholds H/S/V, ANDs hue
    with value into a laser mask, converts the centroid's horizontal pixel
    offset into a distance via a calibration hyperbola, and prints it.
    """
    global capture
    global hmax, hmin
    highgui.cvNamedWindow('Hue', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Saturation', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Value', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Laser', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvMoveWindow('Camera', 0, 10)
    highgui.cvMoveWindow('Hue', 0, 350)
    highgui.cvMoveWindow('Saturation', 360, 10)
    highgui.cvMoveWindow('Value', 360, 350)
    highgui.cvMoveWindow('Laser', 700, 40)
    # trackbars mutate the module-level threshold globals via callbacks
    highgui.cvCreateTrackbar("Brightness Trackbar","Camera",0,255, change_brightness);
    highgui.cvCreateTrackbar("hmin Trackbar","Hue",hmin,180, change_hmin);
    highgui.cvCreateTrackbar("hmax Trackbar","Hue",hmax,180, change_hmax);
    highgui.cvCreateTrackbar("smin Trackbar","Saturation",smin,255, change_smin);
    highgui.cvCreateTrackbar("smax Trackbar","Saturation",smax,255, change_smax);
    highgui.cvCreateTrackbar("vmin Trackbar","Value",vmin,255, change_vmin);
    highgui.cvCreateTrackbar("vmax Trackbar","Value",vmax,255, change_vmax);
    print "grabbing camera"
    capture = highgui.cvCreateCameraCapture(0)
    print "found camera"
    highgui.cvSetCaptureProperty(capture,highgui.CV_CAP_PROP_FRAME_WIDTH, iwidth)
    highgui.cvSetCaptureProperty(capture,highgui.CV_CAP_PROP_FRAME_HEIGHT, iheight)
    # grab one frame just to size the scratch images
    frame = highgui.cvQueryFrame(capture)
    frameSize = cv.cvGetSize(frame)
    hsv = cv.cvCreateImage(frameSize,8,3)
    mask = cv.cvCreateImage(frameSize,8,1)
    hue = cv.cvCreateImage(frameSize,8,1)
    saturation = cv.cvCreateImage(frameSize,8,1)
    value = cv.cvCreateImage(frameSize,8,1)
    laser = cv.cvCreateImage(frameSize,8,1)
    while 1:
        frame = highgui.cvQueryFrame(capture)
        cv.cvCvtColor(frame, hsv, cv.CV_BGR2HSV)
        #cv.cvInRangeS(hsv,hsv_min,hsv_max,mask)
        cv.cvSplit(hsv,hue,saturation,value,None)
        #print hmin, hmax
        cv.cvInRangeS(hue,cv.cvScalar(hmin),cv.cvScalar(hmax),hue)
        cv.cvInRangeS(saturation,cv.cvScalar(smin),cv.cvScalar(smax),saturation)
        cv.cvInRangeS(value,cv.cvScalar(vmin),cv.cvScalar(vmax),value)
        #cv.cvInRangeS(hue,cv.cvScalar(0),cv.cvScalar(180),hue)
        cv.cvAnd(hue, value, laser)
        #cv.cvAnd(laser, value, laser)
        # stupid filter
        #removeErrantPoints(laser)
        cenX,cenY = averageWhitePoints(laser)
        # horizontal offset of the dot from the image centre, in pixels
        px = iwidth/2 - cenX
        # calibration fit: distance as a hyperbola in px.
        # NOTE(review): divides by (px - 5.70...) — blows up when the dot
        # lands near that offset; confirm whether a guard is needed.
        dis = 57.18832855 / ( px - 5.702350176) + .05753797721
        print cenX,px,dis
        draw_target(frame,cenX,cenY)
        #draw_target(frame,200,1)
        highgui.cvShowImage('Hue',hue)
        highgui.cvShowImage('Camera',frame)
        highgui.cvShowImage('Saturation',saturation)
        highgui.cvShowImage('Value',value)
        highgui.cvShowImage('Laser',laser)
        highgui.cvWaitKey(10)
def main():
    """HSV laser-pointer finder using hue AND value thresholds.

    Splits each camera frame into H/S/V planes, thresholds each plane
    in-place against the module-level (hmin,hmax)/(smin,smax)/(vmin,vmax)
    bounds, ANDs hue with value into the laser mask, and displays every
    intermediate image.  ESC or 'q' quits.
    """
    print "OpenCV version: %s (%d, %d, %d)" % (cv.CV_VERSION, cv.CV_MAJOR_VERSION, cv.CV_MINOR_VERSION, cv.CV_SUBMINOR_VERSION)
    # create windows
    create_and_position_window('Thresholded_HSV_Image', 10, 10)
    create_and_position_window('RGB_VideoFrame', 10+cam_width, 10)
    create_and_position_window('Hue', 10, 10+cam_height)
    create_and_position_window('Saturation', 210, 10+cam_height)
    create_and_position_window('Value', 410, 10+cam_height)
    create_and_position_window('LaserPointer', 0,0)
    capture = setup_camera_capture()
    # create images for the different channels
    h_img = cv.cvCreateImage (cv.cvSize (cam_width,cam_height), 8, 1)
    s_img = cv.cvCreateImage (cv.cvSize (cam_width,cam_height), 8, 1)
    v_img = cv.cvCreateImage (cv.cvSize (cam_width,cam_height), 8, 1)
    laser_img = cv.cvCreateImage (cv.cvSize (cam_width,cam_height), 8, 1)
    cv.cvSetZero(h_img)
    cv.cvSetZero(s_img)
    cv.cvSetZero(v_img)
    cv.cvSetZero(laser_img)
    while True:
        # 1. capture the current image
        frame = highgui.cvQueryFrame (capture)
        if frame is None:
            # no image captured... end the processing
            break
        hsv_image = cv.cvCloneImage(frame) # temporary copy of the frame
        cv.cvCvtColor(frame, hsv_image, cv.CV_BGR2HSV) # convert to HSV
        # split the video frame into color channels
        cv.cvSplit(hsv_image, h_img, s_img, v_img, None)
        # Threshold ranges of HSV components.
        cv.cvInRangeS(h_img, hmin, hmax, h_img)
        cv.cvInRangeS(s_img, smin, smax, s_img)
        cv.cvInRangeS(v_img, vmin, vmax, v_img)
        # Perform an AND on HSV components to identify the laser!
        cv.cvAnd(h_img, v_img, laser_img)
        # This actually Worked OK for me without using Saturation.
        #cv.cvAnd(laser_img, s_img,laser_img)
        # Merge the HSV components back together.
        cv.cvMerge(h_img, s_img, v_img, None, hsv_image)
        #-----------------------------------------------------
        # NOTE: default color space in OpenCV is BGR!!
        # we can now display the images
        highgui.cvShowImage ('Thresholded_HSV_Image', hsv_image)
        highgui.cvShowImage ('RGB_VideoFrame', frame)
        highgui.cvShowImage ('Hue', h_img)
        highgui.cvShowImage ('Saturation', s_img)
        highgui.cvShowImage ('Value', v_img)
        highgui.cvShowImage('LaserPointer', laser_img)
        # handle events
        k = highgui.cvWaitKey (10)
        if k == '\x1b' or k == 'q':
            # user has press the ESC key, so exit
            break
def detect_squares(self, img):
    """ Find squares within the video stream and draw them.

    Classic OpenCV squares-sample pipeline: pyramid down/up to denoise,
    then for each colour plane run Canny (level 0) or successive binary
    thresholds, find contours, approximate to polygons, and keep convex
    quadrilaterals with near-90-degree corners and area > 1000.  The
    accepted squares are drawn onto *img* as green closed polylines and
    the annotated image is returned.
    """
    N = 11
    thresh = 5
    # width/height rounded down to even so the pyramid halving works
    sz = cv.cvSize(img.width & -2, img.height & -2)
    timg = cv.cvCloneImage(img)
    gray = cv.cvCreateImage(sz, 8, 1)
    pyr = cv.cvCreateImage(cv.cvSize(sz.width / 2, sz.height / 2), 8, 3)
    # create empty sequence that will contain points -
    # 4 points per square (the square's vertices)
    squares = cv.cvCreateSeq(0, cv.sizeof_CvSeq, cv.sizeof_CvPoint, self.storage)
    squares = cv.CvSeq_CvPoint.cast(squares)
    # select the maximum ROI in the image
    # with the width and height divisible by 2
    subimage = cv.cvGetSubRect(timg, cv.cvRect(0, 0, sz.width, sz.height))
    # down-scale and upscale the image to filter out the noise
    cv.cvPyrDown(subimage, pyr, 7)
    cv.cvPyrUp(pyr, subimage, 7)
    tgray = cv.cvCreateImage(sz, 8, 1)
    # find squares in every color plane of the image
    for c in range(3):
        # extract the c-th color plane
        channels = [None, None, None]
        channels[c] = tgray
        cv.cvSplit(subimage, channels[0], channels[1], channels[2], None)
        for l in range(N):
            # hack: use Canny instead of zero threshold level.
            # Canny helps to catch squares with gradient shading
            if (l == 0):
                # apply Canny. Take the upper threshold from slider
                # and set the lower to 0 (which forces edges merging)
                cv.cvCanny(tgray, gray, 0, thresh, 5)
                # dilate canny output to remove potential
                # holes between edge segments
                cv.cvDilate(gray, gray, None, 1)
            else:
                # apply threshold if l!=0:
                #   tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                cv.cvThreshold(tgray, gray, (l + 1) * 255 / N, 255, cv.CV_THRESH_BINARY)
            # find contours and store them all as a list
            count, contours = cv.cvFindContours(gray, self.storage, cv.sizeof_CvContour, cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_SIMPLE, cv.cvPoint(0, 0))
            if not contours:
                continue
            # test each contour
            for contour in contours.hrange():
                # approximate contour with accuracy proportional
                # to the contour perimeter
                # NOTE(review): perimeter is taken from `contours` (the list
                # head), not the current `contour` — matches the upstream
                # sample but looks suspicious; confirm before changing.
                result = cv.cvApproxPoly( contour, cv.sizeof_CvContour, self.storage, cv.CV_POLY_APPROX_DP, cv.cvContourPerimeter(contours) * 0.02, 0)
                # square contours should have 4 vertices after approximation
                # relatively large area (to filter out noisy contours)
                # and be convex.
                # Note: absolute value of an area is used because
                # area may be positive or negative - in accordance with the
                # contour orientation
                if (result.total == 4 and abs(cv.cvContourArea(result)) > 1000 and cv.cvCheckContourConvexity(result)):
                    s = 0
                    for i in range(5):
                        # find minimum angle between joint
                        # edges (maximum of cosine)
                        if (i >= 2):
                            t = abs( self.squares_angle(result[i], result[i - 2], result[i - 1]))
                            if s < t:
                                s = t
                    # if cosines of all angles are small
                    # (all angles are ~90 degree) then write quandrange
                    # vertices to resultant sequence
                    if (s < 0.3):
                        for i in range(4):
                            squares.append(result[i])
    # draw each group of 4 accumulated vertices as a closed green polyline
    i = 0
    while i < squares.total:
        pt = []
        # read 4 vertices
        pt.append(squares[i])
        pt.append(squares[i + 1])
        pt.append(squares[i + 2])
        pt.append(squares[i + 3])
        # draw the square as a closed polyline
        cv.cvPolyLine(img, [pt], 1, cv.CV_RGB(0, 255, 0), 3, cv.CV_AA, 0)
        i += 4
    return img
def compute_saliency(image):
    """Compute a binary saliency map of *image* via the spectral-residual method.

    Converts to grayscale at a power-of-two working size (2**scale), takes
    the forward DFT, suppresses the locally-averaged log-magnitude (the
    "spectral residual"), inverts the DFT, thresholds the result relative
    to its mean (scaled by the global ``thresh``), and returns the map
    resized back to the input size as an 8-bit image.
    """
    global thresh
    global scale
    # working resolution: square, side 2**scale
    saliency_scale = int(math.pow(2,scale));
    bw_im1 = cv.cvCreateImage(cv.cvGetSize(image), cv.IPL_DEPTH_8U,1)
    cv.cvCvtColor(image, bw_im1, cv.CV_BGR2GRAY)
    bw_im = cv.cvCreateImage(cv.cvSize(saliency_scale,saliency_scale), cv.IPL_DEPTH_8U,1)
    cv.cvResize(bw_im1, bw_im)
    highgui.cvShowImage("BW", bw_im)
    # pack the grayscale image into the real plane of a 2-channel complex image
    realInput = cv.cvCreateImage( cv.cvGetSize(bw_im), cv.IPL_DEPTH_32F, 1);
    imaginaryInput = cv.cvCreateImage( cv.cvGetSize(bw_im), cv.IPL_DEPTH_32F, 1);
    complexInput = cv.cvCreateImage( cv.cvGetSize(bw_im), cv.IPL_DEPTH_32F, 2);
    cv.cvScale(bw_im, realInput, 1.0, 0.0);
    cv.cvZero(imaginaryInput);
    cv.cvMerge(realInput, imaginaryInput, None, None, complexInput);
    # DFT size equals the working size (already a power of two)
    dft_M = saliency_scale #cv.cvGetOptimalDFTSize( bw_im.height - 1 );
    dft_N = saliency_scale #cv.cvGetOptimalDFTSize( bw_im.width - 1 );
    dft_A = cv.cvCreateMat( dft_M, dft_N, cv.CV_32FC2 );
    image_Re = cv.cvCreateImage( cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    image_Im = cv.cvCreateImage( cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    # copy A to dft_A and pad dft_A with zeros
    tmp = cv.cvGetSubRect( dft_A, cv.cvRect(0,0, bw_im.width, bw_im.height));
    cv.cvCopy( complexInput, tmp, None );
    if(dft_A.width > bw_im.width):
        tmp = cv.cvGetSubRect( dft_A, cv.cvRect(bw_im.width,0, dft_N - bw_im.width, bw_im.height));
        cv.cvZero( tmp );
    cv.cvDFT( dft_A, dft_A, cv.CV_DXT_FORWARD, complexInput.height );
    cv.cvSplit( dft_A, image_Re, image_Im, None, None );
    # Compute the phase angle
    image_Mag = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    image_Phase = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    #compute the phase of the spectrum
    cv.cvCartToPolar(image_Re, image_Im, image_Mag, image_Phase, 0)
    log_mag = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    cv.cvLog(image_Mag, log_mag)
    #Box filter the magnitude, then take the difference
    image_Mag_Filt = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    # kernel of -1/9: convolving gives the NEGATED 3x3 box average, so the
    # cvAdd below effectively subtracts the local mean from log_mag
    filt = cv.cvCreateMat(3,3, cv.CV_32FC1);
    cv.cvSet(filt,cv.cvScalarAll(-1.0/9.0))
    cv.cvFilter2D(log_mag, image_Mag_Filt, filt, cv.cvPoint(-1,-1))
    cv.cvAdd(log_mag, image_Mag_Filt, log_mag, None)
    # back to linear magnitude, recombine with the original phase, invert DFT
    cv.cvExp(log_mag, log_mag)
    cv.cvPolarToCart(log_mag, image_Phase, image_Re, image_Im,0);
    cv.cvMerge(image_Re, image_Im, None, None, dft_A)
    cv.cvDFT( dft_A, dft_A, cv.CV_DXT_INVERSE, complexInput.height)
    tmp = cv.cvGetSubRect( dft_A, cv.cvRect(0,0, bw_im.width, bw_im.height));
    cv.cvCopy( tmp, complexInput, None );
    cv.cvSplit(complexInput, realInput, imaginaryInput, None, None)
    # NOTE(review): shadows the min/max builtins; results are unused apart
    # from the commented-out normalisation below
    min, max = cv.cvMinMaxLoc(realInput);
    #cv.cvScale(realInput, realInput, 1.0/(max-min), 1.0*(-min)/(max-min));
    cv.cvSmooth(realInput, realInput);
    # binarise relative to the mean response, scaled by the global thresh (%)
    threshold = thresh/100.0*cv.cvAvg(realInput)[0]
    cv.cvThreshold(realInput, realInput, threshold, 1.0, cv.CV_THRESH_BINARY)
    # resize back to the source resolution and rescale to 0..255
    tmp_img = cv.cvCreateImage(cv.cvGetSize(bw_im1),cv.IPL_DEPTH_32F, 1)
    cv.cvResize(realInput,tmp_img)
    cv.cvScale(tmp_img, bw_im1, 255,0)
    return bw_im1
def main(args):
    """Laser-dot tracker entry point.

    Opens the camera, builds the HighGUI windows and trackbars that tune the
    red/green hue bands and the value band (each trackbar callback stores its
    position in a module-level global), then loops forever: threshold each
    frame in HSV space, locate the red and green laser dots, steer the turret
    toward the green dot, and display every intermediate image.
    Keys: ESC/'q' exits the process; 'p' toggles the running-stats printout.
    """
    global capture
    global hmax, hmin
    global stats, startTime
    # --- window creation and layout ---------------------------------------
    highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Red Hue', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Green Hue', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Value', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Red Laser', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Green Laser', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvMoveWindow('Camera', 0, 10)
    highgui.cvMoveWindow('Value', 10, 420)
    highgui.cvMoveWindow('Red Laser', 360, 10)
    highgui.cvMoveWindow('Green Laser', 360, 360)
    highgui.cvMoveWindow('Red Hue',700, 10 )
    highgui.cvMoveWindow('Green Hue',700, 420)
    # --- trackbars: each callback mutates the matching module global -------
    # Hue trackbars run 0..180 (OpenCV 8-bit hue range); value runs 0..255.
    highgui.cvCreateTrackbar("Brightness Trackbar","Camera",0,255, change_brightness);
    highgui.cvCreateTrackbar("vmin Trackbar","Value",vmin,255, change_vmin);
    highgui.cvCreateTrackbar("vmax Trackbar","Value",vmax,255, change_vmax);
    highgui.cvCreateTrackbar("red hmin Trackbar","Red Hue",red_hmin,180, change_red_hmin);
    highgui.cvCreateTrackbar("red hmax Trackbar","Red Hue",red_hmax,180, change_red_hmax);
    highgui.cvCreateTrackbar("green hmin Trackbar","Green Hue",green_hmin,180, change_green_hmin);
    highgui.cvCreateTrackbar("green hmax Trackbar","Green Hue",green_hmax,180, change_green_hmax);
    print "grabbing camera"
    capture = highgui.cvCreateCameraCapture(0)
    print "found camera"
    highgui.cvSetCaptureProperty(capture,highgui.CV_CAP_PROP_FRAME_WIDTH, iwidth)
    highgui.cvSetCaptureProperty(capture,highgui.CV_CAP_PROP_FRAME_HEIGHT, iheight)
    # Grab one frame to learn the capture size, then allocate all scratch
    # images once up front (8-bit; 3-channel for HSV, 1-channel planes).
    frame = highgui.cvQueryFrame(capture)
    frameSize = cv.cvGetSize(frame)
    hsv = cv.cvCreateImage(frameSize,8,3)
    mask = cv.cvCreateImage(frameSize,8,1)
    red_hue = cv.cvCreateImage(frameSize,8,1)
    green_hue = cv.cvCreateImage(frameSize,8,1)
    saturation = cv.cvCreateImage(frameSize,8,1)
    value = cv.cvCreateImage(frameSize,8,1)
    red_laser = cv.cvCreateImage(frameSize,8,1)
    green_laser = cv.cvCreateImage(frameSize,8,1)
    turret = FuzzyController(frameSize.width,frameSize.height,True)
    while 1:
        frame = highgui.cvQueryFrame(capture)
        cv.cvCvtColor(frame, hsv, cv.CV_BGR2HSV)
        # Two splits to obtain two independent copies of the hue plane
        # (red_hue and green_hue); the second split rewrites saturation and
        # value with identical data.
        cv.cvSplit(hsv,red_hue,saturation,value,None)
        cv.cvSplit(hsv,green_hue,saturation,value,None)
        # In-place band thresholds: each plane becomes a 0/255 mask.
        cv.cvInRangeS(red_hue, cv.cvScalar(red_hmin), cv.cvScalar(red_hmax), red_hue)
        cv.cvInRangeS(green_hue, cv.cvScalar(green_hmin), cv.cvScalar(green_hmax), green_hue)
        cv.cvInRangeS(value, cv.cvScalar(vmin), cv.cvScalar(vmax), value)
        # A laser pixel must pass both its hue band and the value band.
        cv.cvAnd(red_hue, value, red_laser)
        cv.cvAnd(green_hue, value, green_laser)
        green_cenX,green_cenY = averageWhitePoints(green_laser)
        draw_target(frame, green_cenX, green_cenY, "GREEN")
        red_cenX, red_cenY = averageWhitePoints(red_laser)
        draw_target(frame, red_cenX, red_cenY, "RED")
        # Negative centroid coordinates mean "dot not found" -- only steer
        # the turret when the green dot is visible.
        if(green_cenX >= 0 and green_cenY >= 0):# and move_count <= 0):
            turret.update(green_cenX,green_cenY)
        highgui.cvShowImage('Camera',frame)
        highgui.cvShowImage('Red Hue', red_hue)
        highgui.cvShowImage('Green Hue', green_hue)
        highgui.cvShowImage('Value',value)
        highgui.cvShowImage('Red Laser',red_laser)
        highgui.cvShowImage('Green Laser',green_laser)
        if stats:
            printRunningStats((green_cenX, green_cenY), (red_cenX, red_cenY))
        k = highgui.cvWaitKey(10)
        if k == '\x1b' or k == 'q':
            sys.exit()
        if k == 'p':
            # 'p' toggles stats collection; restart the timer when enabling.
            if stats:
                printTotalStats()
                stats = False
            else:
                startTime = time()
                stats = True
# Per-frame HSV histogram pass.  NOTE(review): fragment -- the images,
# histograms (h_hue/h_sat/h_val) and masks used here are allocated outside
# this view; presumably this runs inside the capture loop -- confirm.
cv.cvZero(img_h)
cv.cvZero(img_s)
cv.cvZero(img_v)
cv.cvZero(thresh_mask)
highgui.cvShowImage("Input", img)
# 5x5 Gaussian Blur
cv.cvSmooth(img, img, cv.CV_GAUSSIAN, 5, 5)
# convert to HSV (in place: img now holds HSV data)
cv.cvCvtColor(img, img, cv.CV_BGR2HSV)
# threshold bad values: thresh_mask marks pixels inside [hsv_min, hsv_max]
cv.cvInRangeS(img, hsv_min, hsv_max, thresh_mask)
# further restrict to the externally supplied binary mask
cv.cvAnd(thresh_mask, mask_bw, thresh_mask)
# Hue(0,180), Saturation(0,255), Value(0,255)
cv.cvSplit(img, img_h, img_s, img_v, 0)
# calculate histogram -- masked, so only the selected pixels are counted
cv.cvCalcHist(img_h, h_hue, 0, thresh_mask)
cv.cvCalcHist(img_s, h_sat, 0, thresh_mask)
cv.cvCalcHist(img_v, h_val, 0, thresh_mask)
# Don't normalize, use total mask pixels to calculate relative importance
# cv.cvNormalizeHist(h_hue, 180)
# cv.cvNormalizeHist(h_sat, 255)
# cv.cvNormalizeHist(h_val, 255)
# minv,maxv,minp,maxp = cv.cvMinMaxLoc(img_h)
# print minv,maxv
# clear the hue-histogram display image before it is redrawn
cv.cvZero(hist_hue_img)
# hue_min,hue_max,min_loc,max_loc = cv.cvGetMinMaxHistValue(h_hue)
# Scratch planes (8-bit, single channel) for splitting the raw BGR frame.
# NOTE(review): fragment -- frame_size, hsv, hue, saturation, value and
# val_scale are allocated outside this view.
red = cv.cvCreateImage(frame_size, 8, 1)
green = cv.cvCreateImage(frame_size, 8, 1)
blue = cv.cvCreateImage(frame_size, 8, 1)
while 1:
    # 1. capture the current image
    frame = highgui.cvQueryFrame (capture)
    if frame is None:
        # no image captured... end the processing
        break
    # compute the hsv version of the image
    cv.cvCvtColor(frame, hsv, cv.CV_BGR2HSV)
    # compute the hue/sat/value from the hsv image
    cv.cvSplit(hsv, hue, saturation, value, None)
    # split the raw frame too (OpenCV channel order is B, G, R)
    cv.cvSplit(frame, blue, green, red, None);
    # NOTE(review): the round trip scales V by 1/100 and back by 1 (not
    # 100), so `value` ends up divided by 100 -- looks like a posterize /
    # darkening step, but confirm the back-scale factor is intentional.
    cv.cvCvtScale(value, val_scale, 1/100.);
    cv.cvCvtScale(val_scale, value, 1);
    # handle events
    k = highgui.cvWaitKey (10)
    # we can now display the images
    highgui.cvShowImage('Camera', frame)
    highgui.cvShowImage('HUE', hue)
    highgui.cvShowImage('SATURATION', saturation)
    highgui.cvShowImage('VALUE', value)
    highgui.cvShowImage('RED', red)
    highgui.cvShowImage('GREEN', green)