class CameoDepth(Cameo):
    """Face-swapping webcam variant of Cameo.

    NOTE(review): despite the name, this version reads no depth channels --
    it runs the plain BGR pipeline. Confirm the class name is intended.
    """

    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        # Third argument True: mirror the preview.
        self._captureManager = CaptureManager(
            cv2.VideoCapture(0), self._windowManager, True)
        self._faceTracker = FaceTracker()
        self._shouldDrawDebugRects = False
        self._curveFilter = filters.BGRPortraCurveFilter()

    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            # BUGFIX: a failed capture yields frame=None; skip processing
            # instead of crashing (matches the guarded Cameo variant in
            # this file).
            if frame is not None:
                self._faceTracker.update(frame)
                faces = self._faceTracker.faces
                rects.swapRects(frame, frame,
                                [face.faceRect for face in faces])
                filters.strokeEdges(frame, frame)
                self._curveFilter.apply(frame, frame)
                if self._shouldDrawDebugRects:
                    self._faceTracker.drawDebugRects(frame)
            self._captureManager.exitFrame()
            self._windowManager.processEvents()
def __init__(self):
    """Create the preview window, a mirrored webcam capture, the Provia
    curve filter and a face tracker; debug rectangles start disabled."""
    self._windowManager = WindowManager('Cameo', self.onKeypress)
    self._captureManager = CaptureManager(
        cv2.VideoCapture(0), self._windowManager, True)
    self._curveFilter = filters.BGRProviaCurveFilter()
    self._faceTracker = FaceTracker()
    self._shouldDrawDebugRects = False
def __init__(self):
    """Set up an un-mirrored capture of a prerecorded video file plus a
    face tracker and a Portra curve filter."""
    self._windowManager = WindowManager('Cameo', self.onKeypress)
    # Third argument False: do not mirror playback of a file source.
    self._captureManager = CaptureManager(
        cv2.VideoCapture("videos/Megamind.avi"), self._windowManager, False)
    self._faceTracker = FaceTracker()
    self._shouldDrawDebugRects = False
    self._curveFilter = filters.BGRPortraCurveFilter()
def __init__(self):
    """Open an OpenNI depth camera and set up face-tracking state."""
    self._windowManager = WindowManager('Cameo', self.onKeypress)
    # Depth device selection: OpenNI is the Microsoft Kinect default.
    device = depth.CV_CAP_OPENNI
    # device = depth.CV_CAP_OPENNI_ASUS  # uncomment for Asus Xtion
    self._captureManager = CaptureManager(
        cv2.VideoCapture(device), self._windowManager, True)
    self._faceTracker = FaceTracker()
    self._shouldDrawDebugRects = False
    self._curveFilter = filters.BGRPortraCurveFilter()
class CameoDepth(Cameo):
    """Cameo variant driven by an OpenNI depth camera: the face swap is
    restricted to pixels at each face's depth via a median mask."""

    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        device = depth.CV_CAP_OPENNI  # uncomment for Microsoft Kinect
        #device = depth.CV_CAP_OPENNI_ASUS # uncomment for Asus Xtion
        self._captureManager = CaptureManager(
            cv2.VideoCapture(device), self._windowManager, True)
        self._faceTracker = FaceTracker()
        self._shouldDrawDebugRects = False
        self._curveFilter = filters.BGRPortraCurveFilter()

    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            # Pull the three OpenNI channels for the current frame.
            self._captureManager.channel = \
                depth.CV_CAP_OPENNI_DISPARITY_MAP
            disparityMap = self._captureManager.frame
            self._captureManager.channel = \
                depth.CV_CAP_OPENNI_VALID_DEPTH_MASK
            validDepthMask = self._captureManager.frame
            self._captureManager.channel = \
                depth.CV_CAP_OPENNI_BGR_IMAGE
            frame = self._captureManager.frame
            # BUGFIX: any channel can come back None on a failed grab;
            # skip the frame instead of crashing the tracker.
            if (frame is not None and disparityMap is not None
                    and validDepthMask is not None):
                self._faceTracker.update(frame)
                faces = self._faceTracker.faces
                # One median-depth mask per face limits the swap to the
                # face's depth plane.
                masks = [
                    depth.createMedianMask(
                        disparityMap, validDepthMask, face.faceRect)
                    for face in faces
                ]
                rects.swapRects(frame, frame,
                                [face.faceRect for face in faces], masks)
                filters.strokeEdges(frame, frame)
                self._curveFilter.apply(frame, frame)
                if self._shouldDrawDebugRects:
                    self._faceTracker.drawDebugRects(frame)
            self._captureManager.exitFrame()
            self._windowManager.processEvents()
def __init__(self):
    """Constructor: window, mirrored webcam capture, face tracker and a
    cross-process curve filter."""
    self._windowManager = WindowManager('Cameo', self.onKeypress)
    self._captureManager = CaptureManager(
        cv2.VideoCapture(0), self._windowManager, True)
    self._faceTracker = FaceTracker()
    self._shouldDrawDebugRects = False
    self._curveFilter = filters.BGRCrossProcessCurveFilter()
class Cameo(object):
    """Minimal capture/preview application: shows the webcam feed and
    handles screenshot, screencast and quit keys."""

    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(
            cv2.VideoCapture(0), self._windowManager, True)

    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            # TODO: Filter the frame (Chapter 3).
            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def onKeypress(self, keycode):
        """Handle a keypress.

        space  -> Take a screenshot.
        tab    -> Start/stop recording a screencast.
        escape -> Quit.
        """
        if keycode == 32:  # space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9:  # tab
            if self._captureManager.isWritingVideo:
                self._captureManager.stopWritingVideo()
            else:
                self._captureManager.startWritingVideo('screencast.avi')
        elif keycode == 27:  # escape
            self._windowManager.destroyWindow()
class Cameo(object):
    """Webcam preview that applies an edge-stroking filter to each frame."""

    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(
            cv2.VideoCapture(0), self._windowManager, True)
        # self._curveFilter = filters.EmbossFilter()

    def run(self):
        """Capture, filter and display frames until the window closes."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            # NOTE(review): other variants spell this filters.strokeEdges;
            # confirm the singular name matches the filters module here.
            filters.strokeEdge(frame, frame)
            # self._curveFilter.apply(frame, frame)
            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def onKeypress(self, keycode):
        """space: screenshot, tab: toggle screencast, esc: quit."""
        if keycode == 32:  # space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9:  # tab
            if self._captureManager.isWritingVideo:
                self._captureManager.stopWritingVideo()
            else:
                self._captureManager.startWritingVideo('screencast.avi')
        # Kept as a separate `if` (not elif), as in the original.
        if keycode == 27:  # esc
            self._windowManager.destroyWindow()
class Cameo(object):
    """Face-swapping webcam app: detects faces, swaps their rectangles,
    strokes edges and applies a Portra-like curve filter."""

    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(
            cv2.VideoCapture(0), self._windowManager, True)
        self._faceTracker = FaceTracker()
        self._shouldDrawDebugRects = False
        self._curveFilter = filters.BGRPortraCurveFilter()

    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            # A failed grab yields None; only process real frames.
            if frame is not None:
                self._faceTracker.update(frame)
                faces = self._faceTracker.faces
                faceRects = [face.faceRect for face in faces]
                rects.swapRects(frame, frame, faceRects)
                filters.strokeEdges(frame, frame)
                self._curveFilter.apply(frame, frame)
                if self._shouldDrawDebugRects:
                    self._faceTracker.drawDebugRects(frame)
            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def onKeypress(self, keycode):
        """Handle a keypress.

        space  -> Take a screenshot.
        tab    -> Start/stop recording a screencast.
        x      -> Start/stop drawing debug rectangles around faces.
        escape -> Quit.
        """
        if keycode == 32:  # space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9:  # tab
            if self._captureManager.isWritingVideo:
                self._captureManager.stopWritingVideo()
            else:
                self._captureManager.startWritingVideo('screencast.avi')
        elif keycode == 120:  # x
            self._shouldDrawDebugRects = not self._shouldDrawDebugRects
        elif keycode == 27:  # escape
            self._windowManager.destroyWindow()
def __init__(self):
    """Set up capture from a Freenect depth camera plus the tracking and
    background-subtraction state for the benFinder application."""
    self._windowManager = WindowManager('benFinder', self.onKeypress)
    device = depth.CV_CAP_FREENECT
    #device = 1
    # FIX: parenthesized so the statement is valid in both Python 2 and 3
    # (behavior identical under Python 2).
    print("device=%d" % device)
    self._captureManager = CaptureManager(
        device, self._windowManager, True)
    self._captureManager.channel = depth.CV_CAP_OPENNI_BGR_IMAGE
    self._faceTracker = FaceTracker()
    self._shouldDrawDebugRects = False
    self._backgroundSubtract = False
    self._autoBackgroundSubtract = False
    self._curveFilter = filters.BGRPortraCurveFilter()
    # Reference images for manual/automatic background subtraction;
    # populated later at runtime.
    self.background_video_img = None
    self.background_depth_img = None
    self.autoBackgroundImg = None
    self._ts = TimeSeries()
    self._frameCount = 0
class Cameo(object):
    """Webcam preview that recolors each frame with the CMV filter."""

    def __init__(self):
        self._window_manager = WindowManager('Cameo', self.on_keypress)
        self._capture_manager = CaptureManager(
            cv2.VideoCapture(0), self._window_manager, False)

    def run(self):
        """Show recolored frames until the window is closed."""
        self._window_manager.create_window()
        while self._window_manager.is_window_created:
            # CaptureManager is used as a context manager here: entering
            # grabs a frame, exiting releases/draws it.
            with self._capture_manager as f:
                frame = f.frame
                filters.recolor_CMV(frame, frame)
            self._window_manager.process_event()

    def on_keypress(self, keycode):
        """escape -> quit."""
        if keycode == 27:
            self._window_manager.destroy_window()
class Cameo(object):
    """Webcam app that strokes edges and applies a Portra curve filter."""

    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(
            cv2.VideoCapture(0), self._windowManager, True)
        self._curveFilter = filters.BGRPortraCurveFilter()

    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            # Filter order: edge strokes first, then the Portra curve.
            filters.strokeEdges(frame, frame)
            self._curveFilter.apply(frame, frame)
            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def onKeypress(self, keycode):
        """Handle a keypress.

        space  -> Take a screenshot.
        tab    -> Start/stop recording a screencast.
        escape -> Quit.
        """
        if keycode == 32:  # space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9:  # tab
            if self._captureManager.isWritingVideo:
                self._captureManager.stopWritingVideo()
            else:
                self._captureManager.startWritingVideo('screencast.avi')
        elif keycode == 27:  # escape
            self._windowManager.destroyWindow()
class Cameo(object):
    """Basic capture/display loop with screenshot and quit keys."""

    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(
            cv2.VideoCapture(0), self._windowManager, True)

    def run(self):
        """Main loop: enter a frame, (eventually) process it, exit it."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            # Enter the frame.
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            # Goal: process the frame (not implemented yet).
            # Exit the frame.
            self._captureManager.exitFrame()
            # NOTE(review): spelled `preocessEvent` -- confirm it matches
            # the WindowManager method name.
            self._windowManager.preocessEvent()

    def onKeypress(self, keycode):
        """Key handling: SPACE screenshot, ESC quit.

        NOTE(review): the original docs also mentioned TAB for recording,
        but no tab branch is implemented below.
        """
        if keycode == 32:  # SPACE
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 27:  # ESC
            self._windowManager.destroyWindow()
class Cameo(object):
    """Webcam app that strokes edges then sharpens each frame."""

    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(
            cv.VideoCapture(0), self._windowManager, True)
        self._curveFilter = filters.SharpenFilter()

    def run(self):
        """Main application loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            filters.strokeEdges(frame, frame)
            self._curveFilter.apply(frame, frame)
            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def onKeypress(self, keycode):
        """Handle a keypress.

        space  -> Take a screenshot
        tab    -> Start/stop recording a screencast
        escape -> Quit
        """
        if keycode == 32:
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9:
            if self._captureManager.isWritingVideo:
                self._captureManager.stopWritingVideo()
            else:
                self._captureManager.startWritingVideo('screencast.avi')
        elif keycode == 27:
            self._windowManager.destroyWindow()
class Cameo(object):
    """Webcam app applying an edge-stroke pass and a sharpen filter."""

    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(
            cv2.VideoCapture(0), self._windowManager, True)
        self._curveFilter = filters.SharpenFilter()

    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        # NOTE(review): mixed naming below (isWindowsCreated, enter_frame,
        # stokeEdges, destroyWindows) is preserved -- confirm against the
        # managers/filters module this file pairs with.
        while self._windowManager.isWindowsCreated:
            self._captureManager.enter_frame()
            frame = self._captureManager.frame
            filters.stokeEdges(frame, frame)
            self._curveFilter.apply(frame, frame)
            self._captureManager.exit_frame()
            self._windowManager.processEvents()

    def onKeypress(self, keycode):
        """Handle a keypress.

        space  -> take a screenshot
        tab    -> start/stop recording a screencast
        escape -> quit
        """
        if keycode == 32:  # space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9:  # tab
            if self._captureManager.is_writing_video:
                self._captureManager.stopWritingVideo()
            else:
                self._captureManager.startWritingVideo('screencast.avi')
        elif keycode == 27:  # escape
            self._windowManager.destroyWindows()
class Cameo(object):
    """Webcam app: stroke edges, then emulate Portra film colors."""

    def __init__(self):
        self._window_manager = WindowManager('Cameo', self.on_key_press)
        self._capture_manager = CaptureManager(
            cv2.VideoCapture(0), self._window_manager, True)
        self._curve_filter = filters.BGRPortraCurveFilter()

    def run(self):
        """Run the main loop."""
        self._window_manager.create_window()
        while self._window_manager.is_window_created:
            self._capture_manager.enter_frame()
            frame = self._capture_manager.frame
            # Edge strokes first, then Portra-film color emulation.
            filters.stroke_edges(frame, frame)
            self._curve_filter.apply(frame, frame)
            self._capture_manager.exit_frame()
            self._window_manager.process_events()

    def on_key_press(self, key_code):
        """Handle a key press.

        space  -> take a snapshot
        tab    -> start/stop recording a screen cast
        escape -> quit
        """
        if key_code == 32:  # space
            self._capture_manager.write_image('screenshot.png')
        elif key_code == 9:  # tab
            if self._capture_manager.is_writing_video:
                self._capture_manager.stop_writing_video()
            else:
                self._capture_manager.start_writing_video('screencast.avi')
        elif key_code == 27:  # escape
            # NOTE(review): spelled `destory_window` -- confirm it matches
            # the WindowManager method name.
            self._window_manager.destory_window()
class Cameo(object):
    """Webcam app with edge strokes + sharpen; echoes key codes."""

    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(
            cv2.VideoCapture(0), self._windowManager, True)
        self._curveFilter = filters.SharpenFilter()

    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            filters.strokeEdges(frame, frame)
            self._curveFilter.apply(frame, frame)
            self._captureManager.exitFrame()
            self._windowManager.processEvent()

    def onKeypress(self, keyCode):
        """Handle a key press.

        space  -> take a screenshot
        tab    -> start/stop recording a screencast
        escape -> quit
        """
        print(keyCode)  # echo every key code for debugging
        if keyCode == 32:  # space
            self._captureManager.writeImage('screenshot.png')
        elif keyCode == 9:  # tab
            if self._captureManager.isWritingVideo:
                self._captureManager.stopWritingVideo()
            else:
                self._captureManager.startWritingVideo('screencast.avi')
        elif keyCode == 27:  # escape
            # Release the camera explicitly before tearing down the window.
            self._captureManager._capture.release()
            self._windowManager.destoryWindow()
def __init__(self, method, src):
    """Set up windows, capture and the worm finder.

    method -- tracking method name forwarded to WormFinder.
    src    -- integer key into the `source` table below (live cameras
              0/1 or one of the prerecorded video files).
    """
    self.color = True
    self.motorsOn = False

    ### Sensitivity of tracker params
    self._sampleFreq = 0.1  #in sec

    ### Set Camera params
    #self.resolution = (640, 480 )
    self.resolution = (1280, 960)
    # Capture sources selectable by small integer codes.
    source = {
        0: 0,
        1: 1,
        2: 'led_move1.avi',
        3: 'screencast.avi',
        4: 'screencast 1.avi',
        5: 'shortNoBox.avi',
        6: 'longNoBox.avi',
        7: 'H299.avi',
        8: 'testRec.avi',
        9: 'longDemo.avi'
    }
    self.captureSource = source[int(src)]

    ### Timing initialization
    self._startTime = time.time()
    # Back-date the last check so the first sample fires immediately.
    self._lastCheck = self._startTime - self._sampleFreq

    ### Display params
    self.mirroredPreview = False

    ### Initialize Objects

    ##### Windows
    self._rawWindow = WindowManager('RawFeed', self.onKeypress)

    ### Capture -- resolution set here
    self._cap = CaptureManager(cv2.VideoCapture(self.captureSource),
                               self._rawWindow, self.mirroredPreview,
                               self.resolution)
    # The device may not honor the requested resolution; use the actual
    # one from here on.
    actualCols, actualRows = self._cap.getResolution()
    self.centerPt = utils.Point(actualCols / 2, actualRows / 2)
    ## from here on out use this resolution
    boundCols = 600
    boundRows = 600
    ### Arguments for finder
    # --> Pairs are always COLS, ROWS !!!!!!!
    self.finderArgs = {
        'method': method,
        'gsize': 45,
        'gsig': 9,
        'window': 3,
        'MAXONEFRAME': 500,
        'REFPING': 600000,
        'MAXREF': 1000,
        'captureSize': utils.Rect(actualCols, actualRows, self.centerPt),
        'cropRegion': utils.Rect(100, 100, self.centerPt),
        'decisionBoundary': utils.Rect(boundCols, boundRows, self.centerPt),
        'color': self.color,
        'motorsOn': self.motorsOn
    }
    self._wormFinder = WormFinder(**self.finderArgs)

    ##### Debugging
    # self._gaussianWindow = WindowManager('Gaussian', self.onKeypress)
    self._overlayWindow = WindowManager('Overlay', self.onKeypress)
class VideoAnalysis:
    """Tracks two detected regions through a video file and records the
    angle between them frame by frame."""

    def __init__(self, create_tracker_fun, file_path):
        # create_tracker_fun -- factory returning a fresh cv2 tracker;
        # file_path          -- video file to analyze (also window title).
        self._window_manager = WindowManager(
            file_path,
            self.on_keypress
        )
        self._capture_manager = CaptureManager(
            cv2.VideoCapture(file_path),
            self._window_manager,
            width=640
        )
        self._track_marks = []
        self._create_tracker_fun = create_tracker_fun
        self._multi_tracker = cv2.MultiTracker_create()
        # NOTE(review): appended to on every save_rois call but never
        # cleared -- confirm unbounded growth is intended.
        self._initial_roi_histograms = []
        self._base_angle = None
        self._angle_change = []
        self._frame_no = 0

    def run(self):
        """Main loop: detect two ROIs, track them across frames, and log
        the angle between them whenever it changes."""
        self._window_manager.create_window()
        self._capture_manager.paused = False
        frame_generator = self.frames()
        frame = next(frame_generator)
        rois = detect(frame)
        self.save_rois(frame, rois)
        previous_angle = None
        while self._window_manager.window_created and frame is not None:
            frame = next(frame_generator)
            ok, rois = self._multi_tracker.update(frame)
            if ok and len(rois) > 1:
                upper, lower = Rectangle(*rois[0]), Rectangle(*rois[1])
                # Every 100 frames, re-detect and replace the tracked
                # ROIs if the new detections look substantially better.
                if not self._frame_no % 100:
                    new_rois = detect(frame)
                    if len(new_rois) >= 2:
                        new_similarity = roi_similarity(frame, *new_rois[:2])
                        old_similarity = roi_similarity(frame, upper, lower)
                        current_overlap = rois_overlap(upper, lower)
                        if old_similarity < 0.4 and new_similarity - old_similarity > 0.4:
                            for pair in zip(new_rois[:2], [upper, lower]):
                                overlap = rois_overlap(*pair)
                                if current_overlap > 0.2 or overlap < 0.4:
                                    self.save_rois(frame, new_rois[:2])
                current_angle = self.to_angle(upper, lower)
                # Record the angle only when it actually changes.
                if current_angle != previous_angle:
                    previous_angle = current_angle
                    self._angle_change.append(self.to_angle(upper, lower))
                # Horizontal baseline plus the current-angle line, both
                # anchored at the upper ROI's vertical midpoint.
                base_angle_line = (
                    Point(upper.x, upper.y + upper.h//2),
                    Point(lower.x, upper.y + upper.h//2))
                current_angle_line = (
                    Point(upper.x, upper.y + upper.h//2),
                    self.count_point(current_angle, upper, lower))
                self._capture_manager.add_lines([current_angle_line, base_angle_line])
                self._capture_manager.add_rois([upper, lower])
                if not (previous_angle and self._capture_manager.paused):
                    print(current_angle)
            elif not self._capture_manager.paused:
                print("Tracking failure")
                # Attempt to re-acquire the targets every 15 frames.
                if frame is not None and not self._frame_no % 15:
                    new_rois = detect(frame)
                    self.save_rois(frame, new_rois)
            self._window_manager.process_events()
            if frame is None:
                # End of stream: close the window to leave the loop.
                self._window_manager.destroy_window()

    def save_rois(self, frame, rois):
        """Reset the multi-tracker to follow (at most) the first two ROIs
        and record the baseline angle between them."""
        self._multi_tracker = cv2.MultiTracker_create()
        self._track_marks = []
        for bounding_box in rois[:2]:
            self._track_marks.append(bounding_box)
            roi_fragment = bounding_box.clip_to_fit(frame.shape).sample_from_image(frame)
            histogram = cv2.calcHist(roi_fragment, [0], None, [100], [0, 255])
            self._initial_roi_histograms.append(histogram)
            self._multi_tracker.add(self._create_tracker_fun(), frame, bounding_box.parameters)
        if len(self._track_marks) >= 2:
            # Angle between the two ROIs' top-left corners is the zero
            # reference used by to_angle().
            self._base_angle = Point.point_angle(
                self._track_marks[0].corners[0],
                self._track_marks[1].corners[0]
            )

    def to_angle(self, upper, lower):
        """Return the corner-to-corner angle relative to the baseline,
        folded so results below -180 wrap around."""
        new_angle = Point.point_angle(upper.corners[0], lower.corners[0])
        result = new_angle - self._base_angle
        if result < -180:
            result = 360 + result
        return result

    def count_point(self, angle, upper, lower):
        """Return the endpoint of a line at `angle` degrees from the
        upper ROI's midline, taken at the lower ROI's x position."""
        b = math.hypot(lower.x - upper.x, 0)
        c = b / math.cos(math.radians(angle))
        a = c * math.sin(math.radians(angle))
        new_y = upper.y + 0.5*upper.h + a
        return Point(lower.x, new_y)

    def frames(self):
        """Generator yielding one frame per enter/exit capture cycle."""
        while True:
            self._capture_manager.enter_frame()
            yield self._capture_manager.frame
            self._frame_no += 1
            self._capture_manager.exit_frame()

    def on_keypress(self, keycode):
        """space: screenshot, tab: toggle recording, esc: quit,
        enter: pause/resume playback."""
        if keycode == 32:  # space
            print("screenshot created")
            self._capture_manager.write_image('out/screenshot.png')
        elif keycode == 9:  # tab
            if not self._capture_manager.is_writing_video:
                print("recording started")
                self._capture_manager.start_writing_video('out/screencast.avi')
            else:
                print("recording finished")
                self._capture_manager.stop_writing_video()
        elif keycode == 27:  # escape
            print("exiting")
            self._window_manager.destroy_window()
        elif keycode == 0x0D:  # enter
            if not self._capture_manager.paused:
                print("stop film")
                self._capture_manager.paused = True
            else:
                print("start film")
                self._capture_manager.paused = False
def __init__( self, method, src ):
    """Set up windows, capture and the worm finder.

    method -- tracking method name forwarded to WormFinder.
    src    -- integer key into the `source` table below (live cameras
              0/1 or a prerecorded video file).
    """
    ### Sensitivity of tracker params
    self._sampleFreq = 0.1 #in sec

    ### Set Camera params
    #self.resolution = (640, 480 )
    self.resolution = (1280, 960)
    # Capture sources selectable by small integer codes.
    source = {
        0:0,
        1:1,
        2:'led_move1.avi',
        3:'screencast.avi',
        4:'screencast 1.avi',
        5: 'shortNoBox.avi',
        6: 'longNoBox.avi',
        7: 'H299.avi',
        8: 'testRec.avi',
        9: 'longDemo.avi',
        10: 'worm2014_05_05-12-44-53.avi'
    }
    self.color = True
    self.captureSource = source[int(src)]

    ### Timing initialization
    self._startTime = time.time()
    # Back-date the last check so the first sample fires immediately.
    self._lastCheck = self._startTime - self._sampleFreq

    ### Display params
    self.mirroredPreview = False

    ### Initialize Objects

    ##### Windows
    self._rawWindow = WindowManager( 'RawFeed', self.onKeypress )

    ### Capture -- resolution set here
    self._cap = CaptureManager( cv2.VideoCapture(self.captureSource),
                                self._rawWindow, self.mirroredPreview,
                                self.resolution)
    # The device may not honor the requested resolution; use the actual
    # one from here on.
    actualCols, actualRows = self._cap.getResolution()
    ## from here on out use this resolution

    ### Arguments for finder
    self.finderArgs = {
        'method' : method,
        'gsize' : 45,
        'gsig' : 9,
        'window' : 3,
        'boundBoxRow' : 150,
        'boundBoxCol' : 150,
        'limRow' : 100,
        'limCol' : 100,
        'MAXONEFRAME': 500,
        'REFPING' : 600000,
        'MAXREF': 1000,
        'capCols':actualCols,
        'capRows': actualRows,
        'color' : self.color
    }
    self._wormFinder = WormFinder( **self.finderArgs )

    ##### Debugging
    self._overlayWindow = WindowManager( 'Overlay', self.onKeypress )
    self.motorsOn = False
def __init__(self):
    """Window, mirrored webcam capture, face tracker and an emboss
    curve filter (any filter from the filters module would work)."""
    self._window_manager = WindowManager('Cameo', self.on_key_press)
    self._capture_manager = CaptureManager(
        cv2.VideoCapture(0), self._window_manager, True)
    self._face_tracker = FaceTracker()
    self._should_draw_debug_rects = False
    self._curve_filter = filters.EmbossFilter()  # can use any of applied filters
class Ball_Tracker(object):
    """Tracks a basketball through a video using a Haar-cascade detection
    followed by meanShift tracking, and reports entry/exit angles and the
    maximum height of the ball's arc."""

    def __init__(self, windowName, capture):
        """
        Constructor used to initialize the window manager, capture manager
        and ball detector.  Also initializes orangeLower and orangeUpper,
        a range of orange HSV values used to help with detection.
        """
        self._windowManager = WindowManager(windowName, self.onKeypress)
        self._captureManager = CaptureManager(capture, self._windowManager, True)
        self._ball_detector = Ball_Detector("ball_classifier.xml")
        self._paused = False
        # define a general orange color range (HSV) to help with detection
        self.orangeLower = (0,96,91)
        self.orangeUpper = (7,255,255)

    def run(self):
        """
        Runs the main application. It performs the following actions:
        1. Initializes variables to store the current ball direction amount,
           the angles and the max height
        2. Sets up meanshift tracking for the basketball
        3. Computes end points of small line to check which direction the
           ball is moving.
        4. Computes angle between points.
        5. Computes the highest point reached.
        6. Continues to update the values frame by frame while the
           application is still running.
        7. Returns the entry angle, maximum ball height and exit angle if found.
        8. Displays the corresponding values on screen.
        """
        exitAngle = 0
        entryAngle =0
        # Consecutive-direction counters used to debounce angle detection.
        upRight=0
        downRight=0
        foundEntryAngle=False
        foundExitAngle =False
        #initialize maxX,maxY values which represent the apex of the ball arc
        maxX=800
        maxY=800
        # Refresh the reference point every 50 frames.
        counter = 50
        xCoord2 = 0
        yCoord2 = 0
        # define list of points (trail drawn behind the ball)
        self.points = deque(maxlen=32)
        self._windowManager.createWindow()
        # Find object coordinates using HaarCascade
        track_window = self._find_basketball()
        # Grab the next frame of the video to initialize tracking
        self._captureManager.enterFrame()
        frame = self._captureManager.frame
        self._captureManager.exitFrame()
        # Set up what is needed for the meanShift tracking: a hue
        # histogram of the orange-masked region of interest.
        hsv_roi = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv_roi, self.orangeLower, self.orangeUpper)
        roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0,180])
        cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
        term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
        while self._windowManager.isWindowCreated:
            # Handle Video Pausing
            while self._paused:
                self._windowManager.processEvents()
            # Grab the next frame from the video
            entered = self._captureManager.enterFrame()
            if not entered:
                break
            frame = self._captureManager.frame
            # Convert the current frame to HSV color space for the
            # back-projection calculation.
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            back_project = cv2.calcBackProject([hsv], [0], roi_hist, [0,180], 1)
            # Mean Shift. Returns the updated location of the object.
            ret, track_window = cv2.meanShift(back_project, track_window, term_crit)
            (x,y,w,h) = track_window
            # Draw a rectangle around the ball
            cv2.rectangle(frame, (x,y), (x+w, y+h), 255, 2)
            # Get the x and y coordinates of the ball.
            # NOTE(review): `x+x+w/2` evaluates as x + x + (w/2), i.e.
            # 2x + w/2 -- confirm whether (2x+w)/2 (the center) was meant.
            xCoord = x+x+w/2
            yCoord = y+y+h/2
            counter-=1
            if (counter==0):
                counter = 50
                # Refresh the reference coordinates used for direction tests.
                xCoord2 = x+x+w/2
                yCoord2 = y+y+h/2
            # Moving up-and-right for >5 checks => record the exit angle.
            if (direction2(xCoord2,yCoord2,xCoord,yCoord,"upright")==True):
                upRight+=1
                if (upRight>5 and foundExitAngle==False):
                    foundExitAngle=True
                    exitAngle=angle(xCoord,yCoord,2074,800)
                    print("The exit angle was",exitAngle)
            # Moving down-and-right for >5 checks => record the entry angle.
            if (direction2(xCoord,yCoord,xCoord2,yCoord2,"downright")==True):
                downRight+=1
                if (downRight>5 and foundEntryAngle==False):
                    foundEntryAngle=True
                    entryAngle=angle(xCoord,yCoord,maxX,maxY)
                    print( "The entry angle was",entryAngle )
            # Find the max height of the ball (smaller y = higher on screen).
            if (maxY>yCoord):
                maxY=y
                maxX=x
            self.points.appendleft( (int(x+w/2), int(y+h/2)) )
            # Draw a rectangle around the ball
            cv2.rectangle(frame, (x,y), (x+w, y+h), 255, 2)
            for i in range(1, len(self.points)):
                # if either of the tracked points are None, ignore them
                if self.points[i - 1] is None or self.points[i] is None:
                    continue
                # otherwise, compute the thickness of the line and
                # draw the connecting lines (older segments get thinner)
                thickness = int(np.sqrt(32 / float(i + 1)) * 2.5)
                cv2.line(frame, self.points[i - 1], self.points[i],
                         (0, 0, 255), thickness)
            # Set the frame to be displayed
            self._captureManager.frame = frame
            if (foundExitAngle):
                print( "The ball's max height was:",1000-maxY, "units." )
                cv2.putText(frame,"Entry angle was "+str(int(entryAngle))+" degrees!",(10,25),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0),3,0)
                cv2.putText(frame,"Exit angle was "+str(int(exitAngle))+" degrees!",(10,75),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0),3,0)
                cv2.putText(frame,"Max height was "+str((1000-maxY)/float(200))+" M!",(10,125),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0),3,0)
            # Signal we are done with the frame, write to cv2.imshow
            self._captureManager.exitFrame()
            # Always listen for specific keypress events
            # Triggered by onKeypress(keycode)!
            self._windowManager.processEvents()
            cv2.imshow("Frame", frame)

    def _find_basketball(self):
        """
        Attempt to find the basketball on screen.
        Returns the tracked window once found.
        """
        # NOTE(review): the detection call appears twice per iteration;
        # the first result is overwritten -- confirm this duplication is
        # intentional.
        while True:
            entered = self._captureManager.enterFrame()
            frame = self._captureManager.frame
            track_window = self._ball_detector.find_object(frame)
            if not entered:
                break
            frame = self._captureManager.frame
            track_window = self._ball_detector.find_object(frame)
            if track_window is not False:
                self._captureManager.exitFrame()
                break
            self._captureManager.exitFrame()
        return track_window

    def onKeypress(self, keycode):
        """
        Handle a keypress.

        space -> Take a screenshot
        tab   -> start / stop recording a screencast
        esc   -> quit
        p     -> pause / resume
        """
        # spacebar
        if keycode == 32:
            self._captureManager.writeImage('screenshot.png')
        # tab
        elif keycode == 9:
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo('screencast.avi')
            else:
                self._captureManager.stopWritingVideo()
        # esc
        elif keycode == 27:
            self._windowManager.destroyWindow()
        elif keycode == ord('p'):
            self._paused = not self._paused
class Browser(object):
    """Face-swapping viewer (Python 2) for an arbitrary video source,
    with debug-rectangle and screencast controls."""

    def __init__(self, video_source):
        # Window, mirrored capture of the given source, face tracker and
        # a Portra curve filter (currently unused in run()).
        self._windowManager = WindowManager('Browser', self.onKeypress)
        self._captureManager = CaptureManager(video_source,
                                              self._windowManager, True)
        self._faceTracker = FaceTracker()
        self._shouldDrawDebugRects = False
        self._curveFilter = filters.BGRPortraCurveFilter()

    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            if frame is not None:
                print "got frame"
                self._faceTracker.update(frame)
                faces = self._faceTracker.faces
                rects.swapRects(frame, frame,
                                [face.faceRect for face in faces])
                #filters.strokeEdges(frame, frame)
                #self._curveFilter.apply(frame, frame)
                if self._shouldDrawDebugRects:
                    self._faceTracker.drawDebugRects(frame)
            else:
                # End of the stream: wait for any key, then leave the loop.
                print "got None frame"
                print "press any key to exit."
                cv2.waitKey(0)
                break
            self._captureManager.exitFrame()
            # Slow the event loop for file sources so playback is
            # watchable; webcams (source 0) poll at full speed.
            waitkey_time = 1
            if self._captureManager._video_source != 0:
                waitkey_time = 500
            self._windowManager.processEvents(waitkey_time)

    def onKeypress(self, keycode):
        """Handle a keypress.

        space -> Take a screenshot.
        tab -> Start/stop recording a screencast.
        x -> Start/stop drawing debug rectangles around faces.
        escape -> Quit.
        """
        if keycode == 32:  # space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9:  # tab
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo(
                    '/Users/xcbfreedom/Documents/screencast.avi')
            else:
                self._captureManager.stopWritingVideo()
        elif keycode == 120:  # x
            self._shouldDrawDebugRects = \
                not self._shouldDrawDebugRects
        elif keycode == 27:  # escape
            self._windowManager.destroyWindow()
def __init__(self, video_source):
    """Set up the browser window, a mirrored capture of `video_source`,
    a face tracker and a Portra curve filter."""
    self._windowManager = WindowManager('Browser', self.onKeypress)
    self._captureManager = CaptureManager(
        video_source, self._windowManager, True)
    self._faceTracker = FaceTracker()
    self._shouldDrawDebugRects = False
    self._curveFilter = filters.BGRPortraCurveFilter()
class Cameo(object):
    ''' Cameo object for the vision framework (Python 2): a webcam app
    with keyboard-cycled recolor, curve and convolution filters.'''

    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(cv2.VideoCapture(0),
                                              self._windowManager, True)
        # Selectable tonemapping curves; index 0 (None) disables.
        self._curves = [None,
                        filters.BGRCrossProcessCurveFilter(),
                        filters.BGRPortraCurveFilter(),
                        filters.BGRProviaCurveFilter(),
                        filters.BGRVelviaCurveFilter()]
        self._curveIndex = 0
        self._curveFilter = self._curves[self._curveIndex]
        # Selectable recolor functions; None disables recoloring.
        self._recolorFilters = [None,
                                filters.recolorCMV,
                                filters.recolorRC,
                                filters.recolorRGV]
        self._recolorIndex = 0
        self._recolor = self._recolorFilters[self._recolorIndex]
        # Selectable convolution kernels; None disables convolution.
        self._convolutionFilters = [None,
                                    filters.findEdgesFilter(),
                                    filters.sharpenFilter(),
                                    filters.blurFilter(),
                                    filters.embossFilter()]
        self._convolutionIndex = 0
        self._convolution = self._convolutionFilters[self._convolutionIndex]
        self._strokeEdges = False
        self._faceTracker = FaceTracker()
        self._shouldDrawDebugRects = True

    def run(self):
        ''' Run the main loop'''
        self._windowManager.createWindow()
        self._windowManager.setStatus("K={},C={},R={},S={}".format(self._convolutionIndex,self._curveIndex,self._recolorIndex,self._strokeEdges))
        # Python 2 print statement: usage banner.
        print"Cameo Vision Framework\n"\
             "Tab to start/stop recording\n"\
             "Space to grab a screenshot\n"\
             "r to cycle through recolor filters <none>, CMV, RC, RGV\n"\
             "c to cycle through tonemapping curves <none>,crossprocess, porta, provia, velvia\n"\
             "k to cycle through convolution filters <none>, find edges,sharpen, blur, emboss\n"\
             "s to apply stroke edges filter\n"
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            self._faceTracker.update(frame)
            faces = self._faceTracker.faces
            rects.swapRects(frame,frame,[face.faceRect for face in faces])
            # Apply whichever filters are currently enabled, in order:
            # convolution -> curve -> recolor -> stroke edges.
            if self._convolution is not None:
                self._convolution.apply(frame, frame)
            if self._curveFilter is not None:
                self._curveFilter.apply(frame, frame)
            if self._recolor is not None:
                self._recolor(frame, frame)
            if self._strokeEdges:
                filters.strokeEdges(frame, frame)
            # NOTE(review): deSkew's placement is ambiguous in the source
            # layout; here it runs on every frame -- confirm.
            filters.deSkew(frame,frame)
            if self._shouldDrawDebugRects:
                self._faceTracker.drawDebugRects(frame)
            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def onKeypress(self, keycode):
        ''' Handle keypresses

        Space  -> take screenshot
        tab    -> Start stop recording
        escape -> quit
        c/r/k  -> cycle curve / recolor / convolution filters
        s/x    -> toggle stroke edges / debug rectangles
        '''
        rawkey = keycode  # for handling esc etc
        keycode = chr(keycode & 255)
        if rawkey == 0x20:  # space
            self._captureManager.writeImage('screenshot.png')
        elif rawkey == 0x09:  #tab
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo('screencast.avi')
            else:
                self._captureManager.stopWritingVideo()
        elif rawkey == 0x1b:  # escape
            self._windowManager.destroyWindow()
        elif keycode in ['c','C']:
            self._curveIndex += 1
            if self._curveIndex >= len(self._curves):
                self._curveIndex = 0
            self._curveFilter = self._curves[self._curveIndex]
        elif keycode in ['r','R']:
            self._recolorIndex += 1
            if self._recolorIndex >= len(self._recolorFilters):
                self._recolorIndex = 0
            self._recolor = self._recolorFilters[self._recolorIndex]
        elif keycode in ['k','K']:
            self._convolutionIndex += 1
            if self._convolutionIndex >= len(self._convolutionFilters):
                self._convolutionIndex = 0
            self._convolution = self._convolutionFilters[self._convolutionIndex]
        elif keycode in ['s','S']:
            if self._strokeEdges:
                self._strokeEdges = False
            else:
                self._strokeEdges = True
        elif keycode in ['x','X']:
            if self._shouldDrawDebugRects:
                self._shouldDrawDebugRects = False
            else:
                self._shouldDrawDebugRects = True
        # Refresh the status line after every keypress.
        statusString="K={},C={},R={},f={}".format(self._convolutionIndex,self._curveIndex,self._recolorIndex,self._strokeEdges)
        self._windowManager.setStatus(statusString)
def __init__(self): self._windowManager = WindowManager('Cameo', self.onKeypress) self._captureManager = CaptureManager(cv2.VideoCapture(0), self._windowManager, True)
class FaceCapture(object): def __init__(self, name): self._configs = utils.loadConfigFile('facecapture.cf') self._users = utils.loadConfigFile('names.cf') self._name = name self._camera = cv2.VideoCapture(int(self._configs['capture.camera'])) self._camera.set(cv2.CAP_PROP_FRAME_WIDTH, int(self._configs['capture.width'])) self._camera.set(cv2.CAP_PROP_FRAME_HEIGHT, int(self._configs['capture.height'])) self._windowManager = WindowManager( 'Face Capture for {0}'.format(self._name), self.onKeypress) self._captureManager = CaptureManager(self._camera, self._windowManager, True) self._faceDetector = FaceDetector() self._filePath = '{0}'.format(self._configs['dataset.dir']) os.makedirs(self._filePath, exist_ok=True) def run(self): if not self._name in self._users: exit(1) self._windowManager.createWindow() idx = 1 while self._windowManager.isWindowCreated: self._captureManager.enterFrame() frame = self._captureManager.frame self._faceDetector.update(frame) faces = self._faceDetector.faces if len(faces) == 1: x, y, h, w = faces[0].faceRect face_image = frame[y:y + h, x:x + w] face_image = cv2.cvtColor(face_image, cv2.COLOR_RGB2GRAY) cv2.imshow('face', face_image) cv2.imwrite( '{0}\\{1}-{2}.jpg'.format(self._filePath, self._users[self._name], idx), face_image) idx += 1 time.sleep(0.1) self._captureManager.exitFrame() self._windowManager.processEvents() def onKeypress(self, keycode): if keycode == 27: # escape self._windowManager.destroyWindow() self._camera.release() elif keycode == ord('r'): # record if self._isRecording: self._isRecording = False else: self._isRecording = True
def __init__(self): self._windowManager = WindowManager("Cameo", self.onKeypress) self._captureManager = CaptureManager( cv2.VideoCapture("/home/abner0908/Videos/MyInputVid.avi"), self._windowManager, True )
def __init__(self): self._windowManager = WindowManager('Cameo', self.onKeypress) self._captureManager = CaptureManager(cv2.VideoCapture(0), self._windowManager, True) self._faceTracker = FaceTracker() self._shoulddrawRects = False
class Facedetect(object): def __init__(self): self._windowManager = WindowManager('Facedetect', self.onKeypress) self._captureManager = CaptureManager(cv2.VideoCapture(camera_nr), self._windowManager, True) self._faceTracker = FaceTracker() self._shouldDrawDebugRects = True self._curveFilter = filters.BGRPortraCurveFilter() def run(self): """Run the main loop.""" self._windowManager.createWindow() while self._windowManager.isWindowCreated: self._captureManager.enterFrame() frame = self._captureManager.frame if frame is not None: t = cv2.getTickCount() self._faceTracker.update(frame) faces = self._faceTracker.faces t = cv2.getTickCount() - t print("time taken for detection = %gms" % (t/(cv2.getTickFrequency())*1000.)) # uncomment this line for swapping faces #rects.swapRects(frame, frame, [face.faceRect for face in faces]) #filters.strokeEdges(frame, frame) #self._curveFilter.apply(frame, frame) if self._shouldDrawDebugRects: self._faceTracker.drawDebugRects(frame) self._faceTracker.drawLinesFromCenter(frame) self._captureManager.exitFrame() self._windowManager.processEvents() def onKeypress(self, keycode): """Handle a keypress. space -> Take a screenshot. tab -> Start/stop recording a screencast. x -> Start/stop drawing debug rectangles around faces. escape -> Quit. """ if keycode == 32: # space self._captureManager.writeImage('screenshot.png') elif keycode == 9: # tab if not self._captureManager.isWritingVideo: self._captureManager.startWritingVideo( 'screencast.avi') else: self._captureManager.stopWritingVideo() elif keycode == 120: # x self._shouldDrawDebugRects = \ not self._shouldDrawDebugRects elif keycode == 27: # escape self._windowManager.destroyWindow() # When everything is done, release the capture self._captureManager.release()
def __init__(self): self._windowManager = WindowManager('Facedetect', self.onKeypress) self._captureManager = CaptureManager(cv2.VideoCapture(camera_nr), self._windowManager, True) self._faceTracker = FaceTracker() self._shouldDrawDebugRects = True self._curveFilter = filters.BGRPortraCurveFilter()
class Cameo(object): def __init__(self, windowName='Cameo', _shouldMirrorPreview=True): self._windowManager = WindowManager(windowName, self.onKeypress) self._captureManager = CaptureManager( capture=cv2.VideoCapture(0), previewWindowManager=self._windowManager, shouldMirrorPreview=_shouldMirrorPreview) self._pictureNumber: int = 0 self._videoNumber: int = 0 self._curveFilter = filters.BGRPortraCurveFilter() def run(self): """Run the main loop. :rtype: object """ # print the key-operation print("Press space to take a screenshot\n" + " escape to quit\n" + " tab to start/stop recording a screencast\n") self._windowManager.createWindow() while self._windowManager.isWindowCreated: self._captureManager.enterFrame() frame = self._captureManager.frame # TODO: Filter the frame (Chapter3). filters.strokeEdges(frame, frame) self._curveFilter.apply(frame, frame) self._captureManager.exitFrame() self._windowManager.processEvents() def onKeypress(self, keycode): """ Handle a keypress space -> Take a screenshot. tab -> Start/stop recording a screencast. escape -> Quit. """ if keycode == 32: # space self._pictureNumber += 1 print("Take a screenshot named screenshot" + str(self._pictureNumber) + ".png\n") self._captureManager.writeImage('screenshot' + str(self._pictureNumber) + ".png") elif keycode == 9: # tab if not self._captureManager.isWritingVideo: self._videoNumber += 1 print("Start recording a screencast...\n") self._captureManager.startWritingVideo('screencast' + str(self._videoNumber) + ".avi") else: self._captureManager.stopWritingVideo() print("Stop recording a screencast... \n" + "screencast" + str(self._videoNumber) + ".avi saved.\n") elif keycode == 27: # escape print("Quit.\n") self._windowManager.destroyWindow()
class Browser(object): def __init__(self,video_source): self._windowManager = WindowManager('Browser', self.onKeypress) self._captureManager = CaptureManager(video_source, self._windowManager, True) self._faceTracker = FaceTracker() self._shouldDrawDebugRects = False self._curveFilter = filters.BGRPortraCurveFilter() def run(self): """Run the main loop.""" self._windowManager.createWindow() while self._windowManager.isWindowCreated: self._captureManager.enterFrame() frame = self._captureManager.frame if frame is not None: print "got frame" self._faceTracker.update(frame) faces = self._faceTracker.faces rects.swapRects(frame, frame, [face.faceRect for face in faces]) #filters.strokeEdges(frame, frame) #self._curveFilter.apply(frame, frame) if self._shouldDrawDebugRects: self._faceTracker.drawDebugRects(frame) else: print "got None frame" print "press any key to exit." cv2.waitKey(0) break self._captureManager.exitFrame() waitkey_time=1 if self._captureManager._video_source!=0: waitkey_time=500 self._windowManager.processEvents(waitkey_time) def onKeypress(self, keycode): """Handle a keypress. space -> Take a screenshot. tab -> Start/stop recording a screencast. x -> Start/stop drawing debug rectangles around faces. escape -> Quit. """ if keycode == 32: # space self._captureManager.writeImage('screenshot.png') elif keycode == 9: # tab if not self._captureManager.isWritingVideo: self._captureManager.startWritingVideo( '/Users/xcbfreedom/Documents/screencast.avi') else: self._captureManager.stopWritingVideo() elif keycode == 120: # x self._shouldDrawDebugRects = \ not self._shouldDrawDebugRects elif keycode == 27: # escape self._windowManager.destroyWindow()
def __init__(self): self._windowManager = WindowManager('Cameo', self.onKeypress) self._captureManager = CaptureManager(cv2.VideoCapture(0), self._windowManager, True) #kaz notes: enter a random color filter here #self._curveFilter = filters.BGRPortraCurveFilter() self._curveFilter = filters.BGRVelviaCurveFilter()
def __init__(self): #创建一个窗口,并将键盘的回调函数传入 self._windowManager = WindowManager('Cameo', self.onKeypress) #告诉程序数据来自摄像头,还有镜面效果 self._captureManager = Capturemanager(cv2.VideoCapture(0), self._windowManager, True)
class BenFinder(object):
    """Python 2 depth-camera monitor (Kinect via freenect).

    Subtracts a stored background (or an exponentially accumulated one) from
    each depth/video frame, masks the region of interest, and feeds the mean
    brightness into a TimeSeries for periodic peak analysis.
    """
    # Filenames for the saved background reference images.
    BACKGROUND_VIDEO_FNAME = "background_video.png"
    BACKGROUND_DEPTH_FNAME = "background_depth.png"

    def __init__(self):
        self._windowManager = WindowManager('benFinder', self.onKeypress)
        device = depth.CV_CAP_FREENECT
        #device = 1
        print "device=%d" % device
        self._captureManager = CaptureManager(
            device, self._windowManager, True)
        # Start on the colour image channel; 'd' toggles to the depth map.
        self._captureManager.channel = depth.CV_CAP_OPENNI_BGR_IMAGE
        self._faceTracker = FaceTracker()
        self._shouldDrawDebugRects = False
        self._backgroundSubtract = False
        self._autoBackgroundSubtract = False
        self._curveFilter = filters.BGRPortraCurveFilter()
        # Background references, loaded lazily from disk.
        self.background_video_img = None
        self.background_depth_img = None
        # Running-average background used by the auto-subtraction mode.
        self.autoBackgroundImg = None
        self._ts = TimeSeries()
        # Counts frames between periodic TimeSeries plots/peak searches.
        self._frameCount = 0

    def loadBackgroundImages(self):
        """ Load the background images to be used for background
        subtraction from disk files.
        """
        self.background_video_img = cv2.imread(BenFinder.BACKGROUND_VIDEO_FNAME)
        self.background_depth_img = cv2.imread(BenFinder.BACKGROUND_DEPTH_FNAME,
                                               cv2.CV_LOAD_IMAGE_GRAYSCALE)

    def showBackgroundImage(self):
        """ Display the background image used for subtraction in a
        separate window
        """
        # Load the images from disk if necessary.
        if (not self.background_depth_img or not self.background_video_img):
            self.loadBackgroundImages()
        # Display the correct image
        if (self._autoBackgroundSubtract):
            cv2.imshow("Auto Background Image", self.autoBackgroundImg)
        else:
            if (self._captureManager.channel == \
                depth.CV_CAP_OPENNI_DEPTH_MAP):
                cv2.imshow("background_depth_img", self.background_depth_img)
            elif (self._captureManager.channel == \
                  depth.CV_CAP_OPENNI_BGR_IMAGE):
                cv2.imshow("background_video_img", self.background_video_img)
            else:
                print "Error - Invalid Channel %d." % \
                    self._captureManager.channel

    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            if frame is not None:
                if (self._backgroundSubtract):
                    if (self._autoBackgroundSubtract):
                        if (self._captureManager.channel == \
                            depth.CV_CAP_OPENNI_DEPTH_MAP):
                            # Seed the running-average background on first use.
                            if (self.autoBackgroundImg == None):
                                self.autoBackgroundImg = numpy.float32(frame)
                            # First work out the region of interest by
                            # subtracting the fixed background image
                            # to create a mask.
                            absDiff = cv2.absdiff(frame, self.background_depth_img)
                            benMask, maskArea = filters.getBenMask(absDiff, 8)
                            cv2.accumulateWeighted(frame,
                                                   self.autoBackgroundImg, 0.05)
                            # Convert the background image into the same format
                            # as the main frame.
                            bg = cv2.convertScaleAbs(self.autoBackgroundImg,
                                                     alpha=1.0)
                            # Subtract the background from the frame image
                            cv2.absdiff(frame, bg, frame)
                            # Scale the difference image to make it more sensitive
                            # to changes.
                            cv2.convertScaleAbs(frame, frame, alpha=100)
                            #frame = cv2.bitwise_and(frame,frame,dst=frame,mask=benMask)
                            frame = cv2.multiply(frame, benMask, dst=frame, dtype=-1)
                            bri = filters.getMean(frame, benMask)
                            #print "%4.0f, %3.0f" % (bri[0],self._captureManager.fps)
                            self._ts.addSamp(bri[0])
                            # Every 15 frames, plot the series and look for peaks.
                            if (self._frameCount < 15):
                                self._frameCount = self._frameCount + 1
                            else:
                                self._ts.plotRawData()
                                self._ts.findPeaks()
                                self._frameCount = 0
                        else:
                            print "Auto background subtract only works for depth images!"
                    else:
                        # Simple (fixed-image) background subtraction.
                        if (self._captureManager.channel == \
                            depth.CV_CAP_OPENNI_DEPTH_MAP):
                            cv2.absdiff(frame, self.background_depth_img, frame)
                            benMask = filters.getBenMask(frame, 8)
                            bri = filters.getMean(frame, benMask)
                            print bri
                        elif (self._captureManager.channel == \
                              depth.CV_CAP_OPENNI_BGR_IMAGE):
                            cv2.absdiff(frame, self.background_video_img, frame)
                        else:
                            print "Error - Invalid Channel %d." % \
                                self._captureManager.channel
                #ret,frame = cv2.threshold(frame,200,255,cv2.THRESH_TOZERO)
                #self._faceTracker.update(frame)
                #faces = self._faceTracker.faces
                #if self._shouldDrawDebugRects:
                #    self._faceTracker.drawDebugRects(frame)
            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def onKeypress(self, keycode):
        """Handle a keypress.

        space -> Take a screenshot.
        tab -> Start/stop recording a screencast.
        x -> Start/stop drawing debug rectangles around faces.
        a -> toggle automatic accumulated background subtraction on or off.
        b -> toggle simple background subtraction on or off.
        s -> Save current frame as background image.
        d -> Toggle between video and depth map view
        i -> Display the background image that is being used for subtraction.
        escape -> Quit.
        """
        print "keycode=%d" % keycode
        if keycode == 32:  # space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9:  # tab
            if not self._captureManager.isWritingVideo:
                print "Starting Video Recording..."
                self._captureManager.startWritingVideo(
                    'screencast.avi')
            else:
                print "Stopping video recording"
                self._captureManager.stopWritingVideo()
        elif keycode == 120:  # x
            self._shouldDrawDebugRects = \
                not self._shouldDrawDebugRects
        elif (chr(keycode) == 'a'):
            # Autometic background subtraction
            if (self._autoBackgroundSubtract == True):
                print "Switching off auto background Subtraction"
                # NOTE(review): assigns autoBackgroundImage while the rest of
                # the class uses autoBackgroundImg — likely a typo; the
                # accumulator is therefore never actually reset.
                self.autoBackgroundImage = None
                self._autoBackgroundSubtract = False
            else:
                print "Switching on auto background subtraction"
                self._autoBackgroundSubtract = True
        elif (chr(keycode) == 'b'):
            # Simple background subtraction
            if (self._backgroundSubtract == True):
                print "Switching off background Subtraction"
                self._backgroundSubtract = False
            else:
                print "Switching on background subtraction"
                self.loadBackgroundImages()
                self._backgroundSubtract = True
        elif (chr(keycode) == 'd'):
            # Toggle the capture channel between video and depth.
            if (self._captureManager.channel == depth.CV_CAP_OPENNI_BGR_IMAGE):
                print "switching to depth map..."
                self._captureManager.channel = depth.CV_CAP_OPENNI_DEPTH_MAP
            else:
                print "switching to video"
                self._captureManager.channel = depth.CV_CAP_OPENNI_BGR_IMAGE
        elif (chr(keycode) == 'i'):
            self.showBackgroundImage()
        elif (chr(keycode) == 's'):
            # Persist the current frame as the background reference for
            # whichever channel is active.
            print "Saving Background Image"
            if (self._captureManager.channel == depth.CV_CAP_OPENNI_DEPTH_MAP):
                self._captureManager.writeImage(BenFinder.BACKGROUND_DEPTH_FNAME)
            elif (self._captureManager.channel == depth.CV_CAP_OPENNI_BGR_IMAGE):
                self._captureManager.writeImage(BenFinder.BACKGROUND_VIDEO_FNAME)
            else:
                print "Invalid Channel %d - doing nothing!" \
                    % self._captureManager.channel
        elif keycode == 27:  # escape
            self._windowManager.destroyWindow()
def __init__(self): self._windowManager = WindowManager('Cameo', self.onKeypress) self._captureManager = CaptureManager(cv2.VideoCapture(0), self._windowManager, False) self._curveFilter = filters.BGRPortraCurveFilter() self._convolutionFilter = filters.FindEdgesFilter()
def __init__(self): self._windowManager = WindowManager('mak', keyPressCallback = self.onKeypress) self._captureManager = CaptureManager(cv2.VideoCapture('http://192.168.0.101:8003/video'), self._windowManager, True)
class App(object):
    """Python 2 face-dataset capture tool.

    Detects faces in the camera feed, crops and resizes each one to a square
    training image, and saves it (plus a horizontally mirrored copy) under
    <output_dir>/<identity>/ with a zero-padded sequence number.
    """

    def __init__(self, args):
        # args is expected to carry: identity, output_dir, image_size.
        assert args.identity is not None
        assert os.path.exists(args.output_dir)
        self._output_dir = os.path.join(args.output_dir, args.identity)
        if not os.path.exists(self._output_dir):
            os.mkdir(self._output_dir)
        self._identity = args.identity
        self._window_manager = WindowManager('Face Capture', self.on_keypress)
        self._capture_manager = CaptureManager(cv2.VideoCapture(0),
                                               self._window_manager, True)
        # Sequence number for saved files (Python 2 long).
        self._img_count = long(0)
        # When True, every detected face is saved automatically.
        self._auto_write_img = False
        # Last cropped face; saved on demand by <space>.
        self._cropped = None
        self._image_size = args.image_size

    def run(self):
        """Run the main loop"""
        self._window_manager.create_window()
        while self._window_manager.is_window_created:
            self._capture_manager.enter_frame()
            frame = self._capture_manager.frame
            if frame is not None:
                # face detection
                faces = api.detect_faces(frame)
                for face in faces:
                    # face appears to be (x1, y1, x2, y2) — TODO confirm
                    # against api.detect_faces.  Crop, then resize to the
                    # square training size.
                    self._cropped = cv2.resize(frame[face[1]:face[3], face[0]:face[2]],
                                               dsize=(self._image_size, self._image_size),
                                               interpolation=cv2.INTER_CUBIC)
                    if self._auto_write_img:
                        # face cropped from screen shot
                        self.write_image(self._cropped)
                    # mark face rectangle
                    cv2.rectangle(frame, (face[0], face[1]),
                                  (face[2], face[3]), (0, 255, 0), 2)
            self._capture_manager.exit_frame()
            self._window_manager.process_events()

    def on_keypress(self, keycode):
        # tab toggles auto-save; space saves the last crop; escape quits.
        if keycode == 9:  # tab
            self._auto_write_img = not self._auto_write_img
        elif keycode == 32:  # space
            self.write_image(self._cropped)
        elif keycode == 27:  # escape
            self._window_manager.destroy_window()

    def write_image(self, cropped):
        """Save a cropped face and its mirrored twin, then bump the counter."""
        if cropped is None:
            print 'Cropped is none'
            return
        filename = os.path.join(self._output_dir,
                                (self._identity.lower() + ('_%06d.jpg' % self._img_count)))
        mirror_filename = os.path.join(
            self._output_dir,
            (self._identity.lower() + ('_%06d_mirror.jpg' % self._img_count)))
        print 'Saving image ', filename
        # save to disk
        cv2.imwrite(filename, cropped)
        print 'Saving mirrored image ', mirror_filename
        mirror_cropped = np.fliplr(cropped).copy()
        cv2.imwrite(mirror_filename, mirror_cropped)
        # increase count
        self._img_count += 1
class Cameo(object): def __init__(self): self._windowManager = WindowManager('Cameo', self.onKeypress) self._captureManager = CaptureManager(cv2.VideoCapture(0), self._windowManager, True) self._curveFilter = filters.BGRProviaCurveFilter() self._faceTracker = FaceTracker() self._shouldDrawDebugRects = False def run(self): """ Run the main loop """ self._windowManager.createWindow() print("Window '{}' Created".format(self._windowManager.windowName)) print("\n{}\n{}\n{}\n{}".format("Controls:", "space --> Take a screenshot", "tab --> Start/stop recording a screencast", "escape --> Quit")) while self._windowManager.isWindowCreated: self._captureManager.enterFrame() frame = self._captureManager.frame self._faceTracker.update(frame) faces = self._faceTracker.faces rects.swapRects(frame, frame, [face.faceRect for face in faces]) # Add filtering to the frame filters.strokeEdges(frame,frame) self._curveFilter.apply(frame,frame) if self._shouldDrawDebugRects: self._faceTracker.drawDebugRects(frame) self._captureManager.exitFrame() self._windowManager.processEvents() def stop(self): print("[CAMEO] closing all processes") self._captureManager._capture.release() self._windowManager.destroyWindow() def onKeypress(self, keycode): """ Handle a keypress space --> Take a screenshot tab --> Start/stop recording a screencast x --> Toggle drawing debug rectangles around faces escape --> Quit """ if keycode == 32: # Space self._captureManager.writeImage('screenshot.png'); print("Writing image to file....") elif keycode == 9: # Tab if not self._captureManager.isWritingVideo: self._captureManager.startWritingVideo('screencast.avi') print("Writing video to file...") else: self._captureManager.stopWritingVideo() print("Stopped writing video") elif keycode == 120: # x self._shouldDrawDebugRects = not self._shouldDrawDebugRects print("Toggled drawing rectangles") elif keycode == 27: # escape print("Closing Window...") self._windowManager.destroyWindow()
class Cameo(object):
    """Per-pixel Gaussian background model demo.

    background() just previews frames (use <space> to collect screenshots);
    Gaussian() fits a per-pixel mean/sigma from the saved screenshots; run()
    then highlights pixels falling outside mean +/- k*sigma as the moving
    object.
    """

    def __init__(self):
        # Counters used to name saved screenshots / screencasts.
        self.shootcount = 1
        self.castcount = 1
        # Per-pixel background statistics, filled in by Gaussian().
        self.mean = None
        self.sigma = None
        self._windowManager = WindowManager('REAL', self.onKeypress)
        self._captureManager = CaptureManager(cv2.VideoCapture(0),
                                              self._windowManager,
                                              shouldMirrorPreview=True)
        #shouldMirrorPreview can convert the image into its mirrored image
        #self._curveFilter = filters.SharpFilter()
        self._curveFilter = filters.EmbossFilter()

    def background(self):
        """Preview loop used to collect background screenshots via <space>."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def run(self, k=3):
        """Main loop: fit the background model, then flag outlier pixels.

        k -- number of standard deviations defining the background band.
        """
        self.Gaussian()
        self._windowManager.createWindow()
        cv2.namedWindow('Object')
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            self.judge(frame, k)
            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def judge(self, frame, k):
        """Show pixels outside the mean +/- k*sigma band in the 'Object' window."""
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        m, n = np.shape(gray)
        low = self.mean - k * self.sigma
        upper = self.mean + k * self.sigma
        # (gray-low)*(upper-gray) >= 0 exactly when gray is inside the band,
        # so after the two assignments temp is 1 only for outlier pixels.
        temp = np.multiply((gray - low), (upper - gray))
        temp[temp >= 0] = 0
        temp[temp < 0] = 1
        #show the moving object which doesn't contain in background
        new = (np.multiply(gray, temp)).astype(np.uint8)
        cv2.imshow('Object', new)

    def Gaussian(self, filename='screenshoot'):
        """Fit per-pixel mean and sigma from the images in `filename` dir."""
        img = []
        names = os.listdir(filename)
        for name in names:
            temp = filename + '/' + name
            img.append(cv2.imread(temp, cv2.IMREAD_GRAYSCALE))
        m, n = np.shape(img[0])
        mean = np.zeros((m, n))
        var = np.zeros((m, n))
        #calculate by matrix is faster
        for ele in img:
            mean += ele
        mean /= len(img)
        for ele in img:
            var += np.multiply((ele - mean), (ele - mean))
        var /= len(img)
        self.mean = mean
        self.sigma = np.sqrt(var)

    def onKeypress(self, keycode):
        """space -> numbered screenshot; tab -> screencast; Esc -> quit."""
        if keycode == 32:  #space
            print(str(self.shootcount) + ' Screenshot Finished')
            self._captureManager.writeImage('screenshoot/' + str(self.shootcount) + '.png')
            self.shootcount += 1
        elif keycode == 9:  #tab
            if not self._captureManager.isWritingVideo:
                print(str(self.castcount) + ' Screencast Begins')
                self._captureManager.startWritingVideo('screencast/' + str(self.castcount) + '.avi')
            else:
                self._captureManager.stopWritingVideo()
                print(str(self.castcount) + ' Screencast Finished')
                self.castcount += 1
        elif keycode == 27:  #Esc
            self._windowManager.destroyWindow()
            self._captureManager._capture.release()
def __init__(self): self._windowManager = WindowManager('app') self._captureManager = CaptureManager(cv2.VideoCapture(0), self._windowManager, True) self._eventsHandler = EventsHandler(self._captureManager, self._windowManager)
def __init__(self): self._window_manager = WindowManager('Cameo', self.on_keypress) self._capture_manager = CaptureManager(cv2.VideoCapture(0), self._window_manager, False)
def __init__(self, method, src): self.color = True self.motorsOn = False ### Sensitivity of tracker params self._sampleFreq = 0.1 # in sec ### Set Camera params # self.resolution = (640, 480 ) self.resolution = (1280, 960) source = { 0: 0, 1: 1, 2: "led_move1.avi", 3: "screencast.avi", 4: "screencast 1.avi", 5: "shortNoBox.avi", 6: "longNoBox.avi", 7: "H299.avi", 8: "testRec.avi", 9: "longDemo.avi", } self.captureSource = source[int(src)] ### Timing initialization self._startTime = time.time() self._lastCheck = self._startTime - self._sampleFreq ### Display params self.mirroredPreview = False ### Initialize Objects ##### Windows self._rawWindow = WindowManager("RawFeed", self.onKeypress) ### Capture -- resolution set here self._cap = CaptureManager( cv2.VideoCapture(self.captureSource), self._rawWindow, self.mirroredPreview, self.resolution ) actualCols, actualRows = self._cap.getResolution() self.centerPt = utils.Point(actualCols / 2, actualRows / 2) ## from here on out use this resolution boundCols = 600 boundRows = 600 ### Arguments for finder # --> Pairs are always COLS, ROWS !!!!!!! self.finderArgs = { "method": method, "gsize": 45, "gsig": 9, "window": 3, "MAXONEFRAME": 500, "REFPING": 600000, "MAXREF": 1000, "captureSize": utils.Rect(actualCols, actualRows, self.centerPt), "cropRegion": utils.Rect(100, 100, self.centerPt), "decisionBoundary": utils.Rect(boundCols, boundRows, self.centerPt), "color": self.color, "motorsOn": self.motorsOn, } self._wormFinder = WormFinder(**self.finderArgs) ##### Debugging # self._gaussianWindow = WindowManager('Gaussian', self.onKeypress) self._overlayWindow = WindowManager("Overlay", self.onKeypress)
def __init__(self): self._window_manager = WindowManager('Face Detector', self.on_key_press) self._capture_manager = CaptureManager(cv2.VideoCapture(0), self._window_manager, True) self._face_tracker = FaceTracker()
class Tracker(object):
    """Worm tracker: shows a raw and an overlay feed, periodically runs the
    WormFinder on sampled frames, and can drive motors from its decisions."""

    def __init__(self, method, src):
        self.color = True
        self.motorsOn = False
        ### Sensitivity of tracker params
        self._sampleFreq = 0.1  # in sec
        ### Set Camera params
        # self.resolution = (640, 480 )
        self.resolution = (1280, 960)
        # 0/1 are live cameras; the remaining entries are pre-recorded videos.
        source = {
            0: 0,
            1: 1,
            2: "led_move1.avi",
            3: "screencast.avi",
            4: "screencast 1.avi",
            5: "shortNoBox.avi",
            6: "longNoBox.avi",
            7: "H299.avi",
            8: "testRec.avi",
            9: "longDemo.avi",
        }
        self.captureSource = source[int(src)]
        ### Timing initialization
        self._startTime = time.time()
        # Back-date the last check so the very first frame is processed.
        self._lastCheck = self._startTime - self._sampleFreq
        ### Display params
        self.mirroredPreview = False
        ### Initialize Objects
        ##### Windows
        self._rawWindow = WindowManager("RawFeed", self.onKeypress)
        ### Capture -- resolution set here
        self._cap = CaptureManager(
            cv2.VideoCapture(self.captureSource), self._rawWindow, self.mirroredPreview, self.resolution
        )
        actualCols, actualRows = self._cap.getResolution()
        self.centerPt = utils.Point(actualCols / 2, actualRows / 2)
        ## from here on out use this resolution
        boundCols = 600
        boundRows = 600
        ### Arguments for finder
        # --> Pairs are always COLS, ROWS !!!!!!!
        self.finderArgs = {
            "method": method,
            "gsize": 45,
            "gsig": 9,
            "window": 3,
            "MAXONEFRAME": 500,
            "REFPING": 600000,
            "MAXREF": 1000,
            "captureSize": utils.Rect(actualCols, actualRows, self.centerPt),
            "cropRegion": utils.Rect(100, 100, self.centerPt),
            "decisionBoundary": utils.Rect(boundCols, boundRows, self.centerPt),
            "color": self.color,
            "motorsOn": self.motorsOn,
        }
        self._wormFinder = WormFinder(**self.finderArgs)
        ##### Debugging
        # self._gaussianWindow = WindowManager('Gaussian', self.onKeypress)
        self._overlayWindow = WindowManager("Overlay", self.onKeypress)

    def run(self):
        """Main loop: show raw frames and run the finder at _sampleFreq."""
        # Show windows
        self._rawWindow.createWindow()
        self._overlayWindow.createWindow()
        i = 0
        while self._rawWindow.isWindowCreated:
            self._cap.enterFrame()
            frame = self._cap.frame
            # Probably not useful, removes errors when playing from video
            # if not self._captureManager.gotFrame:
            #    self.shutDown()
            #    break
            # Display raw frame to rawWindow
            t1 = time.time()
            # Get frame
            frame = self._cap.frame
            # Show frame to raw feed
            self._rawWindow.show(frame)
            # If tracking is enabled or motors are on, start tracking
            if time.time() - self._lastCheck >= self._sampleFreq:
                if self.finderArgs["method"] in ["lazyc", "lazyd", "lazy"]:
                    self.gaussian = self._wormFinder.processFrame(frame)
                    self.overlayImage = copy.deepcopy(frame)
                    if self.motorsOn:
                        self._wormFinder.decideMove()
                    self._lastCheck = time.time()
                    self._wormFinder.drawDebugCropped(self.overlayImage)
                    self._wormFinder.drawTextStatus(self.overlayImage, self._cap.isWritingVideo, self.motorsOn)
                    self._overlayWindow.show(self.overlayImage)
                    # if self.gaussian is not None:
                    #    self._gaussianWindow.show(self.gaussian)
                    # cv2.imwrite('g-%d.jpg' % i, self.gaussian )
                    # cv2.imwrite('o-%d.jpg' % i, self.overlayImage )
                if self.finderArgs["method"] in ["test", "conf"]:
                    # self.overlayImage = copy.deepcopy(frame)
                    self._wormFinder.drawTest(frame)  # self.overlayImage)
                    # self._overlayWindow.show(self.overlayImage)
            i += 1
            self._cap.exitFrame()
            self._rawWindow.processEvents()
            logt.info("processing: %0.6f" % (time.time() - t1))

    @property
    def isDebug(self):
        # Debug mode (INFO or finer) skips real servo commands.
        return logt.getEffectiveLevel() <= logging.INFO

    def shutDown(self):
        """Close windows, finish any recording, and release the servos."""
        self._rawWindow.destroyWindow()
        if self._cap.isWritingVideo:
            self._cap.stopWritingVideo()
        try:
            self._wormFinder.servos.disableMotors()
            self._wormFinder.servos.closeSerial()
        except Exception as e:
            logt.exception(str(e))

    def onKeypress(self, keycode):
        """
        Keypress options
        <SPACE> --- Motors On
        < TAB > --- start/stop recording screencast
        < ESC > --- quit
        """
        if keycode == 32:  # space
            if self.motorsOn:
                self.motorsOn = False
                # _captureManager.writeImage('screenshot.png')
                if not self.isDebug:
                    self._wormFinder.servos.disableMotors()
            else:
                self.motorsOn = True
                self._wormFinder.launch = 0
                # NOTE(review): nesting of the lines below was ambiguous in
                # the collapsed original — confirm against upstream history.
                if not self.isDebug:
                    self._wormFinder.servos.enableMotors()
                self._wormFinder.launch = 0
                time.sleep(2)
        elif keycode == 9:  # tab
            if not self._cap.isWritingVideo:
                # Timestamped filename; MJPG codec.
                self._cap.startWritingVideo(
                    "worm%s.avi" % time.strftime("%Y_%m_%d-%H-%M-%S", time.localtime(time.time())),
                    cv2.cv.CV_FOURCC(*"MJPG"),
                )
            else:
                self._cap.stopWritingVideo()
        elif keycode == 27:  # escape
            self.shutDown()
class Tracker ( object ):
    """Worm tracker (earlier variant): raw + overlay feeds, periodic
    WormFinder processing on sampled frames, optional motor control."""

    def __init__( self, method, src ):
        ### Sensitivity of tracker params
        self._sampleFreq = 0.1 #in sec
        ### Set Camera params
        #self.resolution = (640, 480 )
        self.resolution = (1280, 960)
        # 0/1 are live cameras; the remaining entries are pre-recorded videos.
        source = {
            0:0,
            1:1,
            2:'led_move1.avi',
            3:'screencast.avi',
            4:'screencast 1.avi',
            5: 'shortNoBox.avi',
            6: 'longNoBox.avi',
            7: 'H299.avi',
            8: 'testRec.avi',
            9: 'longDemo.avi',
            10: 'worm2014_05_05-12-44-53.avi'
            }
        self.color = True
        self.captureSource = source[int(src)]
        ### Timing initialization
        self._startTime = time.time()
        # Back-date the last check so the very first frame is processed.
        self._lastCheck = self._startTime - self._sampleFreq
        ### Display params
        self.mirroredPreview = False
        ### Initialize Objects
        ##### Windows
        self._rawWindow = WindowManager( 'RawFeed', self.onKeypress )
        ### Capture -- resolution set here
        self._cap = CaptureManager(
            cv2.VideoCapture(self.captureSource),
            self._rawWindow,
            self.mirroredPreview,
            self.resolution)
        actualCols, actualRows = self._cap.getResolution()
        ## from here on out use this resolution
        ### Arguments for finder
        self.finderArgs = {
            'method' : method,
            'gsize' : 45,
            'gsig' : 9,
            'window' : 3,
            'boundBoxRow' : 150,
            'boundBoxCol' : 150,
            'limRow' : 100,
            'limCol' : 100,
            'MAXONEFRAME': 500,
            'REFPING' : 600000,
            'MAXREF': 1000,
            'capCols':actualCols,
            'capRows': actualRows,
            'color' : self.color
            }
        self._wormFinder = WormFinder( **self.finderArgs )
        ##### Debugging
        self._overlayWindow = WindowManager( 'Overlay', self.onKeypress )
        self.motorsOn = False

    def run( self ):
        """Main loop: show raw frames and run the finder at _sampleFreq."""
        # Show windows
        self._rawWindow.createWindow()
        self._overlayWindow.createWindow()
        while self._rawWindow.isWindowCreated:
            self._cap.enterFrame()
            frame = self._cap.frame
            # Probably not useful, removes errors when playing from video
            # if not self._captureManager.gotFrame:
            #    self.shutDown()
            #    break
            # Display raw frame to rawWindow
            t1 = time.time()
            # Get frame
            frame = self._cap.frame
            # Show frame to raw feed
            self._rawWindow.show(frame)
            # If tracking is enabled or motors are on, start tracking
            if time.time() - self._lastCheck >= self._sampleFreq:
                if self.finderArgs['method'] in ['lazyc', 'lazyd']:
                    self.gaussian = self._wormFinder.processFrame( frame )
                    self.overlayImage = copy.deepcopy(frame)
                    if self.motorsOn:
                        self._wormFinder.decideMove()
                    self._lastCheck = time.time()
                    self._wormFinder.drawDebugCropped( self.overlayImage)
                    self._wormFinder.drawTextStatus(self.overlayImage, self._cap.isWritingVideo, self.motorsOn)
                    self._overlayWindow.show(self.overlayImage)
                if self.finderArgs['method'] in ['test','conf']:
                    self._wormFinder.drawTest( frame )
            self._cap.exitFrame()
            self._rawWindow.processEvents()
            logt.debug('frame processing took: %0.6f' % (time.time() - t1))

    @property
    def isDebug( self ):
        # Debug mode (INFO or finer) skips real servo commands.
        return logt.getEffectiveLevel() <= logging.INFO

    def shutDown( self ):
        """Close windows, finish any recording, and release the servos."""
        self._rawWindow.destroyWindow()
        #if not self.isDebug:
        if self._cap.isWritingVideo:
            self._cap.stopWritingVideo()
        try:
            # self._wormFinder.writeOut('%s-%s' % (self.finderArgs['method'], self.captureSource))
            self._wormFinder.servos.disableMotors()
            self._wormFinder.servos.closeSerial()
        except Exception as e:
            logt.exception(str(e))

    def onKeypress ( self, keycode ):
        '''
        Keypress options
        <SPACE> --- Motors On
        < TAB > --- start/stop recording screencast
        < ESC > --- quit
        '''
        if keycode == 32: #space
            if self.motorsOn:
                self.motorsOn = False
                #_captureManager.writeImage('screenshot.png')
                if not self.isDebug:
                    self._wormFinder.servos.disableMotors()
                    #cv2.displayOverlay('Overlay','Motors disabled', 0)
            else:
                self.motorsOn = True
                self._wormFinder.launch = 0
                # NOTE(review): nesting of the lines below was ambiguous in
                # the collapsed original — confirm against upstream history.
                if not self.isDebug:
                    self._wormFinder.servos.enableMotors()
                self._wormFinder.launch = 0
                time.sleep(2)
                #cv2.displayOverlay('Overlay','Motors enabled', 0)
        elif keycode == 9: #tab
            if not self._cap.isWritingVideo:
                # Timestamped filename; MJPG codec.
                self._cap.startWritingVideo(
                    'worm%s.avi' % time.strftime("%Y_%m_%d-%H-%M-%S", time.localtime(time.time())),
                    cv2.cv.CV_FOURCC(*'MJPG'))
                # cv2.displayOverlay('Overlay','Writing Video', 0)
            else:
                self._cap.stopWritingVideo()
                # cv2.displayOverlay('Overlay','Not writing Video', 0)
        elif keycode == 27: #escape
            self.shutDown()
class Camera(object):
    """Interactive camera app: tracks faces in the live feed, applies a
    selectable curve filter plus edge strokes, and can swap detected
    faces or draw their rectangles.

    Fixes over the previous revision:
    - filter cycling no longer hard-codes the last index (3); it wraps with
      modulo over ``len(self._filters)`` so the filter list can grow or
      shrink without breaking Enter/Backspace,
    - window title typo 'Camer' -> 'Camera',
    - misspelled internal attribute ``_curveFliter`` -> ``_curveFilter``
      (matches the sibling Cameo classes in this file).
    """

    def __init__(self):
        self._windowManager = WindowManager('Camera', self.onKeypress)
        self._captureManager = CaptureManager(cv2.VideoCapture(0),
                                              self._windowManager, True)
        # Available curve filters; Enter/Backspace cycle through this list.
        self._filters = [
            filters.BGRPortraCurveFilter(),
            filters.BGRProviaCurveFilter(),
            filters.BGRVelviaCurveFilter(),
            filters.BGRCrossProcessCurveFilter()
        ]
        self._currentFilter = 0
        self._curveFilter = self._filters[self._currentFilter]
        self._faceTracker = FaceTracker()
        # Feature toggles flipped by the 'x' and 'q' keys.
        self._shouldDrawRects = False
        self._shouldSwapFaces = False

    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            # Tracking and swapping faces in a camera feed
            self._faceTracker.update(frame)
            tracked_faces = self._faceTracker.faces
            # Re-read the current filter each frame so a keypress takes
            # effect immediately.
            self._curveFilter = self._filters[self._currentFilter]
            filters.strokeEdges(frame, frame)
            self._curveFilter.apply(frame, frame)
            if self._shouldSwapFaces:
                rects.swapRects(frame, frame,
                                [tf.faceRect for tf in tracked_faces])
            if self._shouldDrawRects:
                self._faceTracker.drawRects(frame)
            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def onKeypress(self, keycode):
        """Handle a keypress.

        space     -> Take a screenshot.
        tab       -> Start/stop recording a screencast.
        escape    -> Quit.
        Enter     -> Previous image filter.
        Backspace -> Next image filter.
        x         -> Start/stop drawing rectangles.
        q         -> Start/stop swapping faces.
        """
        if keycode == 32:  # space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9:  # tab
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo('screencast.mp4')
            else:
                self._captureManager.stopWritingVideo()
        elif keycode == 13:  # Enter: previous filter, wrapping around
            self._currentFilter = \
                (self._currentFilter - 1) % len(self._filters)
        elif keycode == 8:  # Backspace: next filter, wrapping around
            self._currentFilter = \
                (self._currentFilter + 1) % len(self._filters)
        elif keycode == 120:  # x
            self._shouldDrawRects = not self._shouldDrawRects
        elif keycode == 113:  # q
            self._shouldSwapFaces = not self._shouldSwapFaces
        elif keycode == 27:  # escape
            self._windowManager.destroyWindow()
def __init__(self):
    """Wire up the preview window, a live camera capture (device 0,
    mirrored preview), and the Portra curve filter."""
    window = WindowManager('Cameo', self.onKeypress)
    self._windowManager = window
    self._captureManager = CaptureManager(
        cv2.VideoCapture(0), window, True)
    self._curveFilter = filters.BGRPortraCurveFilter()
class Tracker(object):
    """Live worm tracker (Rect-based revision): reads frames from a camera
    or canned video, runs a WormFinder on a throttled schedule, and
    optionally drives motors from the finder's decisions.

    Shows two windows: 'RawFeed' (unprocessed frames) and 'Overlay'
    (debug drawing from the finder).
    """

    def __init__(self, method, src):
        """Set up capture, windows, and the WormFinder.

        method -- finder algorithm name (e.g. 'lazyc', 'lazyd', 'lazy',
                  'test', 'conf'); semantics live in WormFinder.
        src    -- integer key into the `source` table below: 0/1 are live
                  camera indices, 2+ are prerecorded video files.
        """
        self.color = True
        # Motors start disabled; toggled by the <SPACE> key in onKeypress.
        self.motorsOn = False
        ### Sensitivity of tracker params
        # Minimum interval between finder runs; frames arriving faster than
        # this are displayed but not processed.
        self._sampleFreq = 0.1 #in sec
        ### Set Camera params
        #self.resolution = (640, 480 )
        self.resolution = (1280, 960)
        # Maps the numeric `src` argument to a capture source: an int for a
        # live camera device, a filename for playback.
        source = {
            0: 0,
            1: 1,
            2: 'led_move1.avi',
            3: 'screencast.avi',
            4: 'screencast 1.avi',
            5: 'shortNoBox.avi',
            6: 'longNoBox.avi',
            7: 'H299.avi',
            8: 'testRec.avi',
            9: 'longDemo.avi'
        }
        self.captureSource = source[int(src)]
        ### Timing initialization
        self._startTime = time.time()
        # Start one sample period in the past so the first frame is processed
        # immediately.
        self._lastCheck = self._startTime - self._sampleFreq
        ### Display params
        self.mirroredPreview = False
        ### Initialize Objects
        ##### Windows
        self._rawWindow = WindowManager('RawFeed', self.onKeypress)
        ### Capture -- resolution set here
        self._cap = CaptureManager(cv2.VideoCapture(self.captureSource),
                                   self._rawWindow,
                                   self.mirroredPreview,
                                   self.resolution)
        # The camera may not honor the requested resolution; query what we
        # actually got and size everything downstream from it.
        actualCols, actualRows = self._cap.getResolution()
        self.centerPt = utils.Point(actualCols / 2, actualRows / 2)
        ## from here on out use this resolution
        boundCols = 600
        boundRows = 600
        ### Arguments for finder
        # --> Pairs are always COLS, ROWS !!!!!!!
        self.finderArgs = {
            'method': method,
            'gsize': 45,
            'gsig': 9,
            'window': 3,
            'MAXONEFRAME': 500,
            'REFPING': 600000,
            'MAXREF': 1000,
            'captureSize': utils.Rect(actualCols, actualRows, self.centerPt),
            'cropRegion': utils.Rect(100, 100, self.centerPt),
            'decisionBoundary': utils.Rect(boundCols, boundRows, self.centerPt),
            'color': self.color,
            'motorsOn': self.motorsOn
        }
        self._wormFinder = WormFinder(**self.finderArgs)
        ##### Debugging
        # self._gaussianWindow = WindowManager('Gaussian', self.onKeypress)
        self._overlayWindow = WindowManager('Overlay', self.onKeypress)

    def run(self):
        """Main loop: show every frame, run the finder at most once per
        sample period, and let the finder steer the motors when enabled."""
        # Show windows
        self._rawWindow.createWindow()
        self._overlayWindow.createWindow()
        # Frame counter; presumably kept for the commented-out debug image
        # dumps below -- confirm before removing.
        i = 0
        while self._rawWindow.isWindowCreated:
            self._cap.enterFrame()
            frame = self._cap.frame
            # Probably not useful, removes errors when playing from video
            # if not self._captureManager.gotFrame:
            #     self.shutDown()
            #     break
            # Display raw frame to rawWindow
            t1 = time.time()
            # Get frame
            # NOTE(review): `frame` was already fetched just above; this
            # second read looks redundant -- confirm CaptureManager.frame has
            # no side effects before removing.
            frame = self._cap.frame
            # Show frame to raw feed
            self._rawWindow.show(frame)
            # If tracking is enabled or motors are on, start tracking
            # Throttle: only run the (expensive) finder once per sample period.
            if time.time() - self._lastCheck >= self._sampleFreq:
                if self.finderArgs['method'] in ['lazyc', 'lazyd', 'lazy']:
                    self.gaussian = self._wormFinder.processFrame(frame)
                    # Deep-copy so debug drawing does not scribble on the raw
                    # frame shown in the RawFeed window.
                    self.overlayImage = copy.deepcopy(frame)
                    if self.motorsOn:
                        self._wormFinder.decideMove()
                    self._lastCheck = time.time()
                    self._wormFinder.drawDebugCropped(self.overlayImage)
                    self._wormFinder.drawTextStatus(self.overlayImage,
                                                    self._cap.isWritingVideo,
                                                    self.motorsOn)
                    self._overlayWindow.show(self.overlayImage)
                    # if self.gaussian is not None:
                    #     self._gaussianWindow.show(self.gaussian)
                    # cv2.imwrite('g-%d.jpg' % i, self.gaussian )
                    # cv2.imwrite('o-%d.jpg' % i, self.overlayImage )
                if self.finderArgs['method'] in ['test', 'conf']:
                    # self.overlayImage = copy.deepcopy(frame)
                    self._wormFinder.drawTest(frame) #self.overlayImage)
                    # self._overlayWindow.show(self.overlayImage)
                i += 1
            self._cap.exitFrame()
            self._rawWindow.processEvents()
            logt.info('processing: %0.6f' % (time.time() - t1))

    @property
    def isDebug(self):
        # Debug mode is driven by the logger level: INFO or finer counts as
        # debug and, notably, suppresses real motor commands in onKeypress.
        return logt.getEffectiveLevel() <= logging.INFO

    def shutDown(self):
        """Tear down: close the window, stop any recording, and best-effort
        disable the motors and close the serial link."""
        self._rawWindow.destroyWindow()
        if self._cap.isWritingVideo:
            self._cap.stopWritingVideo()
        try:
            self._wormFinder.servos.disableMotors()
            self._wormFinder.servos.closeSerial()
        except Exception as e:
            # Best-effort shutdown: the servos may never have been opened
            # (e.g. video playback), so log and continue rather than crash.
            logt.exception(str(e))

    def onKeypress(self, keycode):
        '''
        Keypress options
        <SPACE> --- Motors On
        < TAB > --- start/stop recording screencast
        < ESC > --- quit
        '''
        if keycode == 32: #space
            if self.motorsOn:
                self.motorsOn = False #_captureManager.writeImage('screenshot.png')
                if not self.isDebug:
                    self._wormFinder.servos.disableMotors()
            else:
                self.motorsOn = True
                self._wormFinder.launch = 0
                if not self.isDebug:
                    self._wormFinder.servos.enableMotors()
                    # NOTE(review): `launch` was already reset above; this
                    # second assignment looks redundant -- confirm.
                    self._wormFinder.launch = 0
                    # Give the hardware a moment to come up before tracking
                    # decisions start moving it.
                    time.sleep(2)
        elif keycode == 9: #tab
            if not self._cap.isWritingVideo:
                # Timestamped filename so successive recordings never collide.
                self._cap.startWritingVideo(
                    'worm%s.avi' % time.strftime("%Y_%m_%d-%H-%M-%S",
                                                 time.localtime(time.time())),
                    cv2.cv.CV_FOURCC(*'MJPG'))
            else:
                self._cap.stopWritingVideo()
        elif keycode == 27: #escape
            self.shutDown()
def __init__(self):
    """Wire up the preview window and a capture manager that plays back a
    prerecorded video file (mirrored preview enabled)."""
    self._windowManager = WindowManager('Cameo', self.onKeypress)
    playback = cv2.VideoCapture("../resource/video.mp4")
    self._captureManager = CaptureManager(
        playback, self._windowManager, True)
def __init__(self):
    """Wire up the preview window, a live camera capture (device 0,
    mirrored preview), and the Portra curve filter."""
    manager = WindowManager('Cameo', self.onKeypress)
    self._windowManager = manager
    self._captureManager = CaptureManager(cv2.VideoCapture(0), manager, True)
    self._curveFilter = filters.BGRPortraCurveFilter()