Example No. 1
class Cameo(object):
    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(cv2.VideoCapture(0),
                                              self._windowManager, True)

    def run(self):
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            if frame is not None:
                pass
            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def onKeypress(self, keycode):
        if keycode == 32:
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9:
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo('screencast.avi')
            else:
                self._captureManager.stopWritingVideo()
        elif keycode == 27:
            self._windowManager.destroyWindow()
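
All of these snippets assume companion modules (managers, filters, rects, depth) that provide WindowManager, CaptureManager and the other helpers they call. As a minimal, hypothetical launch stub (not part of any snippet above), a Cameo variant is typically started like this:

# Hypothetical entry point: run the app when the module is executed directly.
if __name__ == '__main__':
    Cameo().run()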
Example No. 2
class Cameo(object):
    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(cv2.VideoCapture(0),
                                              self._windowManager, True)
        self._curveFilter = filters.BGRPortraCurveFilter()

    def run(self):
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            if frame is not None:
                filters.strokeEdges(frame, frame)
                self._curveFilter.apply(frame, frame)
            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def onKeypress(self, keycode):
        """
        Handle a keypress.
        :param keycode:
        Space  -> Take a screenshot.
        Tab    -> Start/stop recording a screencast.
        Escape -> Quit.
        """
        if keycode == 32:
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9:
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo("screencast.avi")
            else:
                self._captureManager.stopWritingVideo()
        elif keycode == 27:
            self._windowManager.destroyWindow()
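
Example No. 2 and many later ones call filters.strokeEdges(frame, frame). A hedged sketch of such an edge-darkening filter, assuming the (src, dst, blurKsize, edgeKsize) signature implied by these calls:

import cv2

def strokeEdges(src, dst, blurKsize=7, edgeKsize=5):
    # Sketch only: blur, find edges with a Laplacian, then darken src's
    # channels where edges are strong, writing the result into dst.
    if blurKsize >= 3:
        blurredSrc = cv2.medianBlur(src, blurKsize)
        graySrc = cv2.cvtColor(blurredSrc, cv2.COLOR_BGR2GRAY)
    else:
        graySrc = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    cv2.Laplacian(graySrc, cv2.CV_8U, graySrc, ksize=edgeKsize)
    normalizedInverseAlpha = (1.0 / 255) * (255 - graySrc)
    channels = cv2.split(src)
    for channel in channels:
        channel[:] = channel * normalizedInverseAlpha
    cv2.merge(channels, dst)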
Example No. 3
    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(cv2.VideoCapture(0),
                                              self._windowManager, True)

        # filter to use
        self._sharpenFilter = filters.FindEdgesFilter()
Example No. 4
class Cameo(object):
    
    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(cv2.VideoCapture(0), self._windowManager, True)
        
    def run(self):
        """run the main loop"""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            
            # TODO: Filter the frame (Chapter 3).
            self._captureManager.exitFrame()
            self._windowManager.processEvents()
            
    def onKeypress(self, keycode):
        """Handle a keypress.
        space  -> Take a screenshot.
        tab    -> Start/stop recording a screencast.
        escape -> Quit.
        """
        if keycode == 32:
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9:
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo('screencast.avi')
            else:
                self._captureManager.stopWritingVideo()
        elif keycode == 27:
            self._windowManager.destroyWindow()
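
Every variant above drives the same small window abstraction. A hedged sketch of the WindowManager interface these snippets rely on; the body is an assumption reconstructed from how it is called (createWindow, isWindowCreated, show, processEvents, destroyWindow, plus a keypress callback):

import cv2

class WindowManager(object):
    # Sketch of the window wrapper assumed by the examples above.

    def __init__(self, windowName, keypressCallback=None):
        self.keypressCallback = keypressCallback
        self._windowName = windowName
        self._isWindowCreated = False

    @property
    def isWindowCreated(self):
        return self._isWindowCreated

    def createWindow(self):
        cv2.namedWindow(self._windowName)
        self._isWindowCreated = True

    def show(self, frame):
        cv2.imshow(self._windowName, frame)

    def destroyWindow(self):
        cv2.destroyWindow(self._windowName)
        self._isWindowCreated = False

    def processEvents(self):
        keycode = cv2.waitKey(1)
        if self.keypressCallback is not None and keycode != -1:
            self.keypressCallback(keycode & 0xFF)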
Example No. 5
class Cameo(object):
    
    def __init__(self):
        self._windowManager = WindowManager('Cameo',
                                            self.onKeypress)
        self._captureManager = CaptureManager(
            cv2.VideoCapture(0), self._windowManager, True)
    
    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            
            if frame is not None:
                # TODO: Filter the frame (Chapter 3).
                pass
            
            self._captureManager.exitFrame()
            self._windowManager.processEvents()
    
    def onKeypress(self, keycode):
        """Handle a keypress.
        
        space  -> Take a screenshot.
        tab    -> Start/stop recording a screencast.
        escape -> Quit.
        
        """
        print "keycode=", keycode
        if keycode == 32: # space
            self._captureManager.writeImage('screenshot.png')
Example No. 6
 def __init__(self):
     self._windowManager = WindowManager('Cameo', self.onKeypress)
     self._captureManager = CaptureManager(cv2.VideoCapture(0),
                                           self._windowManager, True)
     self._faceTracker = FaceTracker()
     self._shouldDrawDebugRects = False
     self._curveFilter = filters.BGRPortraCurveFilter()
Example No. 7
class Cameo(object):
    def __init__(self):
        self._windowManager = WindowManager('Cameo',
                                            self.onKeypress)
        self._captureManager = CaptureManager(
            cv2.VideoCapture(0), self._windowManager, True)
    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()

        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            # TODO: Filter the frame (Chapter 3).
            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def onKeypress(self, keycode):
        """Handle a keypress.
        space -> Take a screenshot.
        tab -> Start/stop recording a screencast.
        escape -> Quit.
        """
        if keycode == 32: # space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9: # tab
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo('screencast.avi')
            else:
                self._captureManager.stopWritingVideo()
        elif keycode == 27: # escape
            self._windowManager.destroyWindow()
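
The capture side is wrapped the same way. A condensed, hedged sketch of a CaptureManager compatible with the calls above (enterFrame/exitFrame, the frame property, writeImage, start/stopWritingVideo); the real class also handles FPS estimation and channel selection, which are omitted here:

import cv2

class CaptureManager(object):
    # Condensed sketch of the capture wrapper assumed by the examples above.

    def __init__(self, capture, previewWindowManager=None,
                 shouldMirrorPreview=False):
        self.previewWindowManager = previewWindowManager
        self.shouldMirrorPreview = shouldMirrorPreview
        self._capture = capture
        self._frame = None
        self._enteredFrame = False
        self._imageFilename = None
        self._videoFilename = None
        self._videoWriter = None

    @property
    def frame(self):
        if self._enteredFrame and self._frame is None:
            _, self._frame = self._capture.retrieve()
        return self._frame

    @property
    def isWritingVideo(self):
        return self._videoFilename is not None

    def enterFrame(self):
        # Grab (but do not yet decode) the next frame, if any.
        assert not self._enteredFrame, \
            'previous enterFrame() had no matching exitFrame()'
        if self._capture is not None:
            self._enteredFrame = self._capture.grab()

    def exitFrame(self):
        # Show the frame in the preview window, honour any pending image or
        # video writes, then release the frame.
        if self.frame is None:
            self._enteredFrame = False
            return
        if self.previewWindowManager is not None:
            if self.shouldMirrorPreview:
                self.previewWindowManager.show(cv2.flip(self._frame, 1))
            else:
                self.previewWindowManager.show(self._frame)
        if self._imageFilename is not None:
            cv2.imwrite(self._imageFilename, self._frame)
            self._imageFilename = None
        if self.isWritingVideo:
            if self._videoWriter is None:
                size = (self._frame.shape[1], self._frame.shape[0])
                fourcc = cv2.VideoWriter_fourcc(*'MJPG')
                self._videoWriter = cv2.VideoWriter(
                    self._videoFilename, fourcc, 30, size)
            self._videoWriter.write(self._frame)
        self._frame = None
        self._enteredFrame = False

    def writeImage(self, filename):
        # Defer the write until exitFrame().
        self._imageFilename = filename

    def startWritingVideo(self, filename):
        self._videoFilename = filename

    def stopWritingVideo(self):
        self._videoFilename = None
        if self._videoWriter is not None:
            self._videoWriter.release()
            self._videoWriter = None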
Example No. 8
class Cameo(object):
    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(cv2.VideoCapture(0),
                                              self._windowManager, True)
        self._curveFilter = filters.BGRPortraCurveFilter()

    def run(self):
        """Run the main loop"""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            if frame is not None:
                filters.strokeEdges(frame, frame)
                self._curveFilter.apply(frame, frame)
            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def onKeypress(self, keycode):
        """Handle a keypress
           space  ->  take a screenshot.
           tab    ->  start/stop recording a screencast
           escape ->  Quit
        """
        if keycode == 32:  # space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9:  # tab
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo('screencast.avi')
            else:
                self._captureManager.stopWritingVideo()
        elif keycode == 27:  # escape
            self._windowManager.destroyWindow()
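
The magic numbers in every onKeypress handler are the ASCII codes returned by cv2.waitKey. Named constants (hypothetical, not defined in any snippet above) make the handlers self-documenting:

KEY_SPACE  = 32   # take a screenshot
KEY_TAB    = 9    # start/stop recording a screencast
KEY_ESCAPE = 27   # quit
KEY_X      = 120  # toggle debug rectangles in the face-tracking variants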
Example No. 9
class Cameo(object):
    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(cv2.VideoCapture(0),
                                              self._windowManager, True)
        self._curveFilter = filters.BGRPortraCurveFilter()

    def run(self):
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame

            if frame is not None:
                filters.strokeEdges(frame, frame)
                self._curveFilter.apply(frame, frame)

            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def onKeypress(self, keycode):
        """
        space -> take a screenshot
        tab   -> start/stop recording
        esc   -> quit
        """
        if keycode == 32:  # space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9:  # tab
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo('screencast.avi')
            else:
                self._captureManager.stopWritingVideo()
        elif keycode == 27:  # escape
            self._windowManager.destroyWindow()
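
Several examples apply filters.BGRPortraCurveFilter (or its Provia/CrossProcess siblings) through .apply(src, dst). A hedged stand-in built on per-channel lookup tables; the book derives its LUTs from film-emulation curve control points, which are not reproduced here:

import cv2
import numpy

class BGRCurveFilter(object):
    # Sketch of a curve filter compatible with the .apply(src, dst) calls above.
    # Pass 256-entry uint8 lookup tables per channel; identity curves by default.

    def __init__(self, bLut=None, gLut=None, rLut=None):
        identity = numpy.arange(256, dtype=numpy.uint8)
        self._bLut = identity if bLut is None else bLut
        self._gLut = identity if gLut is None else gLut
        self._rLut = identity if rLut is None else rLut

    def apply(self, src, dst):
        b, g, r = cv2.split(src)
        b = cv2.LUT(b, self._bLut)
        g = cv2.LUT(g, self._gLut)
        r = cv2.LUT(r, self._rLut)
        cv2.merge((b, g, r), dst)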
Example No. 10
class Cameo(object):
    def __init__(self, camera_state, video_file):  # 0 or 1 selects a local camera; -1 reads from video_file
        self._camera = camera_state
        self._video_file = video_file
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        if (camera_state == -1):
            self._captureManager = CaptureManager(cv2.VideoCapture(video_file),
                                                  self._windowManager, True,
                                                  False, False)
        else:
            self._captureManager = CaptureManager(
                cv2.VideoCapture(self._camera), self._windowManager, True,
                False, False)

    def run(self):
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            self._captureManager.shouldMirrorPreview = False  # whether to mirror the preview
            frame = self._captureManager.frame
            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def onKeypress(self, keycode):
        if keycode == 32:  # space: save a screenshot
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9:  # tab: start/stop recording video
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo('screenshot.avi')
            else:
                self._captureManager.stopWritingVideo()
        elif keycode == 27:  # escape
            self._windowManager.destroyWindow()
Example No. 11
 def __init__(self):
     self._windowManager = WindowManager('Cameo', self.onKeypress)
     #device = cv2.CAP_OPENNI2 # uncomment for Microsoft Kinect via OpenNI2
     device = cv2.CAP_OPENNI2_ASUS  # uncomment for Asus Xtion or Occipital Structure via OpenNI2
     self._captureManager = CaptureManager(cv2.VideoCapture(device),
                                           self._windowManager, True)
     self._curveFilter = filters.BGRPortraCurveFilter()
Example No. 12
class CameoDepth(Cameo):
    
    def __init__(self):
        self._windowManager = WindowManager('Cameo',
                                             self.onKeypress)
        self._captureManager = CaptureManager(
            cv2.VideoCapture(0), self._windowManager, True)
        self._faceTracker = FaceTracker()
        self._shouldDrawDebugRects = False
        self._curveFilter = filters.BGRPortraCurveFilter()
    
    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            
            self._faceTracker.update(frame)
            faces = self._faceTracker.faces
            rects.swapRects(frame, frame,
                            [face.faceRect for face in faces])
            
            filters.strokeEdges(frame, frame)
            self._curveFilter.apply(frame, frame)
            
            if self._shouldDrawDebugRects:
                self._faceTracker.drawDebugRects(frame)
            
            self._captureManager.exitFrame()
            self._windowManager.processEvents()
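
Example No. 12 and the later face-swap variants call rects.swapRects. A hedged sketch of copyRect/swapRects consistent with those calls; the book's version also accepts an optional per-rectangle mask list (used by the depth-camera variant), omitted here:

import cv2

def copyRect(src, dst, srcRect, dstRect, interpolation=cv2.INTER_LINEAR):
    # Copy (and resize) one rectangular region of src into a region of dst.
    x0, y0, w0, h0 = srcRect
    x1, y1, w1, h1 = dstRect
    dst[y1:y1 + h1, x1:x1 + w1] = cv2.resize(
        src[y0:y0 + h0, x0:x0 + w0], (w1, h1), interpolation=interpolation)

def swapRects(src, dst, rects, interpolation=cv2.INTER_LINEAR):
    # Cyclically swap the contents of the given rectangles (the face-swap effect).
    if dst is not src:
        dst[:] = src
    numRects = len(rects)
    if numRects < 2:
        return
    # Stash the last rectangle, shift every other rectangle forward by one,
    # then drop the stashed contents into the first rectangle.
    x, y, w, h = rects[numRects - 1]
    temp = src[y:y + h, x:x + w].copy()
    i = numRects - 2
    while i >= 0:
        copyRect(src, dst, rects[i], rects[i + 1], interpolation)
        i -= 1
    copyRect(temp, dst, (0, 0, w, h), rects[0], interpolation)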
Example No. 13
class Detect(object):
    def __init__(self):
        self._window_manager = WindowManager('Face Detector',
                                             self.on_key_press)
        self._capture_manager = CaptureManager(cv2.VideoCapture(0),
                                               self._window_manager, True)
        self._face_tracker = FaceTracker()

    def run(self):
        self._window_manager.create_window()
        while self._window_manager.is_window_created:

            self._capture_manager.enter_frame()
            frame = self._capture_manager.frame

            self._face_tracker.update(frame)
            self._face_tracker.draw_debug_rects(frame)

            self._capture_manager.exit_frame()
            self._window_manager.process_events()

    def on_key_press(self, keycode):
        """
        Keypress handler

        escape - quit
        :return:
        """

        if keycode == 27:  # escape
            self._window_manager.destroy_window()
Example No. 14
class CameoDouble(Cameo):
    def __init__(self):
        Cameo.__init__(self)
        self._hiddenCaptureManager = CaptureManager(cv2.VideoCapture(1))

    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            self._hiddenCaptureManager.enterFrame()
            frame = self._captureManager.frame
            hiddenFrame = self._hiddenCaptureManager.frame

            self._faceTracker.update(hiddenFrame)
            hiddenFaces = self._faceTracker.faces
            self._faceTracker.update(frame)
            faces = self._faceTracker.faces

            i = 0
            while i < len(faces) and i < len(hiddenFaces):
                rects.copyRect(hiddenFrame, frame, hiddenFaces[i].faceRect,
                               faces[i].faceRect)
                i += 1

            filters.strokeEdges(frame, frame)
            self._curveFilter.apply(frame, frame)

            if self._shouldDrawDebugRects:
                self._faceTracker.drawDebugRects(frame)

            self._captureManager.exitFrame()
            self._hiddenCaptureManager.exitFrame()
            self._windowManager.processEvents()
Example No. 15
class Cameo(object):
    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(cv2.VideoCapture(0),
                                              self._windowManager, True)
        self._curveFilter = filters.BlurFilter()

    def run(self):
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            #add additional function here
            filters.strokeEdges(frame, frame)
            self._curveFilter.apply(frame, frame)

            self._captureManager.exitFrame()
            self._windowManager.processEvents()
        self._captureManager._capture.release()

    def onKeypress(self, keycode):
        if keycode == 32:  #space
            self._captureManager.writeImage('./data/pictures/screenshot.png')
        elif keycode == 9:  #tab
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo(r'F:/screen_cast.avi')
            else:
                self._captureManager.stopWritingVideo()
        elif keycode == 27:  #exit
            self._windowManager.destroyWindow()
Example No. 16
class Cameo(object):
    def __init__(self):
        self._windowManager = WindowManager("Cameo", self.onkeypress)
        self._captureManager = CaptureManager(cv.VideoCapture(0),
                                              self._windowManager, True)
        self._curveFilter = filters.BGRPortraCurveFilter()

    def run(self):
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            copyFrame = frame
            filters.strokeEdges(frame, copyFrame, 9, 7)
            self._windowManager.show(copyFrame)
            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def onkeypress(self, keycode):
        if keycode == 32:  #space somerberry
            self._captureManager.writeImage("screenshot.png")
        elif keycode == 9:  #tab
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo("screencast.avi")
            else:
                self._captureManager.stopWritingVideo()
        elif keycode == 27:  #esc
            self._windowManager.destroyWindow()
Example No. 17
class Cameo(object):
    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(cv2.VideoCapture(0),
                                              self._windowManager, True)

    def run(self):
        """
        执行循环
        """
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            # filter the frame
            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def onKeypress(self, keycode):
        """
        处理键盘输入:
        1、空格截屏
        2、tab开始停止录像
        3、esc退出
        """
        if keycode == 32:  #space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9:  #tab
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo('screencast.avi')
            else:
                self._captureManager.stopWritingVideo()
        elif keycode == 27:  #escape
            self._windowManager.destroyWindow()
Example No. 18
class Cameo:
    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(cv2.VideoCapture(0),
                                              self._windowManager, True)

    def run(self):
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            # todo filter
            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def onKeypress(self, keycode):
        """
        space   -> screenshot
        tab     -> start/stop
        escape  -> quit
        :param keycode:
        :return:
        """
        if keycode == 32:  #space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9:  #tab
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo('screencast.avi')
            else:
                self._captureManager.stopWritingVideo()
        elif keycode == 27:  #escape
            self._windowManager.destroyWindow()
Example No. 19
class Cameo(object):
    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(cv2.VideoCapture(0),
                                              self._windowManager, True)
        self._curveFilter = filters.EmbossFilter()

    def run(self):
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame

            # filter the frame
            #             frame = cv2.GaussianBlur(frame, (3, 3), 0)
            #             self._captureManager._frame = cv2.Canny(frame, 50, 150)
            filters.strokeEdges(frame, frame)
            self._curveFilter.apply(frame, frame)

            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def onKeypress(self, keycode):
        if keycode == 32:  # space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9:  # tab
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo('screencast.avi')
            else:
                self._captureManager.stopWritingVideo()
        elif keycode == 27:  # escape
            self._windowManager.destroyWindow()
Example No. 20
class Cameo(object):

    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.on_keypress)
        self._captureManager = CaptureManager(cv2.VideoCapture(0), self._windowManager, True)

    def run(self):
        """Run the main loop."""
        self._windowManager.create_window()
        while self._windowManager.is_window_created:
            self._captureManager.enter_frame()
            frame = self._captureManager.frame
            if frame is not None:
                # TODO: Filter the frame (Chapter 3).
                pass
            self._captureManager.exit_frame()
            self._windowManager.process_events()

    def on_keypress(self, keycode):
        """Handle a keypress.

        space  -> Take a screenshot.
        tab    -> Start/stop recording a screencast.
        escape -> Quit.

        """
        if keycode == 32:  # space
            self._captureManager.write_image('out.png')
        elif keycode == 9:  # tab
            if not self._captureManager.is_writing_video:
                self._captureManager.start_writing_video('out.avi')
            else:
                self._captureManager.stop_writing_video()
        elif keycode == 27:  # escape
            self._windowManager.destroy_window()
Example No. 21
class Cameo(object):
    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(cv.VideoCapture(0), self._windowManager, True)
        
    def run(self):
        #run the loop
        self._windowManager.createWindow()
        while (self._windowManager.isWindowCreated()):
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            
            self._captureManager.exitFrame()
            self._windowManager.processEvents()
            
    def onKeypress(self, keycode):
        """Handle a keypress.
        space  -> Take a screenshot.
        tab    -> Start/stop recording a screencast.
        escape -> Quit.
        """
        if (keycode == 32): # space
            self._captureManager.writeImage('screenshot.png')
        elif(keycode == 9): # tab
            if(not self._captureManager.isWrittingVideo):
                self._captureManager.startWrittingVideo('screencast.avi')
            else:
                self._captureManager.stopWrittingVideo()
        elif(keycode == 27): #escape
            self._windowManager.destroyWindow()
Example No. 22
 def __init__(self):
     '''Create a window and register the keypress callback.'''
     self._windowManager = WindowManager('Cameo', self.onKeypress)
     # cameraCapture = cv2.VideoCapture(0)  # capture from the camera
     cameraCapture = cv2.VideoCapture('/home/deepl/Documents/wb/opencv-3.0.0/samples/data/768x576.avi')
     '''Tell the program the frames come from this capture, and mirror the preview.'''
     self._captureManager = CaptureManager(cameraCapture, self._windowManager, True)
Example No. 23
class Cameo(object):
    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(cv2.VideoCapture(0), self._windowManager, True)
        # self._curveFilter = filters.EmbossFilter()

    def run(self):
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame

            filters.strokeEdges(frame, frame)
            # self._curveFilter.apply(frame, frame)

            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def onKeypress(self, keycode):
        if keycode == 32: #space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9: #tab
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo('screencast.avi')
            else:
                self._captureManager.stopWritingVideo()
        elif keycode == 27:  # esc
            self._windowManager.destroyWindow()
Example No. 24
 def __init__(self, video_source):
     self._windowManager = WindowManager('Browser', self.onKeypress)
     self._captureManager = CaptureManager(video_source,
                                           self._windowManager, True)
     self._faceTracker = FaceTracker()
     self._shouldDrawDebugRects = False
     self._curveFilter = filters.BGRPortraCurveFilter()
Example No. 25
class FaceID():
    def __init__(self):
        self._camera = cv2.VideoCapture(0)
        self._camera.set(cv2.CAP_PROP_FRAME_WIDTH, 1024)
        self._camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 768)
        self._windowManager = WindowManager('Face ID', self.onKeypress)
        self._captureManager = CaptureManager(self._camera,
                                              self._windowManager, True)
        self._faceDetector = FaceDetector()

    def run(self):  # run the application
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame

            self._faceDetector.update(frame)
            faces = self._faceDetector.faces

            for face in faces:
                x, y, h, w = face.faceRect
                face_image = frame[y:y + h, x:x + w]
                cv2.imshow('face', face_image)
                break

            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def onKeypress(self, keycode):
        # handle a keypress
        if keycode == 27:  # escape
            self._windowManager.destroyWindow()
            self._camera.release()
        elif chr(keycode).isalnum():
            self._windowManager.appendChar(chr(keycode))
Example No. 26
 def __init__(self):
     # the window manager class
     self._windowManager = WindowManager('Cameo', self.onKeypress)
     # the camera capture manager class
     self._captureManager = CaptureManager(cv2.VideoCapture(0),
                                           self._windowManager, True)
     # the filter class
     self._curveFilter = filters.FindEdgesFilter()
Example No. 27
 def __init__(self):
     self._camera = cv2.VideoCapture(0)
     self._camera.set(cv2.CAP_PROP_FRAME_WIDTH, 1024)
     self._camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 768)
     self._windowManager = WindowManager('Face ID', self.onKeypress)
     self._captureManager = CaptureManager(self._camera,
                                           self._windowManager, True)
     self._faceDetector = FaceDetector()
Example No. 28
    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onkeypress)
        self._capturemanager = CaptureManager(cv2.VideoCapture(0),
                                              self._windowManager, True)

        # filters declared here
        self._BlurFilter = filters.BlurFilter()
        self._FindEdgesFilter = filters.FindEdgesFilter()
Example No. 29
 def __init__(self):
     # Create a window and pass in the keypress callback
     self._windowManager = WindowManager('Cameo', self.onKeypress)
     # Frames come from the camera; mirror the preview
     self._captureManager = CaptureManager(
         capture=cv.VideoCapture(0),
         previewWindowManager=self._windowManager,
         shouldMirrorPreview=True)
Example No. 30
 def __init__(self, windowName='Cameo', _shouldMirrorPreview=True):
     self._windowManager = WindowManager(windowName, self.onKeypress)
     self._captureManager = CaptureManager(
         capture=cv2.VideoCapture(0),
         previewWindowManager=self._windowManager,
         shouldMirrorPreview=_shouldMirrorPreview)
     self._pictureNumber: int = 0
     self._videoNumber: int = 0
     self._curveFilter = filters.BGRPortraCurveFilter()
Example No. 31
 def __init__(self):
     self._windowManager = WindowManager('Cameo', self.onKeyPressed)
     self._captureManager = CaptureManager(
         capture=cv2.VideoCapture(0),
         previewWindowManager=self._windowManager,
         mirrorPreview=True)
     self._curveFilter = filters.BGRPortraCurveFilter()
     self._faceTracker = FaceTracker()
     self._showDetectionRects = False
Example No. 32
 def __init__(self):
     self._windowManager = WindowManager('Cameo', self.onKeypress)
     #device = cv2.CAP_OPENNI2 # uncomment for Microsoft Kinect via OpenNI2
     device = cv2.CAP_OPENNI2_ASUS  # uncomment for Asus Xtion via OpenNI2
     self._captureManager = CaptureManager(cv2.VideoCapture(device),
                                           self._windowManager, True)
     self._faceTracker = FaceTracker()
     self._shouldDrawDebugRects = False
     self._curveFilter = filters.BGRPortraCurveFilter()
Example No. 33
File: cameo.py Project: sarvex/pycv
class Cameo(object):
    
    def __init__(self):
        self._windowManager = WindowManager('Cameo',
                                             self.onKeypress)
        self._captureManager = CaptureManager(
            cv2.VideoCapture(0), self._windowManager, True)
        self._faceTracker = FaceTracker()
        self._shouldDrawDebugRects = False
        self._curveFilter = filters.BGRPortraCurveFilter()
    
    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            
            if frame is not None:
                
                self._faceTracker.update(frame)
                faces = self._faceTracker.faces
                rects.swapRects(frame, frame,
                                [face.faceRect for face in faces])
            
                filters.strokeEdges(frame, frame)
                self._curveFilter.apply(frame, frame)
                
                if self._shouldDrawDebugRects:
                    self._faceTracker.drawDebugRects(frame)
            
            self._captureManager.exitFrame()
            self._windowManager.processEvents()
    
    def onKeypress(self, keycode):
        """Handle a keypress.
        
        space  -> Take a screenshot.
        tab    -> Start/stop recording a screencast.
        x      -> Start/stop drawing debug rectangles around faces.
        escape -> Quit.
        
        """
        if keycode == 32: # space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9: # tab
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo(
                    'screencast.avi')
            else:
                self._captureManager.stopWritingVideo()
        elif keycode == 120: # x
            self._shouldDrawDebugRects = \
                not self._shouldDrawDebugRects
        elif keycode == 27: # escape
            self._windowManager.destroyWindow()
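
The face-swap and debug-rectangle variants assume a FaceTracker exposing update(image), a faces property (each face carrying a faceRect tuple), and drawDebugRects(image). A hedged Haar-cascade sketch of that interface; the cascade path and detection parameters are assumptions:

import cv2

class Face(object):
    # Per-face data the examples read: just a bounding rectangle.
    def __init__(self):
        self.faceRect = None

class FaceTracker(object):
    # Minimal sketch of the tracker interface used above.

    def __init__(self, scaleFactor=1.2, minNeighbors=2):
        self._scaleFactor = scaleFactor
        self._minNeighbors = minNeighbors
        self._faces = []
        # Assumes the cascade bundled with opencv-python; adjust the path as needed.
        self._faceClassifier = cv2.CascadeClassifier(
            cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

    @property
    def faces(self):
        return self._faces

    def update(self, image):
        self._faces = []
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        faceRects = self._faceClassifier.detectMultiScale(
            gray, self._scaleFactor, self._minNeighbors)
        for faceRect in faceRects:
            face = Face()
            face.faceRect = tuple(int(v) for v in faceRect)
            self._faces.append(face)

    def drawDebugRects(self, image):
        for face in self._faces:
            x, y, w, h = face.faceRect
            cv2.rectangle(image, (x, y), (x + w, y + h), (255, 255, 255), 2)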
Example No. 34
 def __init__(self):
     self._windowManager = WindowManager('Cameo', self.onKeypress)
     self._captureManager = CaptureManager(cv2.VideoCapture(0),
                 self._windowManager, True)
     self._curveFilter = filters.BGRProviaCurveFilter()
     self._faceTracker = FaceTracker()
     self._shouldDrawDebugRects = False
Example No. 35
    def setSource(self, value):
        if self._cap:
            if self._cap._capture.isOpened(): #ugly access to VideoCapture object from managers
                self._cap._capture.release()
        
        self._cap = CaptureManager( cv2.VideoCapture(int(value)) )
        #self.indWindow = cv2.namedWindow("Camera Display")
        self.showImage = True
        self.actualRes = self._cap.getResolution()


        # Set to default resolution from camera (640x480)
        #res = "%d x %d" % (int(self.actualRes[0]), int(self.actualRes[1]) )
        #index = self.ui.comboBox.findText(res) #Sets picker to default resolution
        #self.ui.comboBox.setCurrentIndex(index)

        # Set to Hoky desired default res (1280x960)
        res = "%d x %d" % (int(self.idealRes[0]), int(self.idealRes[1]) )
        index = self.ui.comboBox.findText(res) #Sets picker to default resolution
        self.ui.comboBox.setCurrentIndex(index)
        self.resolutionSelect()

        self.p = Point(200, 300)
        self.c = Point(self.actualRes[0] // 2, self.actualRes[1] // 2)
        self.setAble('settings', True)
Example No. 36
 def __init__(self):
     self._windowManager = WindowManager('Cameo',
                                          self.onKeypress)
     self._captureManager = CaptureManager(
         cv2.VideoCapture("videos/Megamind.avi"), self._windowManager, False)
     self._faceTracker = FaceTracker()
     self._shouldDrawDebugRects = False
     self._curveFilter = filters.BGRPortraCurveFilter()
Example No. 37
class CameoDepth(Cameo):
    
    def __init__(self):
        self._windowManager = WindowManager('Cameo',
                                             self.onKeypress)
        device = depth.CV_CAP_OPENNI # uncomment for Microsoft Kinect
        #device = depth.CV_CAP_OPENNI_ASUS # uncomment for Asus Xtion
        self._captureManager = CaptureManager(
            cv2.VideoCapture(device), self._windowManager, True)
        self._faceTracker = FaceTracker()
        self._shouldDrawDebugRects = False
        self._curveFilter = filters.BGRPortraCurveFilter()
    
    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            self._captureManager.channel = \
                depth.CV_CAP_OPENNI_DISPARITY_MAP
            disparityMap = self._captureManager.frame
            self._captureManager.channel = \
                depth.CV_CAP_OPENNI_VALID_DEPTH_MASK
            validDepthMask = self._captureManager.frame
            self._captureManager.channel = \
                depth.CV_CAP_OPENNI_BGR_IMAGE
            frame = self._captureManager.frame
            
            self._faceTracker.update(frame)
            faces = self._faceTracker.faces
            masks = [
                depth.createMedianMask(
                    disparityMap, validDepthMask, face.faceRect) \
                for face in faces
            ]
            rects.swapRects(frame, frame,
                            [face.faceRect for face in faces], masks)
            
            filters.strokeEdges(frame, frame)
            self._curveFilter.apply(frame, frame)
            
            if self._shouldDrawDebugRects:
                self._faceTracker.drawDebugRects(frame)
            
            self._captureManager.exitFrame()
            self._windowManager.processEvents()
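
The CameoDepth variant relies on depth.createMedianMask to isolate the face's depth layer from the disparity map before swapping. A hedged sketch consistent with that call; the tolerance of 12 disparity levels follows the book's approach but should be treated as an assumption:

import numpy

def createMedianMask(disparityMap, validDepthMask, rect=None):
    # Return a mask selecting pixels near the median disparity within rect,
    # plus any pixels whose depth is unknown.
    if rect is not None:
        x, y, w, h = rect
        disparityMap = disparityMap[y:y + h, x:x + w]
        validDepthMask = validDepthMask[y:y + h, x:x + w]
    median = numpy.median(disparityMap)
    return numpy.where((validDepthMask == 0) | (abs(disparityMap - median) < 12),
                       1.0, 0.0)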
Example No. 38
 def __init__(self):
     self._windowManager = WindowManager('Cameo',self.onKeypress)
     device = depth.CV_CAP_OPENNI # uncomment for Microsoft Kinect
     #device = depth.CV_CAP_OPENNI_ASUS # uncomment for Asus Xtion
     self._captureManager = CaptureManager(cv2.VideoCapture(device), self._windowManager, True)
     self._faceTracker = FaceTracker()
     self._shouldDrawDebugRects = False
     self._curveFilter = filters.BGRPortraCurveFilter()
Example No. 39
 def __init__(self):
     '''
     Constructor
     
     '''
     self._windowManager = WindowManager('Cameo', self.onKeypress)
     self._captureManager = CaptureManager(cv2.VideoCapture(0), self._windowManager, True)
     
     self._faceTracker = FaceTracker() 
     self._shouldDrawDebugRects = False
     
     self._curveFilter = filters.BGRCrossProcessCurveFilter()
Example No. 40
File: cameo.py Project: sarvex/pycv
class CameoDouble(Cameo):
    
    def __init__(self):
        Cameo.__init__(self)
        self._hiddenCaptureManager = CaptureManager(
            cv2.VideoCapture(1))
    
    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            self._hiddenCaptureManager.enterFrame()
            frame = self._captureManager.frame
            hiddenFrame = self._hiddenCaptureManager.frame
            
            if frame is not None:
                if hiddenFrame is not None:
                    self._faceTracker.update(hiddenFrame)
                    hiddenFaces = self._faceTracker.faces
                    self._faceTracker.update(frame)
                    faces = self._faceTracker.faces
                
                    i = 0
                    while i < len(faces) and i < len(hiddenFaces):
                        rects.copyRect(
                            hiddenFrame, frame, hiddenFaces[i].faceRect,
                            faces[i].faceRect)
                        i += 1
                
                filters.strokeEdges(frame, frame)
                self._curveFilter.apply(frame, frame)
                
                if hiddenFrame is not None and self._shouldDrawDebugRects:
                    self._faceTracker.drawDebugRects(frame)
            
            self._captureManager.exitFrame()
            self._hiddenCaptureManager.exitFrame()
            self._windowManager.processEvents()
Example No. 41
 def __init__(self):
     self._windowManager = WindowManager('benFinder',
                                          self.onKeypress)
     device = depth.CV_CAP_FREENECT
     #device = 1
     print "device=%d" % device
     self._captureManager = CaptureManager(
         device, self._windowManager, True)
     self._captureManager.channel = depth.CV_CAP_OPENNI_BGR_IMAGE
     self._faceTracker = FaceTracker()
     self._shouldDrawDebugRects = False
     self._backgroundSubtract = False
     self._autoBackgroundSubtract = False
     self._curveFilter = filters.BGRPortraCurveFilter()
     self.background_video_img = None
     self.background_depth_img = None
     self.autoBackgroundImg = None
     self._ts = TimeSeries()
     self._frameCount = 0
Example No. 42
    def setSource(self, value):
        self.existingVid = False

        if type(value) != int:
            nums = re.compile(r'\d*')
            mat = re.match(nums, value).group(0)

            if mat != u'':
                src = int(value)
            else:
                src = str(value)
                self.existingVid = True
        else: #read in from calibration
            src = value
            self.ui.sourceCombo.setCurrentText(str(src))
            
        if self._cap:
            if self._cap._capture.isOpened(): #ugly access to VideoCapture object from managers
                self._cap._capture.release()

        
        self._cap = CaptureManager( cv2.VideoCapture(src) )
        self.showImage = True
        
        self.actualRes = self._cap.getResolution()
        
        if self.actualRes != self.calibrationRes and not self.existingVid:
            self._cap.setProp( 'height', self.calibrationRes[1] )
            self._cap.setProp( 'width', self.calibrationRes[0] )
        
        self.showImage = True

        self.actualRes = self._cap.getResolution()
        logger.warning("Actual resolution from cam is %s" % str( self.actualRes ) )
        self.setAble('source', False) 
        self.setAble('buttons', True)
        self.ui.buttonRefresh.setEnabled(False)
Example No. 43
class BenFinder(object):
    BACKGROUND_VIDEO_FNAME = "background_video.png"
    BACKGROUND_DEPTH_FNAME = "background_depth.png"
 
    def __init__(self):
        self._windowManager = WindowManager('benFinder',
                                             self.onKeypress)
        device = depth.CV_CAP_FREENECT
        #device = 1
        print "device=%d" % device
        self._captureManager = CaptureManager(
            device, self._windowManager, True)
        self._captureManager.channel = depth.CV_CAP_OPENNI_BGR_IMAGE
        self._faceTracker = FaceTracker()
        self._shouldDrawDebugRects = False
        self._backgroundSubtract = False
        self._autoBackgroundSubtract = False
        self._curveFilter = filters.BGRPortraCurveFilter()
        self.background_video_img = None
        self.background_depth_img = None
        self.autoBackgroundImg = None
        self._ts = TimeSeries()
        self._frameCount = 0
    
    def loadBackgroundImages(self):
        """ Load the background images to be used for background subtraction
        from disk files.
        """
        self.background_video_img = cv2.imread(BenFinder.BACKGROUND_VIDEO_FNAME)
        self.background_depth_img = cv2.imread(BenFinder.BACKGROUND_DEPTH_FNAME,
                                               cv2.CV_LOAD_IMAGE_GRAYSCALE)

    def showBackgroundImage(self):
        """ Display the background image used for subtraction in a separate window
        """
        # Load the images from disk if necessary.
        if (self.background_depth_img is None or self.background_video_img is None):
            self.loadBackgroundImages()
        # Display the correct image
        if (self._autoBackgroundSubtract):
            cv2.imshow("Auto Background Image", self.autoBackgroundImg)
        else:
            if (self._captureManager.channel == \
                depth.CV_CAP_OPENNI_DEPTH_MAP):
                cv2.imshow("background_depth_img",self.background_depth_img)
            elif (self._captureManager.channel == \
                  depth.CV_CAP_OPENNI_BGR_IMAGE):
                cv2.imshow("background_video_img",self.background_video_img)
            else:
                print "Error - Invalid Channel %d." % \
                    self._captureManager.channel

    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()

            frame = self._captureManager.frame
            
            if frame is not None:
                if (self._backgroundSubtract):
                    if (self._autoBackgroundSubtract):
                        if (self._captureManager.channel == \
                            depth.CV_CAP_OPENNI_DEPTH_MAP):
                            if (self.autoBackgroundImg is None):
                                self.autoBackgroundImg = numpy.float32(frame)
                            # First work out the region of interest by 
                            #    subtracting the fixed background image 
                            #    to create a mask.
                            absDiff = cv2.absdiff(frame,self.background_depth_img)
                            benMask,maskArea = filters.getBenMask(absDiff,8)

                            cv2.accumulateWeighted(frame,
                                                   self.autoBackgroundImg,
                                                   0.05)
                            # Convert the background image into the same format
                            # as the main frame.
                            bg = cv2.convertScaleAbs(self.autoBackgroundImg,
                                                     alpha=1.0)
                            # Subtract the background from the frame image
                            cv2.absdiff(frame,bg,frame)
                            # Scale the difference image to make it more sensitive
                            # to changes.
                            cv2.convertScaleAbs(frame,frame,alpha=100)
                            #frame = cv2.bitwise_and(frame,frame,dst=frame,mask=benMask)
                            frame = cv2.multiply(frame,benMask,dst=frame,dtype=-1)
                            bri = filters.getMean(frame,benMask)
                            #print "%4.0f, %3.0f" % (bri[0],self._captureManager.fps)
                            self._ts.addSamp(bri[0])
                            if (self._frameCount < 15):
                                self._frameCount = self._frameCount +1
                            else:
                                self._ts.plotRawData()
                                self._ts.findPeaks()
                                self._frameCount = 0
                        else:
                            print "Auto background subtract only works for depth images!"
                    else:
                        if (self._captureManager.channel == \
                            depth.CV_CAP_OPENNI_DEPTH_MAP):
                            cv2.absdiff(frame,self.background_depth_img,frame)
                            benMask = filters.getBenMask(frame,8)
                            bri = filters.getMean(frame,benMask)
                            print bri
                        elif (self._captureManager.channel == \
                              depth.CV_CAP_OPENNI_BGR_IMAGE):
                            cv2.absdiff(frame,self.background_video_img,frame)
                        else:
                            print "Error - Invalid Channel %d." % \
                                self._captureManager.channel
                    #ret,frame = cv2.threshold(frame,200,255,cv2.THRESH_TOZERO)
                #self._faceTracker.update(frame)
                #faces = self._faceTracker.faces

                #if self._shouldDrawDebugRects:
                #    self._faceTracker.drawDebugRects(frame)
                            
            self._captureManager.exitFrame()
            self._windowManager.processEvents()
    
    def onKeypress(self, keycode):
        """Handle a keypress.
        
        space  -> Take a screenshot.
        tab    -> Start/stop recording a screencast.
        x      -> Start/stop drawing debug rectangles around faces.
        a      -> toggle automatic accumulated background subtraction on or off.
        b      -> toggle simple background subtraction on or off.
        s      -> Save current frame as background image.
        d      -> Toggle between video and depth map view
        i      -> Display the background image that is being used for subtraction.
        escape -> Quit.
        
        """
        print "keycode=%d" % keycode
        if keycode == 32: # space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9: # tab
            if not self._captureManager.isWritingVideo:
                print "Starting Video Recording..."
                self._captureManager.startWritingVideo(
                    'screencast.avi')
            else:
                print "Stopping video recording"
                self._captureManager.stopWritingVideo()
        elif keycode == 120: # x
            self._shouldDrawDebugRects = \
                not self._shouldDrawDebugRects
        elif (chr(keycode)=='a'):  # Automatic background subtraction
            if (self._autoBackgroundSubtract == True):
                print "Switching off auto background Subtraction"
                self.autoBackgroundImage = None
                self._autoBackgroundSubtract = False
            else:
                print "Switching on auto background subtraction"
                self._autoBackgroundSubtract = True
        elif (chr(keycode)=='b'):  # Simple background subtraction
            if (self._backgroundSubtract == True):
                print "Switching off background Subtraction"
                self._backgroundSubtract = False
            else:
                print "Switching on background subtraction"
                self.loadBackgroundImages()
                self._backgroundSubtract = True
        elif (chr(keycode)=='d'):
            if (self._captureManager.channel == depth.CV_CAP_OPENNI_BGR_IMAGE):
                print "switching to depth map..."
                self._captureManager.channel = depth.CV_CAP_OPENNI_DEPTH_MAP
            else:
                print "switching to video"
                self._captureManager.channel = depth.CV_CAP_OPENNI_BGR_IMAGE
        elif (chr(keycode)=='i'):
            self.showBackgroundImage()
        elif (chr(keycode)=='s'):
            print "Saving Background Image"
            if (self._captureManager.channel == depth.CV_CAP_OPENNI_DEPTH_MAP):
                self._captureManager.writeImage(BenFinder.BACKGROUND_DEPTH_FNAME)
            elif (self._captureManager.channel == depth.CV_CAP_OPENNI_BGR_IMAGE):
                self._captureManager.writeImage(BenFinder.BACKGROUND_VIDEO_FNAME)
            else:
                print "Invalid Channel %d - doing nothing!" \
                    % self._captureManager.channel
                

        elif keycode == 27: # escape
            self._windowManager.destroyWindow()
Example No. 44
class Tracker(object):
    def __init__(self, method, src):
        self.color = True
        self.motorsOn = False

        ### Sensitivity of tracker params
        self._sampleFreq = 0.1  # in sec

        ### Set Camera params
        # self.resolution = (640, 480 )
        self.resolution = (1280, 960)
        source = {
            0: 0,
            1: 1,
            2: "led_move1.avi",
            3: "screencast.avi",
            4: "screencast 1.avi",
            5: "shortNoBox.avi",
            6: "longNoBox.avi",
            7: "H299.avi",
            8: "testRec.avi",
            9: "longDemo.avi",
        }
        self.captureSource = source[int(src)]

        ### Timing initialization
        self._startTime = time.time()
        self._lastCheck = self._startTime - self._sampleFreq

        ### Display params
        self.mirroredPreview = False

        ### Initialize Objects

        ##### Windows

        self._rawWindow = WindowManager("RawFeed", self.onKeypress)

        ### Capture -- resolution set here
        self._cap = CaptureManager(
            cv2.VideoCapture(self.captureSource), self._rawWindow, self.mirroredPreview, self.resolution
        )

        actualCols, actualRows = self._cap.getResolution()
        self.centerPt = utils.Point(actualCols / 2, actualRows / 2)

        ## from here on out use this resolution
        boundCols = 600
        boundRows = 600
        ### Arguments for finder
        # --> Pairs are always COLS, ROWS !!!!!!!
        self.finderArgs = {
            "method": method,
            "gsize": 45,
            "gsig": 9,
            "window": 3,
            "MAXONEFRAME": 500,
            "REFPING": 600000,
            "MAXREF": 1000,
            "captureSize": utils.Rect(actualCols, actualRows, self.centerPt),
            "cropRegion": utils.Rect(100, 100, self.centerPt),
            "decisionBoundary": utils.Rect(boundCols, boundRows, self.centerPt),
            "color": self.color,
            "motorsOn": self.motorsOn,
        }

        self._wormFinder = WormFinder(**self.finderArgs)

        ##### Debugging
        #        self._gaussianWindow = WindowManager('Gaussian', self.onKeypress)
        self._overlayWindow = WindowManager("Overlay", self.onKeypress)

    def run(self):

        # Show windows
        self._rawWindow.createWindow()
        self._overlayWindow.createWindow()
        i = 0
        while self._rawWindow.isWindowCreated:
            self._cap.enterFrame()
            frame = self._cap.frame

            # Probably not useful, removes errors when playing from video
            #            if not self._captureManager.gotFrame:
            #                self.shutDown()
            #                break

            # Display raw frame to rawWindow

            t1 = time.time()
            # Get frame
            frame = self._cap.frame

            # Show frame to raw feed
            self._rawWindow.show(frame)

            # If tracking is enabled or motors are on, start tracking
            if time.time() - self._lastCheck >= self._sampleFreq:
                if self.finderArgs["method"] in ["lazyc", "lazyd", "lazy"]:
                    self.gaussian = self._wormFinder.processFrame(frame)

                    self.overlayImage = copy.deepcopy(frame)
                    if self.motorsOn:
                        self._wormFinder.decideMove()
                    self._lastCheck = time.time()
                    self._wormFinder.drawDebugCropped(self.overlayImage)
                    self._wormFinder.drawTextStatus(self.overlayImage, self._cap.isWritingVideo, self.motorsOn)

                    self._overlayWindow.show(self.overlayImage)
                #                    if self.gaussian is not None:
                #                        self._gaussianWindow.show(self.gaussian)
                #                        cv2.imwrite('g-%d.jpg' % i, self.gaussian )
                #                        cv2.imwrite('o-%d.jpg' % i, self.overlayImage )

                if self.finderArgs["method"] in ["test", "conf"]:
                    #                    self.overlayImage = copy.deepcopy(frame)
                    self._wormFinder.drawTest(frame)  # self.overlayImage)
            #                    self._overlayWindow.show(self.overlayImage)

            i += 1
            self._cap.exitFrame()
            self._rawWindow.processEvents()
            logt.info("processing: %0.6f" % (time.time() - t1))

    @property
    def isDebug(self):
        return logt.getEffectiveLevel() <= logging.INFO

    def shutDown(self):
        self._rawWindow.destroyWindow()

        if self._cap.isWritingVideo:
            self._cap.stopWritingVideo()
        try:
            self._wormFinder.servos.disableMotors()
            self._wormFinder.servos.closeSerial()
        except Exception as e:
            logt.exception(str(e))

    def onKeypress(self, keycode):
        """
        Keypress options
        <SPACE> --- Motors On
        < TAB > --- start/stop recording screencast
        < ESC > --- quit

        """

        if keycode == 32:  # space

            if self.motorsOn:
                self.motorsOn = False  # _captureManager.writeImage('screenshot.png')
                if not self.isDebug:
                    self._wormFinder.servos.disableMotors()

            else:
                self.motorsOn = True
                self._wormFinder.launch = 0
                if not self.isDebug:
                    self._wormFinder.servos.enableMotors()
                    self._wormFinder.launch = 0
                    time.sleep(2)

        elif keycode == 9:  # tab
            if not self._cap.isWritingVideo:
                self._cap.startWritingVideo(
                    "worm%s.avi" % time.strftime("%Y_%m_%d-%H-%M-%S", time.localtime(time.time())),
                    cv2.cv.CV_FOURCC(*"MJPG"),
                )

            else:
                self._cap.stopWritingVideo()

        elif keycode == 27:  # escape
            self.shutDown()
Example No. 45
    def __init__(self, method, src):
        self.color = True
        self.motorsOn = False

        ### Sensitivity of tracker params
        self._sampleFreq = 0.1  # in sec

        ### Set Camera params
        # self.resolution = (640, 480 )
        self.resolution = (1280, 960)
        source = {
            0: 0,
            1: 1,
            2: "led_move1.avi",
            3: "screencast.avi",
            4: "screencast 1.avi",
            5: "shortNoBox.avi",
            6: "longNoBox.avi",
            7: "H299.avi",
            8: "testRec.avi",
            9: "longDemo.avi",
        }
        self.captureSource = source[int(src)]

        ### Timing initialization
        self._startTime = time.time()
        self._lastCheck = self._startTime - self._sampleFreq

        ### Display params
        self.mirroredPreview = False

        ### Initialize Objects

        ##### Windows

        self._rawWindow = WindowManager("RawFeed", self.onKeypress)

        ### Capture -- resolution set here
        self._cap = CaptureManager(
            cv2.VideoCapture(self.captureSource), self._rawWindow, self.mirroredPreview, self.resolution
        )

        actualCols, actualRows = self._cap.getResolution()
        self.centerPt = utils.Point(actualCols / 2, actualRows / 2)

        ## from here on out use this resolution
        boundCols = 600
        boundRows = 600
        ### Arguments for finder
        # --> Pairs are always COLS, ROWS !!!!!!!
        self.finderArgs = {
            "method": method,
            "gsize": 45,
            "gsig": 9,
            "window": 3,
            "MAXONEFRAME": 500,
            "REFPING": 600000,
            "MAXREF": 1000,
            "captureSize": utils.Rect(actualCols, actualRows, self.centerPt),
            "cropRegion": utils.Rect(100, 100, self.centerPt),
            "decisionBoundary": utils.Rect(boundCols, boundRows, self.centerPt),
            "color": self.color,
            "motorsOn": self.motorsOn,
        }

        self._wormFinder = WormFinder(**self.finderArgs)

        ##### Debugging
        #        self._gaussianWindow = WindowManager('Gaussian', self.onKeypress)
        self._overlayWindow = WindowManager("Overlay", self.onKeypress)
Exemplo n.º 46
0
    def __init__(self,save=False, inFile = None):
        print "benFinder.__init__()"
        print os.path.realpath(__file__)
        configPath = "%s/%s" % (os.path.dirname(os.path.realpath(__file__)),
                                self.configFname)
        print configPath
        self.cfg = ConfigUtil(configPath,self.configSection)

        self.debug = self.cfg.getConfigBool("debug")
        if (self.debug): print "Debug Mode"

        self._wkdir = self.cfg.getConfigStr("working_directory")
        if (self.debug): print "working_directory=%s\n" % self._wkdir
        self._tmpdir = self.cfg.getConfigStr("tmpdir")
        if (self.debug): print "tmpdir=%s\n" % self._tmpdir


        # Check if we are running from a live Kinect or a file.
        if (inFile):
            device = depth.CV_CAP_FILE
        else:
            device = depth.CV_CAP_FREENECT

        # Initialise the captureManager
        self._captureManager = CaptureManager(
            device, None, True, inFile=inFile)
        self._captureManager.channel = depth.CV_CAP_OPENNI_DEPTH_MAP

        # If we are running from a file, use the first frame as the
        # background image.
        if (inFile):
            self.saveBgImg()

        # If we have asked to save the background image, do that, and exit,
        # otherwise initialise the seizure detector.
        if (save):
            self.saveBgImg()
        else:
            self.loadBgImg()
            self.autoBackgroundImg = None
            self._status = self.ALARM_STATUS_OK
            self._ts = TimeSeries(tslen=self.cfg.getConfigInt("timeseries_length"))
            self._frameCount = 0
            self._outputFrameCount = 0
            self._nPeaks = 0
            self._ts_time = 0
            self._rate = 0
            self._ws = webServer.benWebServer(self)
            self._ws.setBgImg("%s/%s" % (self._tmpdir,
                    self.cfg.getConfigStr("background_depth")))
            self._ws.setChartImg("%s/%s" % (self._tmpdir,
                    self.cfg.getConfigStr("chart_fname")))
            self._ws.setRawImg("%s/%s" % (self._tmpdir,
                    self.cfg.getConfigStr("raw_image_fname")))
            self._ws.setMaskedImg("%s/%s" % (self._tmpdir,
                    self.cfg.getConfigStr("masked_image_fname")))
            self._ws.setDataFname("%s/%s" % (self._tmpdir,
                    self.cfg.getConfigStr("data_fname")))
            self._ws.setAnalysisResults({})
            webServer.setRoutes(self._ws)
            self.run()
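
# Hedged launch sketch (assumptions: the enclosing class is called BenFinder and the
# argparse flags below are illustrative, not the project's actual CLI):
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Kinect-based seizure detector')
    parser.add_argument('--save', action='store_true',
                        help='save a background depth image and exit')
    parser.add_argument('--infile', default=None,
                        help='process a recorded file instead of the live Kinect')
    args = parser.parse_args()
    # __init__ does the rest: it either saves the background image or calls run().
    BenFinder(save=args.save, inFile=args.infile)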
Exemplo n.º 47
0
    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(
            cv2.VideoCapture(0), self._windowManager, True)
        self._curveFilter = filters.BGRPortraCurveFilter()
Exemplo n.º 48
0
Arquivo: cameo.py Projeto: sarvex/pycv
    def __init__(self):
        Cameo.__init__(self)
        self._hiddenCaptureManager = CaptureManager(
            cv2.VideoCapture(1))
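    # Note (assumption): this __init__ appears to extend the Cameo class from the
    # earlier examples; the extra CaptureManager is built without a window manager,
    # so the second camera (index 1) is captured but never previewed directly.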
Exemplo n.º 49
0
    def __init__( self, method, src ):

        ### Sensitivity of tracker params
        self._sampleFreq = 0.1 #in sec
        
        ### Set Camera params
        #self.resolution = (640, 480 )
        self.resolution = (1280, 960)
        source = {
            0:0, 
            1:1, 
            2:'led_move1.avi', 
            3:'screencast.avi', 
            4:'screencast 1.avi',
            5: 'shortNoBox.avi',
            6: 'longNoBox.avi',
            7: 'H299.avi',
            8: 'testRec.avi',
            9: 'longDemo.avi',
            10: 'worm2014_05_05-12-44-53.avi'
            }
        self.color = True
        self.captureSource = source[int(src)]
        
        ### Timing initialization
        self._startTime = time.time()
        self._lastCheck = self._startTime - self._sampleFreq

        ### Display params
        self.mirroredPreview = False


        ### Initialize Objects       

        ##### Windows

        self._rawWindow = WindowManager( 'RawFeed', self.onKeypress )

        ### Capture -- resolution set here
        self._cap = CaptureManager( 
            cv2.VideoCapture(self.captureSource), 
            self._rawWindow, 
            self.mirroredPreview, self.resolution)
        
        actualCols, actualRows = self._cap.getResolution()
        ## from here on out use this resolution 
        
        ### Arguments for finder
        self.finderArgs = {
            'method' : method,
            'gsize' :  45,
            'gsig' : 9,
            'window' : 3,
            'boundBoxRow' : 150,
            'boundBoxCol' : 150,
            'limRow' : 100,
            'limCol' : 100,
            'MAXONEFRAME': 500,
            'REFPING' : 600000,
            'MAXREF': 1000,
            'capCols':actualCols,
            'capRows': actualRows,
            'color' : self.color
            }

        self._wormFinder = WormFinder( **self.finderArgs )     

        ##### Debugging
        self._overlayWindow = WindowManager( 'Overlay', self.onKeypress )
        self.motorsOn = False
Exemplo n.º 50
0
    def __init__(self, video_source):
        self._windowManager = WindowManager('Browser', self.onKeypress)
        self._captureManager = CaptureManager(video_source, self._windowManager, True)
        self._faceTracker = FaceTracker()
        self._shouldDrawDebugRects = False
        self._curveFilter = filters.BGRPortraCurveFilter()
Exemplo n.º 51
0
class Gui(QtWidgets.QMainWindow):

    ## Signals
    #closeGui = QtCore.pyqtSignal()

    def __init__(self, parent=None):
        QtWidgets.QWidget.__init__(self, parent)
        
        
        ## Instantiate classes
        self.ui = Ui_CalibrationUI()
        self.ui.setupUi(self)

        self._cap = None
        self.ebb = None
        ## Stuff you can change in the UI
        self.width = None
        self.idealRes = [1280, 960]
        self.colSteps = 0
        self.rowSteps = 0
        self.multFactor = 1
        self.motorsAble = False
        self.showImage = False

        ## Stuff that doesn't change
        #self._cap = CaptureManager( cv2.VideoCapture(0) )
        
        
        

        ## Video stuff
        self._timer = QtCore.QTimer( self )
        self._timer.timeout.connect( self.play )
        self._timer.start(27)
        self.update()

        source = [0, 1, 2,3, 'led_move1.avi', 
                  'screencast.avi',
                  'screencast 1.avi',
                  'shortNoBox.avi',
                  'longNoBox.avi',
                  'H299.avi',
                  'testRec.avi',
                  'longDemo.avi']
        for s in source:
            self.ui.sourceCombo.addItem(str(s))


        ## Register button actions
        self.ui.buttonSet.clicked.connect( self.set )
        self.ui.buttonCenter.clicked.connect( self.center )
        self.ui.buttonReset.clicked.connect( self.reset )
        self.ui.buttonSave.clicked.connect( self.save )
        self.ui.buttonRight.clicked.connect( partial( self.motors, 'right' ) )
        self.ui.buttonLeft.clicked.connect( partial( self.motors, 'left' ) )
        self.ui.buttonUp.clicked.connect( partial( self.motors, 'up' ) )
        self.ui.buttonDown.clicked.connect( partial( self.motors, 'down' ) )
        self.ui.scalingSlider.valueChanged[int].connect( self.setMultFactor )
        self.ui.scalingSlider.setValue(10)
        self.ui.scaling.setText( 'Motor scaling is %d' % self.ui.scalingSlider.value() ) 
        self.ui.sourceCombo.activated[int].connect( self.setSource )
        self.ui.comboBox.activated.connect ( self.resolutionSelect )
                


        self.setAble('settings', False)
        self.setAble('motors', False)
        self.setAble('center', False)
        self.setAble('reset', False)
        self.setAble('save', False)
    
    def setSource ( self, value ):
        if self._cap:
            if self._cap._capture.isOpened(): #ugly access to VideoCapture object from managers
                self._cap._capture.release()
        
        self._cap = CaptureManager( cv2.VideoCapture(int(value)) )
        #self.indWindow = cv2.namedWindow("Camera Display")
        self.showImage = True
        self.actualRes = self._cap.getResolution()


        # Set to default resolution from camera (640x480)
        #res = "%d x %d" % (int(self.actualRes[0]), int(self.actualRes[1]) )
        #index = self.ui.comboBox.findText(res) #Sets picker to default resolution
        #self.ui.comboBox.setCurrentIndex(index)

        # Set to Hoky desired default res (1280x960)
        res = "%d x %d" % (int(self.idealRes[0]), int(self.idealRes[1]) )
        index = self.ui.comboBox.findText(res) #Sets picker to default resolution
        self.ui.comboBox.setCurrentIndex(index)
        self.resolutionSelect()
        




        self.p = Point(200,300)
        self.c = Point(self.actualRes[0] // 2, self.actualRes[1] // 2)
        self.setAble('settings', True)

    def resolutionSelect ( self ):
        # Get desired resolution from the user: 
        self.idealRes = self.ui.comboBox.currentText().split('x')
        self.idealRes = [int(x) for x in self.idealRes]  # keep it a list so it stays indexable (map() is lazy on Python 3)

        logger.debug("User input res: %d %d" % (self.idealRes[0], self.idealRes[1])  )
            
        # Attempt to change the resolution:
        self._cap.setProp('width', self.idealRes[0])
        self._cap.setProp('height', self.idealRes[1])
        self.actualRes = self._cap.getResolution()

        # Something went wrong with resolution assignment -- possible need for shut down if resolution can't even be queried
        self.c = Point(self.actualRes[0] // 2, self.actualRes[1] // 2)
        if not ( self.actualRes == self.idealRes ):
            QtWidgets.QMessageBox.information( self, "Resolution",
                                              "That resolution isn't supported by the camera. Instead, your actual resolution is: %d x %d" % (self.actualRes[0], self.actualRes[1] ) )
            res = "%d x %d" % (int(self.actualRes[0]), int(self.actualRes[1]) )
            index = self.ui.comboBox.findText(res)
            self.ui.comboBox.setCurrentIndex(index)


    def save( self ):
        c = os.path.dirname(os.getcwd())
        cc = os.path.dirname(c)
        f = open( ("%s/config.txt" % cc) , "w")
        f.write('%d|%d|%s|%s\n' % ( self.actualRes[0], self.actualRes[1], self.ui.widthLine.text(), self.ui.sourceCombo.currentText() ))
        f.close()
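        # Format note (inferred from the write above and from readInCalibration() in
        # the tracker GUI later on this page): config.txt holds one pipe-delimited line,
        #   <width_px>|<height_px>|<frame_width_mm>|<source>
        # e.g. "1280|960|10.5|0", which the tracker reads back with line.split('|').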
        
         
        

    def setMultFactor ( self, value ):
        self.multFactor = value
        self.ui.scaling.setText( 'Step scaling is %d' % value)
    
    def setAble( self, group, ability ):
        groups = { 'motors' : [self.ui.buttonLeft, 
                               self.ui.buttonRight,
                               self.ui.buttonUp, 
                               self.ui.buttonDown, 
                               self.ui.scalingSlider, 
                               self.ui.scaling],
                  'center' : [self.ui.buttonCenter], 
                  'reset' : [self.ui.buttonReset],
                  
                  'settings': [self.ui.resDesc,
                               self.ui.framewDesc,
                               self.ui.buttonSet, 
                               self.ui.widthLine,
                               self.ui.comboBox],
                  'save': [self.ui.buttonSave]
                  }
        
        for g in groups[group]:
            g.setEnabled(ability)

            



    def motors( self, direction):
        xdir = 1
        ydir = 1 #comp up is down
        t = 300

        dir ={ 'left': (t, xdir * self.multFactor * (-1), 0 ) ,
              'right': ( t, xdir * self.multFactor * (1), 0 ),
              'up': (t, 0, ydir * self.multFactor * (1) ),
              'down': (t, 0, ydir * self.multFactor * (-1) )
              }

        #print dir[direction]
        th = threading.Thread(target  = self.ebb.move , args = dir[direction] )
        try:
            #partial(self.ebb.move, dir[direction]) 
            th.start()
            #th.join()
        except Exception as e:
            logger.exception( str(e) )


    def reset ( self ):
        
        self.ebb.move(300, -self.colSteps, -self.rowSteps)
        if self.ebb.connected:
            self.ebb.closeSerial()
        self.setAble('motors', False)
        self.setAble('settings', True)

    def center ( self ):
        #th = threading.Thread(target  = self.ebb.move , args = (100,200,300) )
        try:
            #partial(self.ebb.move, dir[direction]) 
            self.colSteps, self.rowSteps = self.ebb.centerWorm(300,200,300)
            #th.join()
        except Exception as e:
            logger.exception( str(e) )

        
        self.setAble('reset', True)
        self.setAble('save', True)
        

    def set( self ):
        if self.ui.widthLine.text():
            try:
                float( self.ui.widthLine.text() )
            except ValueError as e:
                QtWidgets.QMessageBox.information( self, "Invalid Width", "Cannot convert that width to a float! Make sure you enter a number (decimals accepted)")
                return
       
        if self.ui.widthLine.text() and self.ui.comboBox.currentText():
            self.resolutionSelect()
            # Instantiate motors    
            try:
                self.ebb = EasyEBB( resolution = self.actualRes, sizeMM = float(self.ui.widthLine.text() ) )
                  

                self.setAble('settings', False)
                self.setAble('motors', True)
                self.setAble('center', True)
            except serial.SerialException as e:
                logger.exception(e)
                QtWidgets.QMessageBox.information( self, "Motors Issue",
                                                "Please connect motors or restart them, having issues connecting now")
        else:
            QtWidgets.QMessageBox.information( self, "Validation",
                                                  "Please enter a measurement for the width and double-check your resolution choice") 

        

    def closeEvent ( self, event):
        logger.debug("closing")
        if self._cap._capture.isOpened():
            self._cap._capture.release()
        #if self._cap:
        #    cv2.destroyWindow("Camera Display")
        #    cv2.destroyAllWindows()
        if self.ebb:
            self.ebb.closeSerial()
    
    def isColor( self, imgIn ):
        s = np.shape(imgIn)
        if  np.size(s) == 3:
            try:
                ncolor = np.shape(imgIn)[2]
                boolt = int(ncolor) > 2
                return boolt
            except IndexError:
                QtWidgets.QMessageBox.information( self, "Camera Issue", "Please Check Camera Source")
                logger.error("Something wrong with camera input")
                self.showImage = False
        else:
            return False
         
        
    def play( self ):
        if self.showImage:
            self._cap.enterFrame()
            self.currentFrame = self._cap.frame
        
            self.color = self.isColor(self.currentFrame)
            t1 = time.time()
            if self.color:
                try:
                    self.currentFrame = cv2.cvtColor(self.currentFrame, cv2.COLOR_BGR2GRAY)
                    if self.currentFrame is not None:
                        self.currentFrame = self.p.draw(self.currentFrame, 'black-1')
                        self.currentFrame = self.c.draw(self.currentFrame, 'black-1')
                   
                        self.ui.videoFrame.setPixmap(self._cap.convertFrame(self.currentFrame))
                        self.ui.videoFrame.setScaledContents(True)
                    else:
                        QtWidgets.QMessageBox.information( self, "Camera Issue", "Please Check Camera Source")
                        logger.error("Something wrong with camera input")
                
                    #cv2.imshow( "Camera Display",self.currentFrame)
                except TypeError:
                    logger.exception("No Frame")
                    QtWidgets.QMessageBox.information( self, "Camera Issue", "Please Check Camera Source")

                finally:
                    self._cap.exitFrame()
            else:
                try:
                    if self.currentFrame is not None:
                        self.currentFrame = self.p.draw(self.currentFrame, 'black-1')
                        self.currentFrame = self.c.draw(self.currentFrame, 'black-1')
                        self.ui.videoFrame.setPixmap(self._cap.convertFrame(self.currentFrame))
                        self.ui.videoFrame.setScaledContents(True)
                    else:
                        QtWidgets.QMessageBox.information( self, "Camera Issue", "Please Check Camera Source")
                        logger.error("Something wrong with camera input")
                        logger.exception("No Frame")
                        self.showImage = False

                        #cv2.imshow( "Camera Display",self.currentFrame)
                except TypeError:
                    logger.exception("No Frame")
                finally:
                    self._cap.exitFrame()

            self._cap.exitFrame()
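
# Hedged launch sketch (not part of the original example): QtWidgets and Ui_CalibrationUI
# are already imported by this module judging from the class above; the entry point below
# is an assumption about how the calibration window might be started.
if __name__ == '__main__':
    import sys
    app = QtWidgets.QApplication(sys.argv)
    gui = Gui()
    gui.show()
    sys.exit(app.exec_())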
Exemplo n.º 52
0
class Browser(object):
    
    def __init__(self,video_source):  
        self._windowManager = WindowManager('Browser', self.onKeypress)
        self._captureManager = CaptureManager(video_source, self._windowManager, True)
        self._faceTracker = FaceTracker()
        self._shouldDrawDebugRects = False
        self._curveFilter = filters.BGRPortraCurveFilter()
    
    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            
            if frame is not None:
                print "got frame" 
                self._faceTracker.update(frame)
                faces = self._faceTracker.faces
                rects.swapRects(frame, frame,
                                [face.faceRect for face in faces])
            
                #filters.strokeEdges(frame, frame)
                #self._curveFilter.apply(frame, frame)
                
                if self._shouldDrawDebugRects:
                    self._faceTracker.drawDebugRects(frame)
            else:
                print "got None frame"
                print "press any key to exit."
                cv2.waitKey(0)
                break
            self._captureManager.exitFrame()
            waitkey_time=1
            if self._captureManager._video_source!=0:   
                waitkey_time=500
            self._windowManager.processEvents(waitkey_time)
    
    def onKeypress(self, keycode):
        """Handle a keypress.
        
        space  -> Take a screenshot.
        tab    -> Start/stop recording a screencast.
        x      -> Start/stop drawing debug rectangles around faces.
        escape -> Quit.
        
        """
        if keycode == 32: # space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9: # tab
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo(
                    '/Users/xcbfreedom/Documents/screencast.avi')
            else:
                self._captureManager.stopWritingVideo()
        elif keycode == 120: # x
            self._shouldDrawDebugRects = \
                not self._shouldDrawDebugRects
        elif keycode == 27: # escape
            self._windowManager.destroyWindow()
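
# Hedged usage sketch (assumption, not from the source): run() above compares
# self._captureManager._video_source to 0, which suggests Browser is handed a camera
# index or a video-file path rather than an already opened VideoCapture object.
if __name__ == '__main__':
    Browser(0).run()                      # live camera
    # Browser('screencast.avi').run()     # or play back a recorded file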
Exemplo n.º 53
0
    def __init__(self):
        self._windowManager = WindowManager("Cameo", self.onKeypress)
        self._captureManager = CaptureManager(
            cv2.VideoCapture("/home/abner0908/Videos/MyInputVid.avi"), self._windowManager, True
        )
Exemplo n.º 54
0
class Tracker ( object ):
    
    def __init__( self, method, src ):

        ### Sensitivity of tracker params
        self._sampleFreq = 0.1 #in sec
        
        ### Set Camera params
        #self.resolution = (640, 480 )
        self.resolution = (1280, 960)
        source = {
            0:0, 
            1:1, 
            2:'led_move1.avi', 
            3:'screencast.avi', 
            4:'screencast 1.avi',
            5: 'shortNoBox.avi',
            6: 'longNoBox.avi',
            7: 'H299.avi',
            8: 'testRec.avi',
            9: 'longDemo.avi',
            10: 'worm2014_05_05-12-44-53.avi'
            }
        self.color = True
        self.captureSource = source[int(src)]
        
        ### Timing initialization
        self._startTime = time.time()
        self._lastCheck = self._startTime - self._sampleFreq

        ### Display params
        self.mirroredPreview = False


        ### Initialize Objects       

        ##### Windows

        self._rawWindow = WindowManager( 'RawFeed', self.onKeypress )

        ### Capture -- resolution set here
        self._cap = CaptureManager( 
            cv2.VideoCapture(self.captureSource), 
            self._rawWindow, 
            self.mirroredPreview, self.resolution)
        
        actualCols, actualRows = self._cap.getResolution()
        ## from here on out use this resolution 
        
        ### Arguments for finder
        self.finderArgs = {
            'method' : method,
            'gsize' :  45,
            'gsig' : 9,
            'window' : 3,
            'boundBoxRow' : 150,
            'boundBoxCol' : 150,
            'limRow' : 100,
            'limCol' : 100,
            'MAXONEFRAME': 500,
            'REFPING' : 600000,
            'MAXREF': 1000,
            'capCols':actualCols,
            'capRows': actualRows,
            'color' : self.color
            }

        self._wormFinder = WormFinder( **self.finderArgs )     

        ##### Debugging
        self._overlayWindow = WindowManager( 'Overlay', self.onKeypress )
        self.motorsOn = False



    def run( self ):

        # Show windows
        self._rawWindow.createWindow()
        self._overlayWindow.createWindow()

        while self._rawWindow.isWindowCreated:
            self._cap.enterFrame()
            frame = self._cap.frame

            # Probably not useful, removes errors when playing from video
#            if not self._captureManager.gotFrame:
#                self.shutDown()
#                break

            # Display raw frame to rawWindow
            
            t1 = time.time()
            # Get frame
            frame = self._cap.frame

            # Show frame to raw feed
            self._rawWindow.show(frame)

            # If tracking is enabled or motors are on, start tracking
            if time.time() - self._lastCheck >= self._sampleFreq:
                if self.finderArgs['method'] in ['lazyc', 'lazyd']:
                    self.gaussian = self._wormFinder.processFrame( frame )
                    self.overlayImage = copy.deepcopy(frame)
                    if self.motorsOn:
                        self._wormFinder.decideMove()
                    self._lastCheck = time.time()
                    self._wormFinder.drawDebugCropped( self.overlayImage)
                    self._wormFinder.drawTextStatus(self.overlayImage,self._cap.isWritingVideo, self.motorsOn)
                    
                    self._overlayWindow.show(self.overlayImage)                    

                if self.finderArgs['method'] in ['test','conf']: 
                    self._wormFinder.drawTest( frame )
                    
                    

            self._cap.exitFrame()
            self._rawWindow.processEvents()
            logt.debug('frame processing took: %0.6f' % (time.time() - t1))
    
    @property
    def isDebug( self ):
        return logt.getEffectiveLevel() <= logging.INFO

    def shutDown( self ):
        self._rawWindow.destroyWindow()
        #if not self.isDebug:
        if self._cap.isWritingVideo:
            self._cap.stopWritingVideo()
        try:
#            self._wormFinder.writeOut('%s-%s' % (self.finderArgs['method'], self.captureSource))
            self._wormFinder.servos.disableMotors()
            self._wormFinder.servos.closeSerial()
        except Exception as e:
            logt.exception(str(e))

    def onKeypress ( self, keycode ):
        '''
        Keypress options
        <SPACE> --- toggle motors on/off
        < TAB > --- start/stop recording a screencast
        < ESC > --- quit

        '''

        if keycode == 32: #space

            if self.motorsOn:
                self.motorsOn = False#_captureManager.writeImage('screenshot.png')
                if not self.isDebug:
                    self._wormFinder.servos.disableMotors()
                        #cv2.displayOverlay('Overlay','Motors disabled', 0)
            else:
                self.motorsOn = True
                self._wormFinder.launch = 0
                if not self.isDebug:
                    self._wormFinder.servos.enableMotors()
                    self._wormFinder.launch = 0
                    time.sleep(2)
                    #cv2.displayOverlay('Overlay','Motors enabled', 0)

        elif keycode == 9: #tab
            if not self._cap.isWritingVideo:
                self._cap.startWritingVideo(
                    'worm%s.avi' % time.strftime("%Y_%m_%d-%H-%M-%S", time.localtime(time.time())),
                    cv2.cv.CV_FOURCC(*'MJPG'))
#                cv2.displayOverlay('Overlay','Writing Video', 0)
            else:
                self._cap.stopWritingVideo()
#                cv2.displayOverlay('Overlay','Not writing Video', 0)

        elif keycode == 27: #escape
            self.shutDown()
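
# Hedged launch sketch (not part of the original listing): Tracker takes a finder
# method name and an index into its source dict, so something along these lines
# would start it, assuming the module-level imports used by the class are in place.
if __name__ == '__main__':
    import sys
    method = sys.argv[1] if len(sys.argv) > 1 else 'lazyc'   # 'lazyc', 'lazyd', 'test' or 'conf'
    src = sys.argv[2] if len(sys.argv) > 2 else '0'          # key into the source dict above
    Tracker(method, src).run()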
Exemplo n.º 55
0
    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(cv2.VideoCapture(0),
                                              self._windowManager, True)
Exemplo n.º 56
0
class Facedetect(object):
    
    def __init__(self):
        self._windowManager = WindowManager('Facedetect', self.onKeypress)
        self._captureManager = CaptureManager(cv2.VideoCapture(camera_nr), self._windowManager, True)
        self._faceTracker = FaceTracker()
        self._shouldDrawDebugRects = True
        self._curveFilter = filters.BGRPortraCurveFilter()

    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            
            if frame is not None:
                
                t = cv2.getTickCount()
                self._faceTracker.update(frame)
                faces = self._faceTracker.faces
                t = cv2.getTickCount() - t
                print("time taken for detection = %gms" % (t/(cv2.getTickFrequency())*1000.))
                
                # uncomment this line for swapping faces
                #rects.swapRects(frame, frame, [face.faceRect for face in faces])
                
                #filters.strokeEdges(frame, frame)
                #self._curveFilter.apply(frame, frame)
                
                if self._shouldDrawDebugRects:
                    self._faceTracker.drawDebugRects(frame)
                    self._faceTracker.drawLinesFromCenter(frame)
                
            self._captureManager.exitFrame()
            self._windowManager.processEvents()
    
    def onKeypress(self, keycode):
        """Handle a keypress.
        
        space  -> Take a screenshot.
        tab    -> Start/stop recording a screencast.
        x      -> Start/stop drawing debug rectangles around faces.
        escape -> Quit.
        
        """
        if keycode == 32: # space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9: # tab
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo(
                    'screencast.avi')
            else:
                self._captureManager.stopWritingVideo()
        elif keycode == 120: # x
            self._shouldDrawDebugRects = \
                not self._shouldDrawDebugRects
        elif keycode == 27: # escape
            self._windowManager.destroyWindow()
            # When everything is done, release the capture
            self._captureManager.release()
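
# Hedged usage sketch: camera_nr is referenced in __init__ above but defined outside
# this snippet; something like the following is assumed at module level.
camera_nr = 0
if __name__ == '__main__':
    Facedetect().run()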
Exemplo n.º 57
0
    def __init__(self):
        self._windowManager = WindowManager('Facedetect', self.onKeypress)
        self._captureManager = CaptureManager(cv2.VideoCapture(camera_nr), self._windowManager, True)
        self._faceTracker = FaceTracker()
        self._shouldDrawDebugRects = True
        self._curveFilter = filters.BGRPortraCurveFilter()
Exemplo n.º 58
0
class Gui(QtWidgets.QMainWindow):

    def __init__(self, parent=None):
        QtWidgets.QWidget.__init__(self, parent)

        logger.warning('starting up')
        self.startRecTime = None
        ## Instantiate classes
        self.ui = Ui_TrackerUI()
        self.ui.setupUi(self)
        self._cap  = None
        # What to display on screen
        source = [0, 1, 2, 3 ]

        try:
            dirList = os.listdir(os.getcwd())
            for fileName in dirList:
                if 'avi' in fileName:
                    source.append(fileName)
        except Exception as e:
            logger.exception(str(e))
        for s in source:
            self.ui.srcCombo.addItem(str(s))
        self.ui.srcCombo.setCurrentText('0')
        self.showImage = False ## If True, video gets displayed (need _cap)

        ## Stuff you can change in the UI
        self.idealRes = [1280, 960]

        ## Video stuff
        self._timer = QtCore.QTimer( self )
        self._timer.timeout.connect( self.play )
        self._timer.start(27)
        self.update()

        ## Tracking parameters
        self._lastCheck = time.time()
        self._sampleFreq = 0.1
        self.motorsOn = False
        self.runTracking = False
        self.ebb = None 
        self._wormFinder = None  
        cv2.namedWindow('gaussian')


        ## Register button actions
        self.ui.buttonRight.clicked.connect( partial( self.motors, 'right' ) )
        self.ui.buttonLeft.clicked.connect( partial( self.motors, 'left' ) )
        self.ui.buttonUp.clicked.connect( partial( self.motors, 'up' ) )
        self.ui.buttonDown.clicked.connect( partial( self.motors, 'down' ) )
        self.ui.buttonRefresh.clicked.connect( self.getNewRef )
        self.ui.buttonRun.clicked.connect( self.run )
        self.ui.buttonConnect.clicked.connect( self.connectMotors )
        self.ui.buttonReset.clicked.connect (self.resetAll )

        self.ui.buttonMotorized.clicked.connect( self.motorized )

        self.ui.buttonRec.clicked.connect( self.record )

        self.ui.srcCombo.activated['QString'].connect( self.setSource )

        self.ui.scalingSlider.valueChanged[int].connect( self.setMultFactor )
        self.ui.scalingSlider.setValue(10) #default to 10 steps
        self.ui.scaling.setText( 'Step scaling is %d' % self.ui.scalingSlider.value() ) 
        self.multFactor = self.ui.scalingSlider.value()
        self.ui.studyName.returnPressed.connect( self.setFileName )

        ## Change button colors for pretty
 
        self.ui.buttonMotorized.setText("Turn On")
        self.ui.buttonRec.setText("Start")
        self.ui.fps.setText("")
        self.ui.lcdNumber.display("")
        ## Read in file from calibration GUI
        self.readInCalibration()

        ## Disable buttons
        self.setAble( 'motors', False ) 
        self.setAble( 'motorized', False ) 
        self.setAble( 'source', False ) 
        self.setAble( 'buttons', False ) 
        self.setAble( 'recording', False )

        

    def setAble( self, group, ability ): 
        groups = { 'connectMotors' : [ self.ui.buttonConnect ],
                   'motors' : [ self.ui.scaling, 
                                self.ui.buttonUp, 
                                self.ui.buttonDown, 
                                self.ui.buttonLeft,
                                self.ui.buttonRight ], 
                   'motorized' : [ self.ui.buttonMotorized, 
                                   self.ui.motorsDesc ], 
                   'study' : [ self.ui.studyDesc, 
                               self.ui.studyName ], 
                   'source': [ self.ui.srcDesc, 
                               self.ui.srcCombo ], 
                   'buttons': [ self.ui.buttonRun, 
                                self.ui.buttonConnect, 
                                self.ui.buttonRefresh ], 
                   'recording' : [ self.ui.recordingDesc, 
                                   self.ui.buttonRec ],
                   'reset' : [ self.ui.scaling, 
                               self.ui.buttonUp, 
                               self.ui.buttonDown, 
                               self.ui.buttonLeft,
                               self.ui.buttonRight,
                               self.ui.buttonMotorized, 
                               self.ui.motorsDesc,
                               self.ui.studyDesc, 
                               self.ui.studyName,
                               self.ui.buttonRun, 
                               self.ui.buttonConnect, 
                               self.ui.buttonRefresh,
                               self.ui.recordingDesc, 
                               self.ui.buttonRec ] 
               }
        for c in groups[group]:
            c.setEnabled(ability)

    def setFileName ( self ) :
        self.shareFileName = '%s/%s-%s' % ('output', self.ui.studyName.text(), time.strftime( "%a_%d_%b_%Y-%H%M%S" ) )
        self.setLogging('debug')
        self.setAble( 'study', False ) 
        self.setAble( 'buttons', True)
        self.ui.buttonRefresh.setEnabled(False) 

    def getNewRef( self ):
        if self._wormFinder:
            self._wormFinder.resetRef()


    def record( self ):
        if self._cap.isWritingVideo:
            self._cap.stopWritingVideo()
            self.ui.buttonRec.setStyleSheet('QPushButton {color:green}')
            self.ui.buttonRec.setText("Start")
            self.startRecTime = None
        else:
            self.startRecTime = time.time()
            self.shareFileName = '%s/%s-%s' % ('output', 
                                               self.ui.studyName.text(), 
                                               time.strftime( "%a_%d_%b_%Y-%H%M%S" ) )
            logger.debug('New sharefilename')

            platform = sys.platform.lower()
            if platform in ['win32', 'win64']:
                fourcc = cv2.VideoWriter_fourcc(*'IYUV')
                logger.warning( 'fourcc is IYUV')
            else:
                fourcc = cv2.cv.CV_FOURCC(*'MJPG')#cv2.VideoWriter_fourcc(*'MJPG')
                logger.warning( 'fourcc is MJPG')
            if not self._cap.isWritingVideo:
                self._cap.startWritingVideo('%s.avi' % self.shareFileName, 
                                            fourcc)    
            self.ui.buttonRec.setStyleSheet('QPushButton {color:red}')
            self.ui.buttonRec.setText("Stop")

    def motorized( self ):
        if self.motorsOn:
            self.motorsOn = False
            logger.info('motors off')
            self.ui.buttonMotorized.setStyleSheet('QPushButton {color:green}')
            self.ui.buttonMotorized.setText("Turn On")
            self.setAble('motors', True) 
        else:
            self.motorsOn = True
            self._wormFinder.launch = 0
            logger.info('motors on')
            self.ui.buttonMotorized.setStyleSheet('QPushButton {color:red}')
            self.ui.buttonMotorized.setText("Turn Off")
            self.setAble('motors', False) 
    def run ( self ):
        logger.info("Start worm finder")

        if self._wormFinder:
            self._wormFinder = None

        if not self._cap:
            self.setSource(self.ui.srcCombo.currentText())  # srcCombo, as set up in __init__

        self.centerPt = utils.Point(self.actualRes[0] / 2, self.actualRes[1] / 2)
        self.cropSize = 150
        
        self.finderArgs = {
            'gsize' :  45,
            'gsig' : 9,
            'window' : 3,
            'method' : 'lazyc',
            'MAXONEFRAME': 500,
            'REFPING' : 1000,
            'MAXREF': 1000,
            'cropSize': self.cropSize,
            'centerPt': self.centerPt,
            'servos': self.ebb,
            'actualRes': self.actualRes,
            'motorsOn': self.motorsOn
            }


        self._wormFinder = WormFinder( **self.finderArgs )     

        self.runTracking = True
        self.startTrackTime = time.time()
        self.showImage = True
        self.ui.buttonRun.setEnabled(False)
        self.ui.buttonRefresh.setEnabled(True) 
        if self.ebb:
            self.setAble('motorized', True )
        self.setAble('recording', True ) 
    
    def connectMotors( self ):
        logger.info("Connect Motors")
        if self._cap:
            try:
                logger.debug('width mm %d' % self.widthMM)
                self.ebb = EasyEBB(resolution = self.actualRes, sizeMM = self.widthMM)
                self.setAble('motors', True)
                self.setAble('connectMotors', False)
                self._cap.resetFPSestimate()
                if self._wormFinder:
                    self._wormFinder.servos = self.ebb
                    self.setAble('motorized', True) 
            except serial.SerialException as e:
                logger.exception(str(e))
                QtWidgets.QMessageBox.information( self, "Motors Issue",
                                                  "Unable to connect to motors. Please connect or reboot motors. If problem persists, reboot computer")

    def setSource ( self, value ):
        self.existingVid = False

        if type(value) != int:
            nums = re.compile(r'\d*')
            mat = re.match(nums,value).group(0)
            
            if mat:
                src = int(value)
            else:
                src = str(value)
                self.existingVid = True
        else: #read in from calibration
            src = value
            self.ui.srcCombo.setCurrentText(str(src))
            
        if self._cap:
            if self._cap._capture.isOpened(): #ugly access to VideoCapture object from managers
                self._cap._capture.release()

        
        self._cap = CaptureManager( cv2.VideoCapture(src) )
        self.showImage = True
        
        self.actualRes = self._cap.getResolution()
        
        if self.actualRes != self.calibrationRes and not self.existingVid:
            self._cap.setProp( 'height', self.calibrationRes[1] )
            self._cap.setProp( 'width', self.calibrationRes[0] )
        
        self.showImage = True

        self.actualRes = self._cap.getResolution()
        logger.warning("Actual resolution from cam is %s" % str( self.actualRes ) )
        self.setAble('source', False) 
        self.setAble('buttons', True)
        self.ui.buttonRefresh.setEnabled(False)

    def setLogging( self, value ):
        addFile = True
        formatter = logging.Formatter( '%(asctime)s\t%(levelname)s\t%(name)s\t\t%(message)s' )
        file = logging.FileHandler( '%s.log' % self.shareFileName )
        logger.warning('New file')
        file.setLevel( LOGGING_LEVELS['debug'] )
        file.setFormatter( formatter )
        
        console = logging.StreamHandler()
        console.setLevel( LOGGING_LEVELS['debug'] )
        console.setFormatter( formatter )
        
        for h in logger.handlers:
            if type(h) == logging.StreamHandler:
                logger.removeHandler( h ) 
            elif type(h) == logging.FileHandler: #if file has started, leave it be!!!
                addFile = False

        if addFile:
            logger.addHandler( file )

        logger.addHandler( console )
        self.newFileFLAG = False

    def setMultFactor ( self, value ):
        self.multFactor = value
        self.ui.scaling.setText( 'Step scaling is %d' % value)
   
   
    def readInCalibration( self ):
        c = os.path.dirname(os.getcwd())
        cc = os.path.dirname(c)
        f = open( ("%s/config.txt" % cc) , "r")
        line = f.readline()
        f.close()        
        logger.warning('line: %s' % str(line)  )
        w, h, widthMM, source = line.split('|')
        self.widthMM = float(widthMM)
        self.calibrationRes = [int(w), int(h)]
        if source is not None:
            self.setSource( source )



    def motors( self, direction):
        if self.ebb:
            xdir = 1
            ydir = 1 #up is down
            t = 100

            dir ={ 'left': (t, xdir * self.multFactor * (-1), 0 ) ,
                  'right': ( t, xdir * self.multFactor * (1), 0 ),
                  'up': (t, 0, ydir * self.multFactor * (1) ),
                  'down': (t, 0, ydir * self.multFactor * (-1) )
                  }

            #print dir[direction]
            th = threading.Thread(target  = self.ebb.move , args = dir[direction] )
            try:
                th.start()

            except Exception as e:
                logger.exception( str(e) )
    
    def resetAll ( self ):
        if self.ebb:
            self.ebb.closeSerial()
        if self._wormFinder:
            self._wormFinder = None
            self.runTracking = False
            cv2.destroyWindow('gaussian')
            
        if self._cap.isWritingVideo:
            self.record()
            self.ui.buttonRec.setStyleSheet('QPushButton {}')
#            self._cap.stopWritingVideo()

        if self._cap._capture.isOpened():
            self._cap._capture.release()

        self.showImage = False
        self.runTracking = False
        if self.motorsOn:
            self.motorized()
            self.motorsOn = False
            self.ui.buttonMotorized.setStyleSheet('QPushButton {}')
        self.ui.videoFrame.setText("Select source if video not displayed here on boot up")
        self.ui.fps.setText("")
        logger.info("Reset button pressed") 

        self.setAble('source', True)
        self.setAble('reset', False) 

    def closeEvent ( self, event):
        logger.debug("closing")
        if self.ebb:
            self.ebb.closeSerial()
        if self._cap.isWritingVideo:
            self._cap.stopWritingVideo()
        
        for h in logger.handlers:
            if type(h) == logging.FileHandler:
                h.close()
        
        try:
            cv2.destroyWindow('gaussian')
        except Exception as e:
            pass
            
    def isColor( self, imgIn ):
        s = np.shape(imgIn)
        if np.size(s) == 3:  # image has a third (channel) dimension
            try:
                ncolor = np.shape(imgIn)[2]
                boolt = int(ncolor) > 2
                return boolt
            except IndexError:
                if self.existingVid:
                    logger.warning('Video has ended')
                    self.existingVid = False
                    self.resetAll()
                else:
                    logger.warning('Issue with video input')
                    self.resetAll()
        else:
            return False
   
    def play( self ):
        
        if self.showImage:
            ## Get image from camera
            self._cap.enterFrame()
            self.currentFrame = self._cap.getFrame()
        
            self.color = self.isColor(self.currentFrame)

            t1 = time.time()

            if self.color:
                try:
                    self.currentFrame = cv2.cvtColor(self.currentFrame, cv2.COLOR_BGR2GRAY)
                except TypeError:
                    logger.exception("No Frame")
                finally:
                    self._cap.exitFrame()
            else:
                self._cap.exitFrame()
            
            if not self.runTracking: #no finder
                if self.showImage: #yes capture
                    self.ui.videoFrame.setPixmap(self._cap.convertFrame(self.currentFrame))
                    self.ui.videoFrame.setScaledContents(True)
            else: # yes finder
                ## Stop if too dark!!!
                if time.time() - self.startTrackTime < 5:
                    self.firstAvgPixIntensity = np.mean(self.currentFrame)
                    logger.warning('first avg int: %d' % self.firstAvgPixIntensity)
                ## Tracking procedure
                if time.time() - self._lastCheck >= self._sampleFreq:
                    gaussian = self._wormFinder.processFrame( self.currentFrame )
                    if gaussian is not None:
                        cv2.imshow( 'gaussian', gaussian )
                        
                    if self.motorsOn: #yes motorized
                        ## Stop motors if too dark
                        self.currentAvgPixIntensity = np.mean(self.currentFrame)
                        logger.warning('current avg int: %d' % self.currentAvgPixIntensity)
                        if self.currentAvgPixIntensity < self.firstAvgPixIntensity - 50:
                            logger.warning('Darkening of picture: motors turned off')
                            if self.motorsOn:
                                self.motorized()
                            if self._cap.isWritingVideo:
                                self.record()
                        else:
                            self._wormFinder.decideMove()
                    self._lastCheck = time.time()
                    self._wormFinder.drawDebugCropped( self.currentFrame)
                    
                    self.ui.videoFrame.setPixmap(self._cap.convertFrame(self.currentFrame))
                    self.ui.videoFrame.setScaledContents(True)

            if self._cap._fpsEstimate:
               self.ui.fps.setText( 'FPS: %0.2f' % ( self._cap._fpsEstimate ))

            if self.startRecTime:
                elapsedSec = time.time() - self.startRecTime
                elapsed = time.strftime("%H.%M.%S", time.gmtime(elapsedSec) ) 
                self.ui.lcdNumber.setNumDigits(8)
                self.ui.lcdNumber.display( elapsed )
            else:
                self.ui.lcdNumber.display("") 
        return
        
Exemplo n.º 59
0
#    Foobar is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with OpenSeizureDetector.  If not, see <http://www.gnu.org/licenses/>.
##############################################################################
#
from managers import CaptureManager
import depth

device = depth.CV_CAP_FREENECT
#device = 1
print "device=%d" % device
captureManager = CaptureManager(
    device, None, True)
captureManager.channel = depth.CV_CAP_OPENNI_BGR_IMAGE

captureManager.enterFrame()

#frame = captureManager.frame

captureManager.channel = depth.CV_CAP_OPENNI_DEPTH_MAP
captureManager.writeImage("kintest1.png")
captureManager.exitFrame()
captureManager.enterFrame()
captureManager.channel = depth.CV_CAP_OPENNI_BGR_IMAGE
captureManager.writeImage("kintest2.png")
captureManager.exitFrame()
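
# Note (added): CV_CAP_FREENECT, CV_CAP_FILE and the CV_CAP_OPENNI_* channel constants
# come from the project's own depth module (imported above), which wraps the Kinect /
# OpenNI capture backend used by this CaptureManager variant.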