Code example #1
    def __init__(self):
        self._windowManager = WindowManager('benFinder',
                                            self.onKeypress)
        device = depth.CV_CAP_FREENECT
        #device = 1
        print "device=%d" % device
        self._captureManager = CaptureManager(
            device, self._windowManager, True)
        self._captureManager.channel = depth.CV_CAP_OPENNI_BGR_IMAGE
        self._faceTracker = FaceTracker()
        self._shouldDrawDebugRects = False
        self._backgroundSubtract = False
        self._autoBackgroundSubtract = False
        self._curveFilter = filters.BGRPortraCurveFilter()
        self.background_video_img = None
        self.background_depth_img = None
        self.autoBackgroundImg = None
        self._ts = TimeSeries()
        self._frameCount = 0
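
The fragment above depends on several project modules that are not shown in these examples. A minimal sketch of the imports it appears to assume is given below; only the names used in the code (numpy, cv2, depth, filters, WindowManager, CaptureManager, FaceTracker, TimeSeries) are taken from the snippet, and the module paths themselves are guesses that may differ in the real project.

# Hypothetical import block for the __init__ fragment above.
# The module paths are assumptions; only the imported names appear in the code.
import numpy
import cv2
import depth                  # project module exposing the CV_CAP_* constants
import filters                # project module: getBenMask(), getMean(), BGRPortraCurveFilter
from managers import WindowManager, CaptureManager   # assumed module name
from trackers import FaceTracker                     # assumed module name
from timeseries import TimeSeries                    # assumed module name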
Code example #2
    def __init__(self, save=False, inFile=None):
        print "benFinder.__init__()"
        print os.path.realpath(__file__)
        configPath = "%s/%s" % (os.path.dirname(
            os.path.realpath(__file__)), self.configFname)
        print configPath
        self.cfg = ConfigUtil(configPath, self.configSection)

        self.debug = self.cfg.getConfigBool("debug")
        if (self.debug): print "Debug Mode"

        self._wkdir = self.cfg.getConfigStr("working_directory")
        if (self.debug): print "working_directory=%s\n" % self._wkdir
        self._tmpdir = self.cfg.getConfigStr("tmpdir")
        if (self.debug): print "tmpdir=%s\n" % self._tmpdir

        # Check if we are running from live kinect or a file.
        if (inFile):
            device = depth.CV_CAP_FILE
        else:
            device = depth.CV_CAP_FREENECT

        # Initialise the captureManager
        self._captureManager = CaptureManager(device,
                                              None,
                                              True,
                                              inFile=inFile)
        self._captureManager.channel = depth.CV_CAP_OPENNI_DEPTH_MAP

        # If we are running from a file, use the first frame as the
        # background image.
        if (inFile):
            self.saveBgImg()

        # If we have asked to save the background image, do that, and exit,
        # otherwise initialise the seizure detector.
        if (save):
            self.saveBgImg()
        else:
            self.loadBgImg()
            self.autoBackgroundImg = None
            self._status = self.ALARM_STATUS_OK
            self._ts = TimeSeries(
                tslen=self.cfg.getConfigInt("timeseries_length"))
            self._frameCount = 0
            self._outputFrameCount = 0
            self._nPeaks = 0
            self._ts_time = 0
            self._rate = 0
            self._ws = webServer.benWebServer(self)
            self._ws.setBgImg(
                "%s/%s" %
                (self._tmpdir, self.cfg.getConfigStr("background_depth")))
            self._ws.setChartImg(
                "%s/%s" % (self._tmpdir, self.cfg.getConfigStr("chart_fname")))
            self._ws.setRawImg(
                "%s/%s" %
                (self._tmpdir, self.cfg.getConfigStr("raw_image_fname")))
            self._ws.setMaskedImg(
                "%s/%s" %
                (self._tmpdir, self.cfg.getConfigStr("masked_image_fname")))
            self._ws.setDataFname(
                "%s/%s" % (self._tmpdir, self.cfg.getConfigStr("data_fname")))
            self._ws.setAnalysisResults({})
            webServer.setRoutes(self._ws)
            self.run()
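
The two keyword arguments of this constructor select the three modes visible above: capturing a background image, analysing a pre-recorded file, and running the live detector. A hypothetical entry point is sketched below; the argument handling and the file name are placeholders, and it assumes the BenFinder class shown in these examples.

# Hypothetical entry point -- argument handling and the file name are
# placeholders; the real project may drive this differently.
if __name__ == "__main__":
    # BenFinder(save=True)                      # grab a background image and exit
    # BenFinder(inFile="recorded_depth_data")   # analyse a pre-recorded file (placeholder name)
    BenFinder()                                 # run the live seizure detector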
Code example #3
class BenFinder(object):
    configFname = "config.ini"
    configSection = "benFinder"

    ALARM_STATUS_OK = 0  # All ok, no alarms.
    ALARM_STATUS_WARN = 1  # Warning status
    ALARM_STATUS_FULL = 2  # Full alarm status.
    ALARM_STATUS_NOT_FOUND = 3  # Benjamin not found in image
                                # (area below config area_threshold parameter)

    def __init__(self, save=False, inFile=None):
        print "benFinder.__init__()"
        print os.path.realpath(__file__)
        configPath = "%s/%s" % (os.path.dirname(
            os.path.realpath(__file__)), self.configFname)
        print configPath
        self.cfg = ConfigUtil(configPath, self.configSection)

        self.debug = self.cfg.getConfigBool("debug")
        if (self.debug): print "Debug Mode"

        self._wkdir = self.cfg.getConfigStr("working_directory")
        if (self.debug): print "working_directory=%s\n" % self._wkdir
        self._tmpdir = self.cfg.getConfigStr("tmpdir")
        if (self.debug): print "tmpdir=%s\n" % self._tmpdir

        # Check if we are running from live kinect or a file.
        if (inFile):
            device = depth.CV_CAP_FILE
        else:
            device = depth.CV_CAP_FREENECT

        # Initialise the captureManager
        self._captureManager = CaptureManager(device,
                                              None,
                                              True,
                                              inFile=inFile)
        self._captureManager.channel = depth.CV_CAP_OPENNI_DEPTH_MAP

        # If we are running from a file, use the first frame as the
        # background image.
        if (inFile):
            self.saveBgImg()

        # If we have asked to save the background image, do that, and exit,
        # otherwise initialise the seizure detector.
        if (save):
            self.saveBgImg()
        else:
            self.loadBgImg()
            self.autoBackgroundImg = None
            self._status = self.ALARM_STATUS_OK
            self._ts = TimeSeries(
                tslen=self.cfg.getConfigInt("timeseries_length"))
            self._frameCount = 0
            self._outputFrameCount = 0
            self._nPeaks = 0
            self._ts_time = 0
            self._rate = 0
            self._ws = webServer.benWebServer(self)
            self._ws.setBgImg(
                "%s/%s" %
                (self._tmpdir, self.cfg.getConfigStr("background_depth")))
            self._ws.setChartImg(
                "%s/%s" % (self._tmpdir, self.cfg.getConfigStr("chart_fname")))
            self._ws.setRawImg(
                "%s/%s" %
                (self._tmpdir, self.cfg.getConfigStr("raw_image_fname")))
            self._ws.setMaskedImg(
                "%s/%s" %
                (self._tmpdir, self.cfg.getConfigStr("masked_image_fname")))
            self._ws.setDataFname(
                "%s/%s" % (self._tmpdir, self.cfg.getConfigStr("data_fname")))
            self._ws.setAnalysisResults({})
            webServer.setRoutes(self._ws)
            self.run()

    def run(self):
        """Run the main loop."""
        while (True):
            self._captureManager.enterFrame()

            frame = self._captureManager.frame

            if frame is not None:
                if (self.autoBackgroundImg is None):
                    self.autoBackgroundImg = numpy.float32(frame)
                rawFrame = frame.copy()
                # First work out the region of interest by
                #    subtracting the fixed background image
                #    to create a mask.
                #print frame
                #print self._background_depth_img
                absDiff = cv2.absdiff(frame, self._background_depth_img)
                benMask, maskArea = filters.getBenMask(absDiff, 8)

                cv2.accumulateWeighted(frame, self.autoBackgroundImg, 0.05)
                # Convert the background image into the same format
                # as the main frame.
                #bg = self.autoBackgroundImg
                bg = cv2.convertScaleAbs(self.autoBackgroundImg, alpha=1.0)
                # Subtract the background from the frame image
                cv2.absdiff(frame, bg, frame)
                # Scale the difference image to make it more sensitive
                # to changes.
                cv2.convertScaleAbs(frame, frame, alpha=100)
                # Apply the mask so we only see the test subject.
                frame = cv2.multiply(frame, benMask, dst=frame, dtype=-1)

                if (maskArea <= self.cfg.getConfigInt('area_threshold')):
                    bri = (0, 0, 0)
                else:
                    # Calculate the brightness of the test subject.
                    bri = filters.getMean(frame, benMask)

                # Add the brightness to the time series ready for analysis.
                self._ts.addSamp(bri[0])
                self._ts.addImg(rawFrame)

                # Write timeseries to a file every 'output_framecount' frames.
                if (self._outputFrameCount >=
                        self.cfg.getConfigInt('output_framecount')):
                    # Write timeseries to file
                    self._ts.writeToFile("%s/%s" % \
                        ( self.cfg.getConfigStr('output_directory'),
                          self.cfg.getConfigStr('ts_fname')
                      ))
                    self._outputFrameCount = 0
                else:
                    self._outputFrameCount = self._outputFrameCount + 1

                # Only do the analysis every 15 frames (0.5 sec), or whatever
                # is specified in configuration file analysis_framecount
                # parameter.
                if (self._frameCount <
                        self.cfg.getConfigInt('analysis_framecount')):
                    self._frameCount = self._frameCount + 1
                else:
                    # Look for peaks in the brightness (=movement).
                    self._nPeaks, self._ts_time, self._rate = self._ts.findPeaks()
                    #print "%d peaks in %3.2f sec = %3.1f bpm" % \
                    #    (nPeaks,ts_time,rate)

                    oldStatus = self._status
                    if (maskArea > self.cfg.getConfigInt('area_threshold')):
                        # Check for alarm levels
                        if (self._rate > self.cfg.getConfigInt("rate_warn")):
                            self._status = self.ALARM_STATUS_OK
                        elif (self._rate >
                              self.cfg.getConfigInt("rate_alarm")):
                            self._status = self.ALARM_STATUS_WARN
                        else:
                            self._status = self.ALARM_STATUS_FULL
                    else:
                        self._status = self.ALARM_STATUS_NOT_FOUND


                    if (oldStatus == self.ALARM_STATUS_OK and
                        self._status == self.ALARM_STATUS_WARN) or \
                        (oldStatus == self.ALARM_STATUS_WARN and
                         self._status == self.ALARM_STATUS_FULL):
                        # Write timeseries to file
                        self._ts.writeToFile("%s/%s" % \
                            ( self.cfg.getConfigStr('output_directory'),
                              self.cfg.getConfigStr('alarm_ts_fname')
                          ),bgImg=self._background_depth_img)

                    # Collect the analysis results together and send them
                    # to the web server.
                    resultsDict = {}
                    resultsDict['fps'] = "%3.0f" % self.fps
                    resultsDict['bri'] = "%4.0f" % self._ts.mean
                    resultsDict['area'] = "%6.0f" % maskArea
                    resultsDict['nPeaks'] = "%d" % self._nPeaks
                    resultsDict['ts_time'] = self._ts_time
                    resultsDict['rate'] = "%d" % self._rate
                    resultsDict['time_t'] = time.ctime()
                    resultsDict['status'] = self._status
                    self._ws.setAnalysisResults(resultsDict)

                    # Write the results to file as a json string
                    utils.writeJSON(resultsDict,"%s/%s" % \
                                    (self._tmpdir,
                                     self.cfg.getConfigStr("data_fname")))
                    utils.writeLog(resultsDict,"%s/%s" % \
                                    (self._tmpdir,
                                     "benFinder_alarms.log"))
                    # Plot the graph of brightness, and save the images
                    # to disk.
                    self._ts.plotRawData(
                        file=True,
                        fname="%s/%s" % \
                        (self._tmpdir,self.cfg.getConfigStr("chart_fname")))

                    cv2.imwrite(
                        "%s/%s" % (self._tmpdir,
                                   self.cfg.getConfigStr("raw_image_fname")),
                        rawFrame)
                    cv2.imwrite(
                        "%s/%s" %
                        (self._tmpdir,
                         self.cfg.getConfigStr("masked_image_fname")), frame)
                    self._frameCount = 0
            else:
                print "Null frame received - assuming end of file and exiting"
                break
            self._captureManager.exitFrame()

    @property
    def fps(self):
        return self._captureManager.fps

    @property
    def nPeaks(self):
        return self._nPeaks

    @property
    def ts_time(self):
        return self._ts_time

    @property
    def rate(self):
        return self._rate

    @property
    def rawImgFname(self):
        return self.cfg.getConfigStr("raw_image_fname")

    @property
    def maskedImgFname(self):
        return self.cfg.getConfigStr("masked_image_fname")

    @property
    def chartImgFname(self):
        return self.cfg.getConfigStr("chart_fname")

    def saveBgImg(self):
        """ Write a new background image to the appropriate file location."""
        if (self._captureManager.hasEnteredFrame):
            self._captureManager.exitFrame()
        self._captureManager.enterFrame()
        print "Writing image to %s." % self.cfg.getConfigStr(
            "background_depth")
        self._captureManager.writeImage(
            "%s/%s" % (self._wkdir, self.cfg.getConfigStr("background_depth")))
        print self._captureManager.frame
        print self._captureManager.frame.dtype
        self._captureManager.exitFrame()
        self.loadBgImg()

    def loadBgImg(self):
        print "Loading background image %s/%s." % \
            (self._wkdir,self.cfg.getConfigStr("background_depth"))
        self._background_depth_img = cv2.imread("%s/%s" % \
                    (self._wkdir,self.cfg.getConfigStr("background_depth")),
                                                cv2.CV_LOAD_IMAGE_GRAYSCALE)
        #                                        cv2.CV_LOAD_IMAGE_UNCHANGED)
        print self._background_depth_img
        print self._background_depth_img.dtype
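
All of the settings used by this class are read through ConfigUtil from the [benFinder] section of config.ini. The key names in the sketch below are exactly those referenced in the code above; the values are placeholder guesses, and it is an assumption that ConfigUtil accepts ordinary ConfigParser-style files.

# Sketch: generate a config.ini containing every key the class reads.
# Key names come from the code above; all values are placeholder guesses.
import ConfigParser

cfg = ConfigParser.RawConfigParser()
cfg.add_section("benFinder")
for key, val in [("debug", "True"),
                 ("working_directory", "/home/pi/benFinder"),        # placeholder
                 ("tmpdir", "/tmp/benFinder"),                       # placeholder
                 ("output_directory", "/home/pi/benFinder/output"),  # placeholder
                 ("timeseries_length", "100"),
                 ("analysis_framecount", "15"),
                 ("output_framecount", "300"),
                 ("area_threshold", "1000"),
                 ("rate_warn", "10"),
                 ("rate_alarm", "5"),
                 ("background_depth", "background_depth.png"),
                 ("chart_fname", "chart.png"),
                 ("raw_image_fname", "rawImage.png"),
                 ("masked_image_fname", "maskedImage.png"),
                 ("data_fname", "data.json"),
                 ("ts_fname", "timeseries.csv"),
                 ("alarm_ts_fname", "alarm_timeseries.csv")]:
    cfg.set("benFinder", key, val)

with open("config.ini", "w") as f:
    cfg.write(f)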
Code example #4
class BenFinder(object):
    BACKGROUND_VIDEO_FNAME = "background_video.png"
    BACKGROUND_DEPTH_FNAME = "background_depth.png"
 
    def __init__(self):
        self._windowManager = WindowManager('benFinder',
                                             self.onKeypress)
        device = depth.CV_CAP_FREENECT
        #device = 1
        print "device=%d" % device
        self._captureManager = CaptureManager(
            device, self._windowManager, True)
        self._captureManager.channel = depth.CV_CAP_OPENNI_BGR_IMAGE
        self._faceTracker = FaceTracker()
        self._shouldDrawDebugRects = False
        self._backgroundSubtract = False
        self._autoBackgroundSubtract = False
        self._curveFilter = filters.BGRPortraCurveFilter()
        self.background_video_img = None
        self.background_depth_img = None
        self.autoBackgroundImg = None
        self._ts = TimeSeries()
        self._frameCount = 0
    
    def loadBackgroundImages(self):
        """ Load the background images to be used for background subtraction
        from disk files.
        """
        self.background_video_img = cv2.imread(BenFinder.BACKGROUND_VIDEO_FNAME)
        self.background_depth_img = cv2.imread(BenFinder.BACKGROUND_DEPTH_FNAME,
                                               cv2.CV_LOAD_IMAGE_GRAYSCALE)

    def showBackgroundImage(self):
        """ Display the background image used for subtraction in a separate window
        """
        # Load the images from disk if necessary.
        if (self.background_depth_img is None or self.background_video_img is None):
            self.loadBackgroundImages()
        # Display the correct image
        if (self._autoBackgroundSubtract):
            cv2.imshow("Auto Background Image", self.autoBackgroundImg)
        else:
            if (self._captureManager.channel == \
                depth.CV_CAP_OPENNI_DEPTH_MAP):
                cv2.imshow("background_depth_img",self.background_depth_img)
            elif (self._captureManager.channel == \
                  depth.CV_CAP_OPENNI_BGR_IMAGE):
                cv2.imshow("background_video_img",self.background_video_img)
            else:
                print "Error - Invalid Channel %d." % \
                    self._captureManager.channel

    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()

            frame = self._captureManager.frame
            
            if frame is not None:
                if (self._backgroundSubtract):
                    if (self._autoBackgroundSubtract):
                        if (self._captureManager.channel == \
                            depth.CV_CAP_OPENNI_DEPTH_MAP):
                            if (self.autoBackgroundImg is None):
                                self.autoBackgroundImg = numpy.float32(frame)
                            # First work out the region of interest by 
                            #    subtracting the fixed background image 
                            #    to create a mask.
                            absDiff = cv2.absdiff(frame,self.background_depth_img)
                            benMask,maskArea = filters.getBenMask(absDiff,8)

                            cv2.accumulateWeighted(frame,
                                                   self.autoBackgroundImg,
                                                   0.05)
                            # Convert the background image into the same format
                            # as the main frame.
                            bg = cv2.convertScaleAbs(self.autoBackgroundImg,
                                                     alpha=1.0)
                            # Subtract the background from the frame image
                            cv2.absdiff(frame,bg,frame)
                            # Scale the difference image to make it more sensitive
                            # to changes.
                            cv2.convertScaleAbs(frame,frame,alpha=100)
                            #frame = cv2.bitwise_and(frame,frame,dst=frame,mask=benMask)
                            frame = cv2.multiply(frame,benMask,dst=frame,dtype=-1)
                            bri = filters.getMean(frame,benMask)
                            #print "%4.0f, %3.0f" % (bri[0],self._captureManager.fps)
                            self._ts.addSamp(bri[0])
                            if (self._frameCount < 15):
                                self._frameCount = self._frameCount +1
                            else:
                                self._ts.plotRawData()
                                self._ts.findPeaks()
                                self._frameCount = 0
                        else:
                            print "Auto background subtract only works for depth images!"
                    else:
                        if (self._captureManager.channel == \
                            depth.CV_CAP_OPENNI_DEPTH_MAP):
                            cv2.absdiff(frame,self.background_depth_img,frame)
                            benMask,maskArea = filters.getBenMask(frame,8)
                            bri = filters.getMean(frame,benMask)
                            print bri
                        elif (self._captureManager.channel == \
                              depth.CV_CAP_OPENNI_BGR_IMAGE):
                            cv2.absdiff(frame,self.background_video_img,frame)
                        else:
                            print "Error - Invalid Channel %d." % \
                                self._captureManager.channel
                    #ret,frame = cv2.threshold(frame,200,255,cv2.THRESH_TOZERO)
                #self._faceTracker.update(frame)
                #faces = self._faceTracker.faces

                #if self._shouldDrawDebugRects:
                #    self._faceTracker.drawDebugRects(frame)
                            
            self._captureManager.exitFrame()
            self._windowManager.processEvents()
    
    def onKeypress(self, keycode):
        """Handle a keypress.
        
        space  -> Take a screenshot.
        tab    -> Start/stop recording a screencast.
        x      -> Start/stop drawing debug rectangles around faces.
        a      -> toggle automatic accumulated background subtraction on or off.
        b      -> toggle simple background subtraction on or off.
        s      -> Save current frame as background image.
        d      -> Toggle between video and depth map view
        i      -> Display the background image that is being used for subtraction.
        escape -> Quit.
        
        """
        print "keycode=%d" % keycode
        if keycode == 32: # space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9: # tab
            if not self._captureManager.isWritingVideo:
                print "Starting Video Recording..."
                self._captureManager.startWritingVideo(
                    'screencast.avi')
            else:
                print "Stopping video recording"
                self._captureManager.stopWritingVideo()
        elif keycode == 120: # x
            self._shouldDrawDebugRects = \
                not self._shouldDrawDebugRects
        elif (chr(keycode)=='a'):  # Automatic background subtraction
            if (self._autoBackgroundSubtract == True):
                print "Switching off auto background Subtraction"
                self.autoBackgroundImg = None
                self._autoBackgroundSubtract = False
            else:
                print "Switching on auto background subtraction"
                self._autoBackgroundSubtract = True
        elif (chr(keycode)=='b'):  # Simple background subtraction
            if (self._backgroundSubtract == True):
                print "Switching off background Subtraction"
                self._backgroundSubtract = False
            else:
                print "Switching on background subtraction"
                self.loadBackgroundImages()
                self._backgroundSubtract = True
        elif (chr(keycode)=='d'):
            if (self._captureManager.channel == depth.CV_CAP_OPENNI_BGR_IMAGE):
                print "switching to depth map..."
                self._captureManager.channel = depth.CV_CAP_OPENNI_DEPTH_MAP
            else:
                print "switching to video"
                self._captureManager.channel = depth.CV_CAP_OPENNI_BGR_IMAGE
        elif (chr(keycode)=='i'):
            self.showBackgroundImage()
        elif (chr(keycode)=='s'):
            print "Saving Background Image"
            if (self._captureManager.channel == depth.CV_CAP_OPENNI_DEPTH_MAP):
                self._captureManager.writeImage(BenFinder.BACKGROUND_DEPTH_FNAME)
            elif (self._captureManager.channel == depth.CV_CAP_OPENNI_BGR_IMAGE):
                self._captureManager.writeImage(BenFinder.BACKGROUND_VIDEO_FNAME)
            else:
                print "Invalid Channel %d - doing nothing!" \
                    % self._captureManager.channel
                

        elif keycode == 27: # escape
            self._windowManager.destroyWindow()
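
This interactive variant is driven entirely from the keyboard handler above, so a launcher needs nothing beyond constructing the class and calling run(). A hypothetical launcher:

# Hypothetical launcher for the interactive viewer -- the class creates its
# own window and capture manager, so running it is a single call.
if __name__ == "__main__":
    BenFinder().run()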
Code example #5
    def __init__(self,save=False, inFile = None):
        print "benFinder.__init__()"
        print os.path.realpath(__file__)
        configPath = "%s/%s" % (os.path.dirname(os.path.realpath(__file__)),
                                self.configFname)
        print configPath
        self.cfg = ConfigUtil(configPath,self.configSection)

        self.debug = self.cfg.getConfigBool("debug")
        if (self.debug): print "Debug Mode"

        self._wkdir = self.cfg.getConfigStr("working_directory")
        if (self.debug): print "working_directory=%s\n" % self._wkdir
        self._tmpdir = self.cfg.getConfigStr("tmpdir")
        if (self.debug): print "tmpdir=%s\n" % self._tmpdir


        # Check if we are running from live kinect or a file.
        if (inFile):
            device = depth.CV_CAP_FILE
        else:
            device = depth.CV_CAP_FREENECT

        # Initialise the captureManager
        self._captureManager = CaptureManager(
            device, None, True, inFile=inFile)
        self._captureManager.channel = depth.CV_CAP_OPENNI_DEPTH_MAP

        # If we are running from a file, use the first frame as the
        # background image.
        if (inFile):
            self.saveBgImg()

        # If we have asked to save the background image, do that, and exit,
        # otherwise initialise the seizure detector.
        if (save):
            self.saveBgImg()
        else:
            self.loadBgImg()
            self.autoBackgroundImg = None
            self._status = self.ALARM_STATUS_OK
            self._ts = TimeSeries(tslen=self.cfg.getConfigInt("timeseries_length"))
            self._frameCount = 0
            self._outputFrameCount = 0
            self._nPeaks = 0
            self._ts_time = 0
            self._rate = 0
            self._ws = webServer.benWebServer(self)
            self._ws.setBgImg("%s/%s" % (self._tmpdir,
                    self.cfg.getConfigStr("background_depth")))
            self._ws.setChartImg("%s/%s" % (self._tmpdir,
                    self.cfg.getConfigStr("chart_fname")))
            self._ws.setRawImg("%s/%s" % (self._tmpdir,
                    self.cfg.getConfigStr("raw_image_fname")))
            self._ws.setMaskedImg("%s/%s" % (self._tmpdir,
                    self.cfg.getConfigStr("masked_image_fname")))
            self._ws.setDataFname("%s/%s" % (self._tmpdir,
                    self.cfg.getConfigStr("data_fname")))
            self._ws.setAnalysisResults({})
            webServer.setRoutes(self._ws)
            self.run()
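
The webServer module itself does not appear in any of these examples. Judging only from the calls made here and in run(), it must provide at least the interface sketched below; this stub is inferred from the call sites and is not the project's real implementation, which presumably serves the stored paths and analysis results over HTTP.

# Stub of the webServer interface used above, inferred from the call sites.
class benWebServer(object):
    def __init__(self, finder):
        self._finder = finder
        self._results = {}

    def setBgImg(self, path): self._bgImg = path
    def setChartImg(self, path): self._chartImg = path
    def setRawImg(self, path): self._rawImg = path
    def setMaskedImg(self, path): self._maskedImg = path
    def setDataFname(self, path): self._dataFname = path
    def setAnalysisResults(self, results): self._results = results

def setRoutes(ws):
    # The real function presumably registers HTTP routes for the stored
    # paths and results; this stub does nothing.
    pass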
Code example #6
class BenFinder(object):
    configFname = "config.ini"
    configSection = "benFinder"

    ALARM_STATUS_OK = 0   # All ok, no alarms.
    ALARM_STATUS_WARN = 1 # Warning status
    ALARM_STATUS_FULL = 2 # Full alarm status. 
    ALARM_STATUS_NOT_FOUND = 3 # Benjamin not found in image 
                               # (area below config area_threshold parameter)

    def __init__(self,save=False, inFile = None):
        print "benFinder.__init__()"
        print os.path.realpath(__file__)
        configPath = "%s/%s" % (os.path.dirname(os.path.realpath(__file__)),
                                self.configFname)
        print configPath
        self.cfg = ConfigUtil(configPath,self.configSection)

        self.debug = self.cfg.getConfigBool("debug")
        if (self.debug): print "Debug Mode"

        self._wkdir = self.cfg.getConfigStr("working_directory")
        if (self.debug): print "working_directory=%s\n" % self._wkdir
        self._tmpdir = self.cfg.getConfigStr("tmpdir")
        if (self.debug): print "tmpdir=%s\n" % self._tmpdir


        # Check if we are running from live kinect or a file.
        if (inFile):
            device = depth.CV_CAP_FILE
        else:
            device = depth.CV_CAP_FREENECT

        # Initialise the captureManager
        self._captureManager = CaptureManager(
            device, None, True, inFile=inFile)
        self._captureManager.channel = depth.CV_CAP_OPENNI_DEPTH_MAP

        # If we are running from a file, use the first frame as the
        # background image.
        if (inFile):
            self.saveBgImg()

        # If we have asked to save the background image, do that, and exit,
        # otherwise initialise the seizure detector.
        if (save):
            self.saveBgImg()
        else:
            self.loadBgImg()
            self.autoBackgroundImg = None
            self._status = self.ALARM_STATUS_OK
            self._ts = TimeSeries(tslen=self.cfg.getConfigInt("timeseries_length"))
            self._frameCount = 0
            self._outputFrameCount = 0
            self._nPeaks = 0
            self._ts_time = 0
            self._rate = 0
            self._ws = webServer.benWebServer(self)
            self._ws.setBgImg("%s/%s" % (self._tmpdir,
                    self.cfg.getConfigStr("background_depth")))
            self._ws.setChartImg("%s/%s" % (self._tmpdir,
                    self.cfg.getConfigStr("chart_fname")))
            self._ws.setRawImg("%s/%s" % (self._tmpdir,
                    self.cfg.getConfigStr("raw_image_fname")))
            self._ws.setMaskedImg("%s/%s" % (self._tmpdir,
                    self.cfg.getConfigStr("masked_image_fname")))
            self._ws.setDataFname("%s/%s" % (self._tmpdir,
                    self.cfg.getConfigStr("data_fname")))
            self._ws.setAnalysisResults({})
            webServer.setRoutes(self._ws)
            self.run()
    
    def run(self):
        """Run the main loop."""
        while(True):
            self._captureManager.enterFrame()

            frame = self._captureManager.frame
            
            if frame is not None:
                if (self.autoBackgroundImg is None):
                    self.autoBackgroundImg = numpy.float32(frame)
                rawFrame = frame.copy()
                # First work out the region of interest by 
                #    subtracting the fixed background image 
                #    to create a mask.
                #print frame
                #print self._background_depth_img
                absDiff = cv2.absdiff(frame,self._background_depth_img)
                benMask,maskArea = filters.getBenMask(absDiff,8)

                cv2.accumulateWeighted(frame,
                                       self.autoBackgroundImg,0.05)
                # Convert the background image into the same format
                # as the main frame.
                #bg = self.autoBackgroundImg
                bg = cv2.convertScaleAbs(self.autoBackgroundImg,
                                         alpha=1.0)
                # Subtract the background from the frame image
                cv2.absdiff(frame,bg,frame)
                # Scale the difference image to make it more sensitive
                # to changes.
                cv2.convertScaleAbs(frame,frame,alpha=100)
                # Apply the mask so we only see the test subject.
                frame = cv2.multiply(frame,benMask,dst=frame,dtype=-1)

                if (maskArea <= self.cfg.getConfigInt('area_threshold')):
                    bri=(0,0,0)
                else:
                    # Calculate the brightness of the test subject.
                    bri = filters.getMean(frame,benMask)

                # Add the brightness to the time series ready for analysis.
                self._ts.addSamp(bri[0])
                self._ts.addImg(rawFrame)

                # Write timeseries to a file every 'output_framecount' frames.
                if (self._outputFrameCount >= self.cfg.getConfigInt('output_framecount')):
                    # Write timeseries to file
                    self._ts.writeToFile("%s/%s" % \
                        ( self.cfg.getConfigStr('output_directory'),
                          self.cfg.getConfigStr('ts_fname')
                      ))
                    self._outputFrameCount = 0
                else:
                    self._outputFrameCount = self._outputFrameCount + 1
                    

                # Only do the analysis every 15 frames (0.5 sec), or whatever
                # is specified in configuration file analysis_framecount
                # parameter.
                if (self._frameCount < self.cfg.getConfigInt('analysis_framecount')):
                    self._frameCount = self._frameCount +1
                else:
                    # Look for peaks in the brightness (=movement).
                    self._nPeaks,self._ts_time,self._rate = self._ts.findPeaks()
                    #print "%d peaks in %3.2f sec = %3.1f bpm" % \
                    #    (nPeaks,ts_time,rate)

                    oldStatus = self._status
                    if (maskArea > self.cfg.getConfigInt('area_threshold')):
                        # Check for alarm levels
                        if (self._rate > self.cfg.getConfigInt(
                                "rate_warn")):
                            self._status= self.ALARM_STATUS_OK
                        elif (self._rate > self.cfg.getConfigInt(
                                "rate_alarm")):
                            self._status= self.ALARM_STATUS_WARN
                        else:
                            self._status= self.ALARM_STATUS_FULL
                    else:
                        self._status = self.ALARM_STATUS_NOT_FOUND


                    if (oldStatus == self.ALARM_STATUS_OK and
                        self._status == self.ALARM_STATUS_WARN) or \
                        (oldStatus == self.ALARM_STATUS_WARN and 
                         self._status == self.ALARM_STATUS_FULL):
                                # Write timeseries to file
                                self._ts.writeToFile("%s/%s" % \
                                    ( self.cfg.getConfigStr('output_directory'),
                                      self.cfg.getConfigStr('alarm_ts_fname')
                                  ),bgImg=self._background_depth_img)
                        

                    # Collect the analysis results together and send them
                    # to the web server.
                    resultsDict = {}
                    resultsDict['fps'] = "%3.0f" % self.fps
                    resultsDict['bri'] = "%4.0f" % self._ts.mean
                    resultsDict['area'] = "%6.0f" % maskArea
                    resultsDict['nPeaks'] = "%d" % self._nPeaks
                    resultsDict['ts_time'] = self._ts_time
                    resultsDict['rate'] = "%d" % self._rate
                    resultsDict['time_t'] = time.ctime()
                    resultsDict['status'] = self._status
                    self._ws.setAnalysisResults(resultsDict)

                    # Write the results to file as a json string
                    utils.writeJSON(resultsDict,"%s/%s" % \
                                    (self._tmpdir,
                                     self.cfg.getConfigStr("data_fname")))
                    utils.writeLog(resultsDict,"%s/%s" % \
                                    (self._tmpdir,
                                     "benFinder_alarms.log"))
                    # Plot the graph of brightness, and save the images
                    # to disk.
                    self._ts.plotRawData(
                        file=True,
                        fname="%s/%s" % \
                        (self._tmpdir,self.cfg.getConfigStr("chart_fname")))
                        
                    cv2.imwrite("%s/%s" % (self._tmpdir,
                                           self.cfg.getConfigStr(
                                               "raw_image_fname")),
                                rawFrame)
                    cv2.imwrite("%s/%s" % (self._tmpdir,self.cfg.getConfigStr(
                        "masked_image_fname")),
                        frame)
                    self._frameCount = 0
            else:
                print "Null frame received - assuming end of file and exiting"
                break
            self._captureManager.exitFrame()
                

    @property
    def fps(self):
        return self._captureManager.fps

    @property
    def nPeaks(self):
        return self._nPeaks

    @property
    def ts_time(self):
        return self._ts_time

    @property
    def rate(self):
        return self._rate

    @property
    def rawImgFname(self):
        return self.cfg.getConfigStr("raw_image_fname")

    @property
    def maskedImgFname(self):
        return self.cfg.getConfigStr("masked_image_fname")

    @property
    def chartImgFname(self):
        return self.cfg.getConfigStr("chart_fname")

    def saveBgImg(self):
        """ Write a new background image to the appropriate file location."""
        if (self._captureManager.hasEnteredFrame):
            self._captureManager.exitFrame()
        self._captureManager.enterFrame()
        print "Writing image to %s." % self.cfg.getConfigStr("background_depth")
        self._captureManager.writeImage("%s/%s" % 
                                        (self._wkdir,
                                         self.cfg.getConfigStr("background_depth")
                                     ))
        print self._captureManager.frame
        print self._captureManager.frame.dtype
        self._captureManager.exitFrame()
        self.loadBgImg()

    def loadBgImg(self):
        print "Loading background image %s/%s." % \
            (self._wkdir,self.cfg.getConfigStr("background_depth"))
        self._background_depth_img = cv2.imread("%s/%s" % \
                    (self._wkdir,self.cfg.getConfigStr("background_depth")),
                                                cv2.CV_LOAD_IMAGE_GRAYSCALE)
        #                                        cv2.CV_LOAD_IMAGE_UNCHANGED)
        print self._background_depth_img
        print self._background_depth_img.dtype
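
As a porting note (not part of the original project): cv2.CV_LOAD_IMAGE_GRAYSCALE is the OpenCV 2.4-era flag, and on OpenCV 3 and later the equivalent constant is cv2.IMREAD_GRAYSCALE. The paths in the sketch below are placeholders.

# Equivalent background-image load on OpenCV 3+; behaviour matches the
# cv2.CV_LOAD_IMAGE_GRAYSCALE call used above.  Paths are placeholders.
import cv2

wkdir = "/home/pi/benFinder"        # placeholder working directory
fname = "background_depth.png"
background_depth_img = cv2.imread("%s/%s" % (wkdir, fname), cv2.IMREAD_GRAYSCALE)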