Code Example #1
    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()

            frame = self._captureManager.frame
            
            if frame is not None:
                if (self._backgroundSubtract):
                    if (self._autoBackgroundSubtract):
                        if (self._captureManager.channel == \
                            depth.CV_CAP_OPENNI_DEPTH_MAP):
                            if self.autoBackgroundImg is None:
                                self.autoBackgroundImg = numpy.float32(frame)
                            # First work out the region of interest by 
                            #    subtracting the fixed background image 
                            #    to create a mask.
                            absDiff = cv2.absdiff(frame,self.background_depth_img)
                            benMask,maskArea = filters.getBenMask(absDiff,8)

                            cv2.accumulateWeighted(frame,
                                                   self.autoBackgroundImg,
                                                   0.05)
                            # Convert the background image into the same format
                            # as the main frame.
                            bg = cv2.convertScaleAbs(self.autoBackgroundImg,
                                                     alpha=1.0)
                            # Subtract the background from the frame image
                            cv2.absdiff(frame,bg,frame)
                            # Scale the difference image to make it more sensitive
                            # to changes.
                            cv2.convertScaleAbs(frame,frame,alpha=100)
                            #frame = cv2.bitwise_and(frame,frame,dst=frame,mask=benMask)
                            frame = cv2.multiply(frame,benMask,dst=frame,dtype=-1)
                            bri = filters.getMean(frame,benMask)
                            #print "%4.0f, %3.0f" % (bri[0],self._captureManager.fps)
                            self._ts.addSamp(bri[0])
                            if (self._frameCount < 15):
                                self._frameCount = self._frameCount + 1
                            else:
                                self._ts.plotRawData()
                                self._ts.findPeaks()
                                self._frameCount = 0
                        else:
                            print("Auto background subtract only works for depth images!")
                    else:
                        if (self._captureManager.channel == \
                            depth.CV_CAP_OPENNI_DEPTH_MAP):
                            cv2.absdiff(frame,self.background_depth_img,frame)
                            benMask = filters.getBenMask(frame,8)
                            bri = filters.getMean(frame,benMask)
                            print(bri)
                        elif (self._captureManager.channel == \
                              depth.CV_CAP_OPENNI_BGR_IMAGE):
                            cv2.absdiff(frame,self.background_video_img,frame)
                        else:
                            print("Error - Invalid Channel %d." %
                                  self._captureManager.channel)
                    #ret,frame = cv2.threshold(frame,200,255,cv2.THRESH_TOZERO)
                #self._faceTracker.update(frame)
                #faces = self._faceTracker.faces

                #if self._shouldDrawDebugRects:
                #    self._faceTracker.drawDebugRects(frame)
                            
            self._captureManager.exitFrame()
            self._windowManager.processEvents()
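
The core of Code Example #1 is an exponentially weighted running-average background model: each frame is blended into a float32 background with cv2.accumulateWeighted, the background is converted back to the frame's format, and the scaled difference is masked down to the region of interest. The sketch below isolates just that idea with plain OpenCV and NumPy, without the project's captureManager and filters helpers; the camera source, blending factor and threshold value are illustrative assumptions, not values taken from the project.

import cv2
import numpy

cap = cv2.VideoCapture(0)   # assumption: any camera or video file will do
background = None           # float32 running-average background model

while True:
    ok, frame = cap.read()
    if not ok:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    if background is None:
        background = numpy.float32(gray)

    # Blend the current frame into the running-average background.
    cv2.accumulateWeighted(gray, background, 0.05)

    # Convert the background back to 8-bit so it matches the frame format.
    bg = cv2.convertScaleAbs(background, alpha=1.0)

    # Difference against the background, scaled up to emphasise small
    # changes, then thresholded into a rough foreground mask.
    diff = cv2.absdiff(gray, bg)
    diff = cv2.convertScaleAbs(diff, alpha=10)
    _, mask = cv2.threshold(diff, 32, 255, cv2.THRESH_BINARY)

    cv2.imshow("foreground mask", mask)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()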
Code Example #2
    def run(self):
        """Run the main loop."""
        while (True):
            self._captureManager.enterFrame()

            frame = self._captureManager.frame

            if frame is not None:
                if self.autoBackgroundImg is None:
                    self.autoBackgroundImg = numpy.float32(frame)
                rawFrame = frame.copy()
                # First work out the region of interest by
                #    subtracting the fixed background image
                #    to create a mask.
                #print frame
                #print self._background_depth_img
                absDiff = cv2.absdiff(frame, self._background_depth_img)
                benMask, maskArea = filters.getBenMask(absDiff, 8)

                cv2.accumulateWeighted(frame, self.autoBackgroundImg, 0.05)
                # Convert the background image into the same format
                # as the main frame.
                #bg = self.autoBackgroundImg
                bg = cv2.convertScaleAbs(self.autoBackgroundImg, alpha=1.0)
                # Subtract the background from the frame image
                cv2.absdiff(frame, bg, frame)
                # Scale the difference image to make it more sensitive
                # to changes.
                cv2.convertScaleAbs(frame, frame, alpha=100)
                # Apply the mask so we only see the test subject.
                frame = cv2.multiply(frame, benMask, dst=frame, dtype=-1)

                if (maskArea <= self.cfg.getConfigInt('area_threshold')):
                    bri = (0, 0, 0)
                else:
                    # Calculate the brightness of the test subject.
                    bri = filters.getMean(frame, benMask)

                # Add the brightness to the time series ready for analysis.
                self._ts.addSamp(bri[0])
                self._ts.addImg(rawFrame)

                # Write timeseries to a file every 'output_framecount' frames.
                if (self._outputFrameCount >=
                        self.cfg.getConfigInt('output_framecount')):
                    # Write timeseries to file
                    self._ts.writeToFile("%s/%s" % \
                        ( self.cfg.getConfigStr('output_directory'),
                          self.cfg.getConfigStr('ts_fname')
                      ))
                    self._outputFrameCount = 0
                else:
                    self._outputFrameCount = self._outputFrameCount + 1

                # Only do the analysis every 15 frames (0.5 sec), or whatever
                # is specified in configuration file analysis_framecount
                # parameter.
                if (self._frameCount <
                        self.cfg.getConfigInt('analysis_framecount')):
                    self._frameCount = self._frameCount + 1
                else:
                    # Look for peaks in the brightness (=movement).
                    self._nPeaks, self._ts_time, self._rate = self._ts.findPeaks(
                    )
                    #print "%d peaks in %3.2f sec = %3.1f bpm" % \
                    #    (nPeaks,ts_time,rate)

                    oldStatus = self._status
                    if (maskArea > self.cfg.getConfigInt('area_threshold')):
                        # Check for alarm levels
                        if (self._rate > self.cfg.getConfigInt("rate_warn")):
                            self._status = self.ALARM_STATUS_OK
                        elif (self._rate >
                              self.cfg.getConfigInt("rate_alarm")):
                            self._status = self.ALARM_STATUS_WARN
                        else:
                            self._status = self.ALARM_STATUS_FULL
                    else:
                        self._status = self.ALARM_STATUS_NOT_FOUND


                    if (oldStatus == self.ALARM_STATUS_OK and
                        self._status == self.ALARM_STATUS_WARN) or \
                        (oldStatus == self.ALARM_STATUS_WARN and
                         self._status == self.ALARM_STATUS_FULL):
                        # Write timeseries to file
                        self._ts.writeToFile("%s/%s" % \
                            ( self.cfg.getConfigStr('output_directory'),
                              self.cfg.getConfigStr('alarm_ts_fname')
                          ),bgImg=self._background_depth_img)

                    # Collect the analysis results together and send them
                    # to the web server.
                    resultsDict = {}
                    resultsDict['fps'] = "%3.0f" % self.fps
                    resultsDict['bri'] = "%4.0f" % self._ts.mean
                    resultsDict['area'] = "%6.0f" % maskArea
                    resultsDict['nPeaks'] = "%d" % self._nPeaks
                    resultsDict['ts_time'] = self._ts_time
                    resultsDict['rate'] = "%d" % self._rate
                    resultsDict['time_t'] = time.ctime()
                    resultsDict['status'] = self._status
                    self._ws.setAnalysisResults(resultsDict)

                    # Write the results to file as a json string
                    utils.writeJSON(resultsDict,"%s/%s" % \
                                    (self._tmpdir,
                                     self.cfg.getConfigStr("data_fname")))
                    utils.writeLog(resultsDict,"%s/%s" % \
                                    (self._tmpdir,
                                     "benFinder_alarms.log"))
                    # Plot the graph of brightness, and save the images
                    # to disk.
                    self._ts.plotRawData(
                        file=True,
                        fname="%s/%s" % \
                        (self._tmpdir,self.cfg.getConfigStr("chart_fname")))

                    cv2.imwrite(
                        "%s/%s" % (self._tmpdir,
                                   self.cfg.getConfigStr("raw_image_fname")),
                        rawFrame)
                    cv2.imwrite(
                        "%s/%s" %
                        (self._tmpdir,
                         self.cfg.getConfigStr("masked_image_fname")), frame)
                    self._frameCount = 0
            else:
                print("Null frame received - assuming end of file and exiting")
                break
            self._captureManager.exitFrame()
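
Code Example #2 turns the detected movement rate into an alarm status using three configuration values (rate_warn, rate_alarm and area_threshold), and writes the time series out whenever the status degrades. A stripped-down sketch of that classification step, with the configuration lookups replaced by plain arguments, might look like the following; the status constants mirror the names used above but their values, and the default thresholds, are placeholders rather than the project's configured settings.

ALARM_STATUS_OK = 0
ALARM_STATUS_WARN = 1
ALARM_STATUS_FULL = 2
ALARM_STATUS_NOT_FOUND = 3


def classify_status(rate, mask_area, rate_warn=40, rate_alarm=20,
                    area_threshold=1000):
    """Map a movement rate and mask area onto an alarm status.

    If the mask is too small the subject is not visible (NOT_FOUND);
    otherwise rates above rate_warn are OK, rates between rate_alarm and
    rate_warn are WARN, and anything lower is FULL.
    """
    if mask_area <= area_threshold:
        return ALARM_STATUS_NOT_FOUND
    if rate > rate_warn:
        return ALARM_STATUS_OK
    elif rate > rate_alarm:
        return ALARM_STATUS_WARN
    else:
        return ALARM_STATUS_FULL


def status_degraded(old_status, new_status):
    """True when the transition should trigger saving the alarm time series
    (OK -> WARN or WARN -> FULL), mirroring the test in the loop above."""
    return ((old_status == ALARM_STATUS_OK and
             new_status == ALARM_STATUS_WARN) or
            (old_status == ALARM_STATUS_WARN and
             new_status == ALARM_STATUS_FULL))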
Code Example #3
    def run(self):
        """Run the main loop."""
        while(True):
            self._captureManager.enterFrame()

            frame = self._captureManager.frame
            
            if frame is not None:
                if self.autoBackgroundImg is None:
                    self.autoBackgroundImg = numpy.float32(frame)
                rawFrame = frame.copy()
                # First work out the region of interest by 
                #    subtracting the fixed background image 
                #    to create a mask.
                #print frame
                #print self._background_depth_img
                absDiff = cv2.absdiff(frame,self._background_depth_img)
                benMask,maskArea = filters.getBenMask(absDiff,8)

                cv2.accumulateWeighted(frame,
                                       self.autoBackgroundImg,0.05)
                # Convert the background image into the same format
                # as the main frame.
                #bg = self.autoBackgroundImg
                bg = cv2.convertScaleAbs(self.autoBackgroundImg,
                                         alpha=1.0)
                # Subtract the background from the frame image
                cv2.absdiff(frame,bg,frame)
                # Scale the difference image to make it more sensitive
                # to changes.
                cv2.convertScaleAbs(frame,frame,alpha=100)
                # Apply the mask so we only see the test subject.
                frame = cv2.multiply(frame,benMask,dst=frame,dtype=-1)

                if (maskArea <= self.cfg.getConfigInt('area_threshold')):
                    bri = (0,0,0)
                else:
                    # Calculate the brightness of the test subject.
                    bri = filters.getMean(frame,benMask)

                # Add the brightness to the time series ready for analysis.
                self._ts.addSamp(bri[0])
                self._ts.addImg(rawFrame)

                # Write timeseries to a file every 'output_framecount' frames.
                if (self._outputFrameCount >= self.cfg.getConfigInt('output_framecount')):
                    # Write timeseries to file
                    self._ts.writeToFile("%s/%s" % \
                        ( self.cfg.getConfigStr('output_directory'),
                          self.cfg.getConfigStr('ts_fname')
                      ))
                    self._outputFrameCount = 0
                else:
                    self._outputFrameCount = self._outputFrameCount + 1
                    

                # Only do the analysis every 15 frames (0.5 sec), or whatever
                # is specified in configuration file analysis_framecount
                # parameter.
                if (self._frameCount < self.cfg.getConfigInt('analysis_framecount')):
                    self._frameCount = self._frameCount + 1
                else:
                    # Look for peaks in the brightness (=movement).
                    self._nPeaks,self._ts_time,self._rate = self._ts.findPeaks()
                    #print "%d peaks in %3.2f sec = %3.1f bpm" % \
                    #    (nPeaks,ts_time,rate)

                    oldStatus = self._status
                    if (maskArea > self.cfg.getConfigInt('area_threshold')):
                        # Check for alarm levels
                        if (self._rate > self.cfg.getConfigInt(
                                "rate_warn")):
                            self._status = self.ALARM_STATUS_OK
                        elif (self._rate > self.cfg.getConfigInt(
                                "rate_alarm")):
                            self._status = self.ALARM_STATUS_WARN
                        else:
                            self._status = self.ALARM_STATUS_FULL
                    else:
                        self._status = self.ALARM_STATUS_NOT_FOUND


                    if (oldStatus == self.ALARM_STATUS_OK and
                        self._status == self.ALARM_STATUS_WARN) or \
                        (oldStatus == self.ALARM_STATUS_WARN and 
                         self._status == self.ALARM_STATUS_FULL):
                        # Write timeseries to file
                        self._ts.writeToFile("%s/%s" % \
                            ( self.cfg.getConfigStr('output_directory'),
                              self.cfg.getConfigStr('alarm_ts_fname')
                          ),bgImg=self._background_depth_img)

                    # Collect the analysis results together and send them
                    # to the web server.
                    resultsDict = {}
                    resultsDict['fps'] = "%3.0f" % self.fps
                    resultsDict['bri'] = "%4.0f" % self._ts.mean
                    resultsDict['area'] = "%6.0f" % maskArea
                    resultsDict['nPeaks'] = "%d" % self._nPeaks
                    resultsDict['ts_time'] = self._ts_time
                    resultsDict['rate'] = "%d" % self._rate
                    resultsDict['time_t'] = time.ctime()
                    resultsDict['status'] = self._status
                    self._ws.setAnalysisResults(resultsDict)

                    # Write the results to file as a json string
                    utils.writeJSON(resultsDict,"%s/%s" % \
                                    (self._tmpdir,
                                     self.cfg.getConfigStr("data_fname")))
                    utils.writeLog(resultsDict,"%s/%s" % \
                                    (self._tmpdir,
                                     "benFinder_alarms.log"))
                    # Plot the graph of brightness, and save the images
                    # to disk.
                    self._ts.plotRawData(
                        file=True,
                        fname="%s/%s" % \
                        (self._tmpdir,self.cfg.getConfigStr("chart_fname")))
                        
                    cv2.imwrite("%s/%s" % (self._tmpdir,
                                           self.cfg.getConfigStr(
                                               "raw_image_fname")),
                                rawFrame)
                    cv2.imwrite("%s/%s" % (self._tmpdir,self.cfg.getConfigStr(
                        "masked_image_fname")),
                        frame)
                    self._frameCount = 0
            else:
                print("Null frame received - assuming end of file and exiting")
                break
            self._captureManager.exitFrame()
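
Code Example #3 is the same loop as Code Example #2 with only formatting differences. The masking and brightness measurement both versions rely on live in the project's filters module (getBenMask and getMean), which is not shown in these listings; a generic equivalent of that "mask, then measure brightness" step could be sketched as below, assuming an 8-bit single-channel difference image, with the default threshold value being illustrative only.

import cv2


def get_mask_and_area(abs_diff, thresh=8):
    """Binary mask of pixels that differ from the background by more than
    thresh, plus the number of pixels inside the mask."""
    _, mask = cv2.threshold(abs_diff, thresh, 255, cv2.THRESH_BINARY)
    return mask, cv2.countNonZero(mask)


def get_masked_mean(frame, mask):
    """Mean brightness of the frame over the masked pixels only.
    cv2.mean() returns a 4-tuple, so element [0] corresponds to the
    bri[0] value added to the time series above."""
    return cv2.mean(frame, mask=mask)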