def waitUntilListen(self):
    """Block in a 1-second poll loop, resuming passive listening whenever
    the system is not busy. Runs until the thread is stopped externally."""
    self.logger.debug(
        "waitUntilListen function is now monitoring the ENVIRON listen variable"
    )
    self.interrupted = False
    while True:
        self.logger.debug("waitUntilListen - listen variable is %s" %
                          str(self.ENVIRON['listen']))
        # only start a passive listen cycle when nothing else is running
        if not busyCheck(self.ENVIRON, self.logger):
            self.passiveListen()
        time.sleep(1)
    def startListenningActively(self):
        """Handle a hotword hit: run an active listen unless the system is
        busy, then drop back into passive monitoring."""
        # Lee AutoLevel - show the average noise level we are working with
        self.logger.debug("Current avg_noise is %s" %
                          self.ENVIRON["avg_noise"])

        if busyCheck(self.ENVIRON, self.logger):
            self.logger.debug("KEYWORD DETECTED. But we are busy so ignore it")
        else:
            # flag the system busy before kicking off the active listen
            busyOn(self.ENVIRON, self.logger)
            self.logger.debug("KEYWORD DETECTED. Beginning active listen ")
            self.activeListen()
        #go back to passive listening once ENVIRON["listen"] indicates it is OK
        self.waitUntilListen()
# --- Example #3 (scraped listing separator) ---
    def detectionEvent(self, lastAlert, camera):
        """React to a motion hit: raise a security alert or chat trigger if
        the alert delay has expired and the brain is not busy.

        Args:
            lastAlert: datetime of the previous alert.
            camera: the cv2 capture handle, passed through to securityWarn.
        Returns:
            The (possibly updated) lastAlert datetime.
        """
        # Check lastAlert to see if we need to trigger a new Alert for the motion
        curDTime = datetime.datetime.today()
        self.logger.debug("Motion detected at %s " % curDTime)
        diff = curDTime - lastAlert
        # BUGFIX: use total_seconds(); timedelta.seconds wraps at 24 hours, so a
        # multi-day gap would wrongly look like the delay had not expired.
        if diff.total_seconds() < self.delay:
            self.logger.debug(
                "Motion delay has not expired. %s seconds remaining." %
                str(self.delay - int(diff.total_seconds())))
        else:
            lastAlert = curDTime
            self.logger.debug(
                "Motion detected at %s and motion delay has expired" %
                curDTime)

            # Check listen loop to ensure brain is not busy with an event
            if busyCheck(self.ENVIRON, self.logger) == False:
                command = None
                # if in security camera mode then capture video
                #----------------------------------------------
                if self.ENVIRON["security"] == True:
                    self.logger.debug(
                        "Security mode is enabled so trigger security alert")
                    self.securityWarn(camera)
                # else check whether we should begin a chat loop
                #----------------------------------------------
                else:
                    self.logger.debug("Checking if we should trigger a chat")
                    # BUGFIX: this gap was initialised to int 0, and comparing an
                    # int to a timedelta raises TypeError on Python 3 whenever no
                    # chat is configured. Start from a zero timedelta instead.
                    chatGap = datetime.timedelta(0)
                    # only trigger a chat if we have a delay and a chat ID
                    if self.chatDelay > 0 and len(self.motionChat) > 0:
                        chatGap = curDTime - self.lastChat
                    if chatGap > datetime.timedelta(minutes=self.chatDelay):
                        self.lastChat = curDTime
                        command = 'CHATBOT:%s' % self.motionChat
                        # set system to indicate things are busy
                        busyOn(self.ENVIRON, self.logger)
                        self.logger.debug("Posting %s to Queue" % command)
                        self.SENSORQ.put(['brain', command])
        # pre-fetch the security chat text to regenerate file
        self.createFile()
        return lastAlert
# --- Example #4 (scraped listing separator) ---
    def detectMotion(self):
        """Watch the default camera and raise detectionEvent on motion.

        Loops until ENVIRON["motion"] is cleared. Uses frame differencing
        against a rolling reference frame; contours smaller than
        self.min_area are ignored. The camera is always released.
        """
        self.logger.debug("Starting to detect Motion")
        # define feed from camera
        camera = cv2.VideoCapture(0)
        time.sleep(1)
        # initialize variables used by the motion sensing
        firstFrame = None
        lastAlert = datetime.datetime.today()
        frames = 0

        try:
            # loop over the frames of the video feed and detect motion
            while True:
                # if we are busy processing a job then skip motion until we are done
                if busyCheck(self.ENVIRON, self.logger) == True:
                    # BUGFIX: sleep before retrying; a bare `continue` here
                    # spins this thread at 100% CPU for as long as we are busy
                    time.sleep(1)
                    continue

                # grab the current frame
                self.logger.debug("Getting another frame. ENVIRON listen = %s" %
                                  self.ENVIRON["listen"])
                (grabbed, frame) = camera.read()
                # BUGFIX: a failed read leaves frame as None, which would
                # crash imutils.resize below — skip and try again
                if not grabbed:
                    continue
                frames += 1

                # resize the frame, convert it to grayscale, and blur it
                frame = imutils.resize(frame, width=500)
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                gray = cv2.GaussianBlur(gray, (21, 21), 0)

                # if the first frame is None, initialize it
                if firstFrame is None:
                    firstFrame = gray
                    continue

                # compute the absolute difference between the current frame and reference
                frameDelta = cv2.absdiff(firstFrame, gray)
                thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
                # Update the reference frame
                firstFrame = gray
                # dilate the thresholded image to fill in holes, then find contours
                thresh = cv2.dilate(thresh, None, iterations=2)
                # OpenCV 3 returns (image, contours, hierarchy); OpenCV 2/4
                # return (contours, hierarchy). Catch the specific unpack
                # error instead of a bare except that hid real failures.
                try:
                    (_, cnts, _) = cv2.findContours(thresh.copy(),
                                                    cv2.RETR_EXTERNAL,
                                                    cv2.CHAIN_APPROX_SIMPLE)
                except ValueError:
                    (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                                 cv2.CHAIN_APPROX_SIMPLE)

                # loop over the contours
                for c in cnts:
                    # if the contour is too small, ignore it
                    if cv2.contourArea(c) < self.min_area:
                        continue
                    # motion must be detected, see if we need to trigger a new Alert
                    lastAlert = self.detectionEvent(lastAlert, camera)

                # check the ENVIRON when frame count reaches check point
                if frames > self.framesCheck:
                    self.logger.debug(
                        "Checking to see if we should stop detecting motion")
                    frames = 0
                    if not self.ENVIRON["motion"]:
                        self.logger.debug("Time to stop detecting motion")
                        break
        finally:
            # BUGFIX: release the camera even if an exception escapes the loop
            camera.release()
 def interrupt_callback(self):
     """Report whether the hotword detector should stop; flips the flag
     on once the system is marked busy."""
     if busyCheck(self.ENVIRON, self.logger):
         self.interrupted = True
     return self.interrupted
    def detectWithCam(self):
        """Detect motion on the default camera and only raise a detection
        event when the object detector confirms a person in the frame.

        Loops until ENVIRON["motion"] is cleared. The camera is always
        released on exit.
        """
        self.logger.debug("Starting to detect Motion")
        # variables for TensorFlow human detection
        model_path = os.path.join(self.TOPDIR, "client/objectDetect/ssd_mobilenet_v1_coco_2017_11_17/frozen_inference_graph.pb")
        detector = detectorAPI(self.ENVIRON, path_to_ckpt=model_path)

        # define feed from camera
        camera = cv2.VideoCapture(0)
        time.sleep(1)
        # initialize variables used by the motion sensing
        firstFrame = None
        lastAlert = datetime.datetime.today()
        frames = 0

        try:
            # loop over the frames of the video feed and detect motion
            while True:
                # if we are busy processing a job then skip motion until we are done
                if busyCheck(self.ENVIRON, self.logger) == True:
                    # BUGFIX: sleep before retrying; a bare `continue` here
                    # spins this thread at 100% CPU for as long as we are busy
                    time.sleep(1)
                    continue

                # grab the current frame
                self.logger.debug("Getting another frame. ENVIRON listen = %s" % self.ENVIRON["listen"])
                (grabbed, frame) = camera.read()
                # BUGFIX: a failed read leaves frame as None, which would
                # crash imutils.resize below — skip and try again
                if not grabbed:
                    continue
                frames += 1

                # resize the frame, convert it to grayscale, and blur it
                frame = imutils.resize(frame, width=500)
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                gray = cv2.GaussianBlur(gray, (21, 21), 0)

                # if the first frame is None, initialize it
                if firstFrame is None:
                    firstFrame = gray
                    continue

                # compute the absolute difference between the current frame and reference
                frameDelta = cv2.absdiff(firstFrame, gray)
                thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
                # Update the reference frame
                firstFrame = gray
                # dilate the thresholded image to fill in holes, then find contours
                thresh = cv2.dilate(thresh, None, iterations=2)
                # CONSISTENCY: handle both OpenCV findContours signatures, as
                # detectMotion does — OpenCV 3 returns three values
                try:
                    (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                except ValueError:
                    (_, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

                # loop over the contours
                for c in cnts:
                    # if the contour is too small, ignore it
                    if cv2.contourArea(c) < self.min_area:
                        continue
                    # motion detected, see if a person was detected by objectDetect
                    #------------------------------------------------------------
                    objDict = detector.objectCount(frame)
                    if 'person' in objDict:
                        lastAlert = self.detectionEvent(lastAlert, camera)

                # check the ENVIRON when frame count reaches check point
                if frames > self.framesCheck:
                    self.logger.debug("Checking to see if we should stop detecting motion")
                    frames = 0
                    if not self.ENVIRON["motion"]:
                        self.logger.debug("Time to stop detecting motion")
                        break
        finally:
            # BUGFIX: release the camera even if an exception escapes the loop
            camera.release()