Ejemplo n.º 1
0
	def run(self, context: ConsumerContext):
		"""Detect a face on the incoming frame and forward the cropped region.

		On success, raises ``context.alert``, stores a short message in
		``context.alert_data`` and replaces ``context.data`` with the
		grayscale face crop. Always returns the context.
		"""
		if not self.initialized:
			self.initialize()

		frame = context.data
		context.alert = False

		if frame is None:
			# no frame arrived yet -- back off before the next poll
			FacedetectorConsumer.LOGGER.warning('No image')
			time.sleep(self.parameters['timeout'])
			return context

		frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
		detections = self.face_cascade.detectMultiScale(
			image=frame,
			scaleFactor=self.parameters['scale_factor'],
			minNeighbors=self.parameters['min_neighbors'])

		if len(detections) == 0:
			FacedetectorConsumer.LOGGER.debug('Could not detect any faces')
			return context

		# only the first detected face is processed further
		(x, y, w, h) = detections[0]
		context.alert = True
		context.alert_data = 'Face detected'
		context.data = frame[y:(y + h), x:(x + w)]
		FacedetectorConsumer.LOGGER.info(context.alert_data)
		return context
Ejemplo n.º 2
0
	def run(self, context: ConsumerContext):
		"""Try to recognize the face image held in ``context.data``.

		Clears ``context.alert`` and stores the recognized name on success;
		otherwise the alert stays raised with an explanatory message.
		"""
		if not self.initialized:
			self.initialize()

		# the payload is expected to be the detected face image
		detected_face = context.data
		context.alert = True

		if detected_face is None:
			FacerecognizerConsumer.LOGGER.warning('Face was not provided (is None)')
			return context

		side = self.parameters['size']
		resized = cv2.resize(detected_face, (side, side))
		name = self.recognize(resized)
		if name is None:
			context.alert_data = 'Cannot recognize face'
		else:
			context.alert = False
			context.alert_data = name
		return context
Ejemplo n.º 3
0
	def run(self, context: ConsumerContext):
		"""Raise an alert on the context when motion is detected.

		Motion is detected by thresholding the absolute difference between
		the current (blurred, grayscale) frame and the previous one and
		checking whether any resulting contour is large enough. The first
		frame only primes ``self.previous_frame`` and never alerts.
		Always returns the context.
		"""
		img = context.data
		context.alert = False

		if img is None:
			MotiondetectorConsumer.LOGGER.debug('No image')
			time.sleep(self.parameters['timeout'])
			return context

		frame = cv2.resize(img, (self.parameters['resize_width'], self.parameters['resize_height']))
		gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
		# blur to suppress sensor noise before differencing
		gray = cv2.GaussianBlur(gray, (21, 21), 0)

		# if the first frame is None, initialize it
		if self.previous_frame is None:
			self.previous_frame = gray
			return context

		# compute the absolute difference between the current frame and previous frame
		frame_delta = cv2.absdiff(self.previous_frame, gray)
		(_, thresh) = cv2.threshold(src=frame_delta,
									thresh=self.parameters['threshold'],
									maxval=self.parameters['threshold_max_val'],
									type=cv2.THRESH_BINARY)

		# dilate the image to fill in holes, then find contours on image
		thresh = cv2.dilate(thresh, None, iterations=self.parameters['dilate_iteration'])
		# cv2.findContours returns (image, contours, hierarchy) on OpenCV 3.x
		# but (contours, hierarchy) on OpenCV 2.4 / 4.x -- taking [-2] is
		# correct on every version instead of hard-coding a 3-tuple unpack
		contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]

		# one sufficiently large contour counts as motion
		motion_detected = any(cv2.contourArea(c) > self.parameters['area_threshold'] for c in contours)
		if motion_detected:
			context.alert = True
			context.alert_data = 'Motion detected'
			MotiondetectorConsumer.LOGGER.debug(context.alert_data)
		else:
			MotiondetectorConsumer.LOGGER.debug('No motion was detected')

		self.previous_frame = gray
		return context
Ejemplo n.º 4
0
    def run(self, context: ConsumerContext):
        """Run the neural-network recognizer on the face in ``context.data``.

        A positive recognition clears the alert; a negative one keeps it
        raised and attaches the encoded image as the alert payload.
        Always returns the context.
        """
        if not self.initialized:
            self.initialize()

        # the payload is expected to be the detected face image
        face = context.data
        context.alert = True

        if face is None:
            NnrecognizerConsumer.LOGGER.warning(
                'Face was not provided (is None)')
            context.alert_data = 'No face provided'
            return context

        NnrecognizerConsumer.LOGGER.info('Running face recognition...')
        recognized = self.recognize(face)
        if recognized:
            context.alert = False
            context.alert_data = 'Positive recognition'
            NnrecognizerConsumer.LOGGER.info(context.alert_data)
        else:
            context.alert_data = NnrecognizerConsumer.img_to_str(face)
            NnrecognizerConsumer.LOGGER.info('Negative recognition')
        return context
Ejemplo n.º 5
0
    def run(self, context: ConsumerContext):
        """Transcribe the audio in ``context.data`` and toggle the zone it names.

        The transcript is matched against the known zone names; the words
        'on'/'off' in the same utterance select the desired state, and the
        zone is toggled only when it is not already in that state.
        Always sleeps 3 seconds before returning the context.
        """
        if not self.initialized:
            self.initialize()

        audio = context.data
        context.alert = False
        zones = self.zone_manager.get_zones()

        if audio:
            try:
                voice_recognition = self.VoiceRecognizer.recognize_google(
                    audio)
                VoicerecognizerConsumer.LOGGER.info('You said: ' +
                                                    voice_recognition)

                # Search in the zone dictionary for the word the user said
                for zone_name, active in zones.items():
                    if zone_name not in voice_recognition:
                        continue
                    # look for the requested state in the utterance, on or off
                    if 'off' in voice_recognition:
                        if not active:
                            VoicerecognizerConsumer.LOGGER.info(
                                zone_name + ' is already inactive')
                        else:
                            self.zone_manager.toggle_zone(zone_name)
                        break
                    if 'on' in voice_recognition:
                        if active:
                            VoicerecognizerConsumer.LOGGER.info(
                                zone_name + ' is already active')
                        else:
                            self.zone_manager.toggle_zone(zone_name)
                        break
            except sr.UnknownValueError:
                VoicerecognizerConsumer.LOGGER.info(
                    'Voicerecognizer could not understand audio')
            except sr.RequestError as e:
                # report through the class logger, consistent with every
                # other message, instead of a bare print
                VoicerecognizerConsumer.LOGGER.error(
                    "Could not request results from Speech Recognition service; {0}"
                    .format(e))
        time.sleep(3)
        return context