Ejemplo n.º 1
0
def produce_training_data(who: str, timeout: int = 2000):
    """Capture faces from the default webcam and save them as training images.

    Frames are read in a loop and passed through the face detector consumer.
    Whenever the consumer raises an alert (a face was found), the face crop
    is written to ``resources/train/<who>/<who>.<n>.png``.  Capturing stops
    when any key is pressed in the preview window.

    :param who: person's name; used both as directory and file-name prefix
    :param timeout: delay in milliseconds between frames (cv2.waitKey)
    """
    output_dir = os.sep.join(['resources/train', who])
    output = os.sep.join([output_dir, who])
    # exist_ok avoids crashing when training data for `who` already exists
    os.makedirs(output_dir, exist_ok=True)

    context = ConsumerContext(None, False)
    detector_consumer = FacedetectorConsumer(set_detector_parameters())
    cap = cv2.VideoCapture(0)

    try:
        count = -1
        face = None
        while cv2.waitKey(timeout) == -1:
            success, frame = cap.read()
            if success:
                context.data = frame
                detector_consumer.run(context)
                face = context.data
            # guard on `face` so a failed read can never save a stale/unset frame
            if context.alert and face is not None:
                count += 1
                cv2.imwrite(filename=output + '.' + str(count) + '.png',
                            img=face)
                cv2.imshow('Saved', face)
    finally:
        cap.release()
        cv2.destroyAllWindows()
Ejemplo n.º 2
0
	def run(self, context: ConsumerContext):
		"""Detect a face on the frame carried by the context.

		Converts the frame to grayscale, runs the Haar cascade and, when at
		least one face is found, replaces the context data with the cropped
		(grayscale) face region and raises the alert flag.
		"""
		if not self.initialized:
			self.initialize()

		context.alert = False
		frame = context.data

		if frame is None:
			FacedetectorConsumer.LOGGER.warning('No image')
			time.sleep(self.parameters['timeout'])
			return context

		gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
		faces = self.face_cascade.detectMultiScale(
			image=gray,
			scaleFactor=self.parameters['scale_factor'],
			minNeighbors=self.parameters['min_neighbors'])

		if len(faces) == 0:
			FacedetectorConsumer.LOGGER.debug('Could not detect any faces')
			return context

		# Keep only the first detected face and crop it out of the frame
		(x, y, w, h) = faces[0]
		context.alert = True
		context.alert_data = 'Face detected'
		context.data = gray[y:(y + h), x:(x + w)]
		FacedetectorConsumer.LOGGER.info(context.alert_data)
		return context
Ejemplo n.º 3
0
def test():
    """Manual end-to-end check: detect and recognize faces from the webcam.

    Runs until a key is pressed in the preview window.  Each frame with a
    detected face is passed to the recognizer and shown annotated with the
    recognition result.
    """
    # Given
    detector_consumer = FacedetectorConsumer(set_detector_parameters())
    recognizer_consumer = FacerecognizerConsumer(set_parameters())
    context = ConsumerContext(None, False)
    cap = cv2.VideoCapture(0)

    # When
    try:
        face = None
        while cv2.waitKey(50) == -1:
            success, frame = cap.read()
            # Detection
            if success:
                context.data = frame
                detector_consumer.run(context)
                face = context.data
            # Recognition; the `face is not None` guard prevents a
            # NameError/stale frame when a read fails while alert is raised
            if context.alert and face is not None:
                recognizer_consumer.run(context)
                face = resize_image(face, 300)
                cv2.putText(face, context.alert_data, (5, 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
                cv2.imshow('Face', face)
    finally:
        cap.release()
        cv2.destroyAllWindows()
Ejemplo n.º 4
0
 def run(self, context: ConsumerContext):
     """Dummy consumer step: simulate slow work, then alert once the
     numeric payload has reached 10."""
     TestConsumer.LOGGER.info('Working...')
     time.sleep(5)
     TestConsumer.LOGGER.info('Done...')
     value = int(context.data)
     if value >= 10:
         return ConsumerContext(_alert_data='DATA reached 10', _alert=True)
     return ConsumerContext(_data='data', _alert=False)
Ejemplo n.º 5
0
	def run(self, context: ProcessContext):
		"""
		Main loop of the stream: pull data from the producer, pipe it
		through the consumer chain and, when an alert survives the chain
		and the producer's zone is active, enqueue a controller message.
		:param context: Process context
		"""
		self.validate()

		# proxies used for inter-process communication
		data_proxy = context.get_prop('shared_data_proxy')
		sc_queue = context.get_prop('sc_queue')

		# stream main loop
		while True:
			try:
				Stream.LOGGER.debug(self.name + ' calling producer')
				produced = self.producer.get_data(data_proxy)

				c_context = ConsumerContext(produced, True)
				for consumer in self.consumers:
					# a consumer clearing the alert short-circuits the chain
					if not c_context.alert:
						break
					Stream.LOGGER.debug(self.name + ' calling consumer: ' + consumer.get_name())
					c_context = consumer.run(c_context)

				if c_context.alert and self.zone_manager.is_zone_active(self.producer.get_zone()):
					Stream.LOGGER.debug(self.name + ' enqueueing controller message')
					message = StreamControllerMessage(
						_alert=c_context.alert,
						_msg=c_context.alert_data,
						_sender=self.name)
					sc_queue.put(message)
			except Exception as e:
				Stream.LOGGER.error('Something really bad happened: ' + str(e))
Ejemplo n.º 6
0
	def run(self, context: ConsumerContext):
		"""Try to recognize the face carried by the context.

		The alert flag stays raised when recognition fails, so downstream
		consumers treat an unknown face as an alarm; a successful
		recognition clears it and stores the name as alert data.
		"""
		if not self.initialized:
			self.initialize()

		# the data is expected to be the detected face
		face = context.data
		context.alert = True

		if face is None:
			FacerecognizerConsumer.LOGGER.warning('Face was not provided (is None)')
			return context

		side = self.parameters['size']
		name = self.recognize(cv2.resize(face, (side, side)))
		if name is None:
			context.alert_data = 'Cannot recognize face'
		else:
			context.alert = False
			context.alert_data = name
		return context
Ejemplo n.º 7
0
	def run(self, context: ConsumerContext):
		"""Detect motion by diffing the current frame against the previous one.

		The frame is downscaled, grayscaled and blurred; the absolute
		difference to the previous frame is thresholded and dilated, and any
		contour larger than the configured area threshold counts as motion.

		:param context: consumer context whose data holds the BGR frame
		:return: the same context; alert is True when motion was detected
		"""
		img = context.data
		context.alert = False

		if img is None:
			MotiondetectorConsumer.LOGGER.debug('No image')
			time.sleep(self.parameters['timeout'])
			return context

		frame = cv2.resize(img, (self.parameters['resize_width'], self.parameters['resize_height']))
		gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
		# blur to suppress camera noise before differencing
		gray = cv2.GaussianBlur(gray, (21, 21), 0)

		# if the first frame is None, initialize it
		if self.previous_frame is None:
			self.previous_frame = gray
			return context

		# compute the absolute difference between the current frame and previous frame
		frame_delta = cv2.absdiff(self.previous_frame, gray)
		(_, thresh) = cv2.threshold(src=frame_delta,
									thresh=self.parameters['threshold'],
									maxval=self.parameters['threshold_max_val'],
									type=cv2.THRESH_BINARY)

		# dilate the image to fill in holes, then find contours on image
		thresh = cv2.dilate(thresh, None, iterations=self.parameters['dilate_iteration'])
		# OpenCV 3 returns (image, contours, hierarchy); OpenCV 4 returns
		# (contours, hierarchy).  Taking index [-2] works for both versions.
		contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]

		# generator expression avoids materializing a throwaway list of booleans
		motion_detected = any(cv2.contourArea(c) > self.parameters['area_threshold'] for c in contours)
		if motion_detected:
			context.alert = True
			context.alert_data = 'Motion detected'
			MotiondetectorConsumer.LOGGER.debug(context.alert_data)
		else:
			MotiondetectorConsumer.LOGGER.debug('No motion was detected')

		self.previous_frame = gray
		return context
Ejemplo n.º 8
0
def integration_test():
	"""Manual webcam check for the body detector consumer.

	Runs until a key is pressed; every frame on which the consumer raises
	an alert is shown and a running counter is printed.
	"""
	# Given
	parameters = set_parameters()
	consumer = BodydetectorConsumer(parameters)
	context = ConsumerContext(None, False)
	cap = cv2.VideoCapture(0)

	# When
	try:
		detections = 0
		while cv2.waitKey(100) == -1:
			success, frame = cap.read()
			if success:
				resized = cv2.resize(frame, (parameters['resize_width'], parameters['resize_height']))
				context.data = resized
				consumer.run(context)
			if context.alert:
				cv2.imshow('Frame', context.data)
				print('Detected: ' + str(detections))
				detections += 1
	finally:
		cap.release()
		cv2.destroyAllWindows()
Ejemplo n.º 9
0
def get_images_and_labels(path: str, size: int):
    """Walk `path` for .png images, detect the face on each and group the
    resized face crops by person.

    The person's name is taken from the file-name prefix (text before the
    first dot); each new name gets the next incrementing integer label.

    :param path: root directory to scan recursively for .png files
    :param size: edge length the detected faces are resized to
    :return: tuple (labels_with_images, names_with_labels) where the first
        maps label -> list of face images and the second maps name -> label
    """
    labels_with_images = {}
    names_with_labels = {}
    image_paths = []
    detector_consumer = FacedetectorConsumer(set_detector_parameters())
    context = ConsumerContext(None, False)

    for root, dirs, files in os.walk(path):
        image_paths += [
            os.sep.join([root, file]) for file in files
            if file.endswith('.png')
        ]

    global_label = 0
    try:
        for image_path in image_paths:
            image = cv2.imread(image_path)

            # Detect face in image
            context.data = image
            detector_consumer.run(context)
            face = context.data

            if face is not None and context.alert:
                name = os.path.split(image_path)[1].split('.')[0]
                # first time this person appears: assign the next label
                if name not in names_with_labels:
                    names_with_labels[name] = global_label
                    labels_with_images[global_label] = []
                    global_label += 1

                face = resize_image(face, size)
                label = names_with_labels[name]
                labels_with_images[label].append(face)
                cv2.imshow('Adding face to traning set...', face)
                cv2.waitKey(5)
    finally:
        cv2.destroyAllWindows()
    # a `return` inside `finally` would silently swallow any in-flight
    # exception, so the return deliberately lives after the try block
    return labels_with_images, names_with_labels
Ejemplo n.º 10
0
    def run(self, context: ConsumerContext):
        """Run the neural-network recognizer on the face in the context.

        The alert stays raised for an unknown face (the serialized image is
        attached as alert data) and is cleared on a positive recognition.
        """
        if not self.initialized:
            self.initialize()

        # the data is expected to be the detected face
        face = context.data
        context.alert = True

        if face is None:
            NnrecognizerConsumer.LOGGER.warning(
                'Face was not provided (is None)')
            context.alert_data = 'No face provided'
            return context

        NnrecognizerConsumer.LOGGER.info('Running face recognition...')
        recognized = self.recognize(face)
        if recognized:
            context.alert = False
            context.alert_data = 'Positive recognition'
            NnrecognizerConsumer.LOGGER.info(context.alert_data)
        else:
            context.alert_data = NnrecognizerConsumer.img_to_str(face)
            NnrecognizerConsumer.LOGGER.info('Negative recognition')
        return context
Ejemplo n.º 11
0
    def run(self, context: ConsumerContext):
        """Recognize a spoken command and toggle home zones accordingly.

        The audio sample carried by the context is sent to Google speech
        recognition; when the transcript contains a zone name together with
        'on' or 'off', that zone is toggled unless it is already in the
        requested state.

        :param context: consumer context whose data holds the audio sample
        :return: the same context with the alert flag cleared
        """
        if not self.initialized:
            self.initialize()

        # dead `audio = None` pre-assignment removed; it was immediately
        # overwritten by the context data
        audio = context.data
        context.alert = False
        zones = self.zone_manager.get_zones()

        if audio:
            try:
                voice_recognition = self.VoiceRecognizer.recognize_google(
                    audio)
                VoicerecognizerConsumer.LOGGER.info('You said: ' +
                                                    voice_recognition)

                # Search the zone dictionary for a word the user said
                for key, value in zones.items():
                    if key in voice_recognition:
                        # look for the requested state, either on or off
                        if 'off' in voice_recognition:
                            if not zones[key]:
                                VoicerecognizerConsumer.LOGGER.info(
                                    key + ' is already inactive')
                            else:
                                self.zone_manager.toggle_zone(key)
                            break
                        if 'on' in voice_recognition:
                            if zones[key]:
                                VoicerecognizerConsumer.LOGGER.info(
                                    key + ' is already active')
                            else:
                                self.zone_manager.toggle_zone(key)
                            break
            except sr.UnknownValueError:
                VoicerecognizerConsumer.LOGGER.info(
                    'Voicerecognizer could not understand audio')
            except sr.RequestError as e:
                print(
                    "Could not request results from Speech Recognition service; {0}"
                    .format(e))
        # throttle the recognition loop
        time.sleep(3)
        return context