def recognize(sentence):
    """Feed *sentence* to the recognition engine, release the GPIO
    resources, and restart the program.

    NOTE(review): relies on module-level ``Recognition``, ``gpio`` and
    ``restart`` defined elsewhere in this file.
    """
    engine = Recognition.Recognition(gpio)
    # The engine expects raw bytes, not a str.
    engine.answer(sentence.encode('utf-8'))
    gpio.cleanup()
    restart()
def main():
    """Identify the person seen by the camera: greet a recognized
    person by name, otherwise start registration of a new one."""
    recognizer = recognition.Recognition()
    speaker = voice.Voice()
    name = recognizer.getFromCam()
    # 'Unknown' is the recognizer's sentinel for "no match found".
    if name == 'Unknown':
        recognizer.RegisterNewPerson()
    else:
        speaker.sayHello(str(name))
def snapshot(self):
    """Grab the current frame from the video source, save it as a
    timestamped JPEG under ``images/``, and run face recognition of
    that snapshot against the known reference image ``self.knowImg``.

    Does nothing when no frame is available.
    """
    # Get a frame from the video source
    ret, frame = self.vid.get_frame()
    if ret:
        # Build the path with os.path.join instead of hand-concatenated
        # "\\" separators, which only worked on Windows.
        image_name = os.path.join(
            os.path.curdir,
            "images",
            "frame-" + time.strftime("%d-%m-%Y-%H-%M-%S") + ".jpg",
        )
        # The captured frame is RGB; cv2.imwrite expects BGR ordering.
        cv2.imwrite(image_name, cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
        # face recognition here
        recognition.Recognition(
            os.path.join(os.path.curdir, "test", self.knowImg),
            image_name,
        )
def process_image(self):
    """Run the recognition algorithm on the selected source file and
    push its outputs (processed image, matched image, result text)
    into the corresponding UI widgets."""
    # Guard: no source file has been chosen yet.
    if self.path_to_source_file == '':
        return
    algorithm = recognition.Recognition(self.path_to_source_file)
    processed_path = algorithm.new_path
    matched_path = algorithm.final_recon_path
    self.load_pic_to_window_right(processed_path)
    self.load_recon_image(matched_path)
    self.load_text_to_recon_info(algorithm.result)
import communication
import recognition
import threading
# import logging

# Haar cascade file consumed by the recognizer below.
haar_file_path = 'haarcascade_frontalface_default.xml'

# Loopback endpoints for the local communication channel.
SOURCE_IP = '127.0.0.1'
SOURCE_PORT = 5006
DESTINATION_IP = '127.0.0.1'
DESTINATION_PORT = 5005

# Module-level side effects: open/bind the socket and load the recognizer
# as soon as this module is imported.
connection = communication.Communication(SOURCE_IP, DESTINATION_IP, SOURCE_PORT, DESTINATION_PORT)
connection.bind_socket()
face_recognition = recognition.Recognition(haar_file_path)

# log = logging.getLogger(__name__)
# LOG_FILENAME = 'local.log'
# logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG, format='%(asctime)s %(message)s')


def receive():
    """Background loop: block on incoming packets and print each
    payload with its sender address."""
    while True:
        data, address = connection.receive_packet()
        print(data, address)


command = ""
# Daemon thread so the receiver cannot keep the process alive on exit.
t = threading.Thread(target=receive)
t.daemon = True
t.start()

# Main command loop — its body lies beyond this chunk of the file;
# presumably it reads `command` until the user enters 'q'.
while command != 'q':