def __init__(self):
    super(Cognitive, self).__init__()
    self.api = cognitive_face
    # Load the Cognitive Services subscription key from the secret.json
    # stored next to this module, then register it with the API client.
    file_path = Utilities.absolute_path(__file__, 'secret.json')
    with open(file_path) as data_file:
        data = json.load(data_file)
        key = data['secret']
    self.api.Key.set(key)
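# The constructor above leans on Utilities.absolute_path and a secret.json
# next to the module, neither of which is shown in this section. The sketch
# below is an assumption inferred from the call sites (the real helper may
# differ) and is included for context only.
import os


class Utilities(object):

    @staticmethod
    def absolute_path(anchor, relative):
        # Resolve `relative` against `anchor`: call sites pass either a
        # file (__file__) or a directory (voices_path()), so handle both.
        base = anchor if os.path.isdir(anchor) else os.path.dirname(
            os.path.abspath(anchor))
        return os.path.join(base, relative)

# secret.json is assumed to hold the subscription key, e.g.:
# {"secret": "<your-cognitive-services-key>"}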
def speak(alias):
    sounds_dir = Utilities.voices_path()
    save_dir = Utilities.absolute_path(sounds_dir, alias + '.mp3')
    if os.path.exists(save_dir):
        # Play the cached mp3 for this alias; play() is asynchronous,
        # so sleep briefly to let playback get under way.
        pygame.mixer.init(44100, -16, 2, 2048)
        pygame.mixer.music.load(save_dir)
        pygame.mixer.music.play()
        time.sleep(1.05)
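# speak() returns after a fixed 1.05 s regardless of clip length. If a
# caller needs to block until playback actually finishes, a small variant
# (a sketch, not part of the original module) can poll pygame's mixer:
import time

import pygame


def speak_blocking(mp3_path):
    """Play an mp3 and return only when playback has finished."""
    pygame.mixer.init(44100, -16, 2, 2048)
    pygame.mixer.music.load(mp3_path)
    pygame.mixer.music.play()
    while pygame.mixer.music.get_busy():  # True while audio is still playing
        time.sleep(0.1)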
def __init__(self, camera):
    """Initialization

    Args:
        camera (int): index of the camera to use
    """
    self.cascade_file = Utilities.absolute_path(__file__, 'cascade.xml')
    self.face_cascade = cv2.CascadeClassifier(self.cascade_file)
    self.video_capture = cv2.VideoCapture(camera)
    # cv2.CAP_PROP_* are the OpenCV 3+ names for the old
    # cv2.cv.CV_CAP_PROP_* constants.
    self.video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    self.video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    self.faces_captured = 1
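# The constructor assumes both the cascade file and the camera open cleanly.
# A hardening sketch that fails fast instead of crashing on the first read;
# the helper names and checks are assumptions, not part of the original:
import cv2


def open_camera(index, width=640, height=480):
    capture = cv2.VideoCapture(index)
    if not capture.isOpened():
        raise IOError('Cannot open camera %d' % index)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    return capture


def load_cascade(path):
    cascade = cv2.CascadeClassifier(path)
    if cascade.empty():  # empty() is True when the XML failed to load
        raise IOError('Cannot load cascade %s' % path)
    return cascade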
def start_capturing(self, is_register):
    """Start the camera, look for faces and write them to file as they are captured.

    Args:
        is_register (bool): determines whether the session is registering or
            identifying. If True, the camera captures 3 photos of the user;
            otherwise only 1 photo is captured.
    """
    if not is_register:
        self.faces_captured = DEFAULT_FACES - 1
    while self.faces_captured < DEFAULT_FACES:
        # Read a frame from the video capture; skip frames the camera drops
        return_code, frame = self.video_capture.read()
        if not return_code:
            continue
        # Use the Haar cascade to detect faces in the captured frame
        faces = self.face_cascade.detectMultiScale(
            cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY),
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE  # OpenCV 3+ name for CV_HAAR_SCALE_IMAGE
        )
        # Write the frame to file once per detection pass; all captured
        # images are saved in the tmp folder in core/face_recognizer
        for (x, y, w, h) in faces:
            file_path = Utilities.absolute_path(
                __file__, 'tmp/face%d.jpg' % self.faces_captured)
            self.faces_captured += 1
            cv2.imwrite(file_path, frame)
            if is_register:
                # Sleep 2 seconds so the user can change face orientation
                time.sleep(2)
            break
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    self.stop_capturing()
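# stop_capturing() is called above but not shown in this section. A minimal
# counterpart on the same class, assuming it only has to release the device
# and close any HighGUI windows (an inference, not the confirmed code):
def stop_capturing(self):
    self.video_capture.release()
    cv2.destroyAllWindows()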
def register(group, alias):
    data_path = Utilities.absolute_path(Utilities.train_data_path(), alias)
    with open(os.path.join(data_path, 'name.txt'), 'r') as name_file:
        name = name_file.read()
    person = group.person_with_name(name)
    person.alias = alias
    create_voice_thread = threading.Thread(
        name='create_voice',
        target=Person.__create_voice,
        args=(name, alias))
    person.save()
    save_faces_thread = threading.Thread(
        name='save_faces',
        target=Person.__save_faces,
        args=(person, data_path))
    processes = ProcessParallel(create_voice_thread, save_faces_thread)
    processes.fork_threads()
    processes.start_all()
    # Wait until all threads are done
    processes.join_all()
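# ProcessParallel is not defined in this section. A minimal sketch consistent
# with the fork_threads / start_all / join_all calls above; the real class
# may behave differently (e.g. wrap multiprocessing instead of threads):
class ProcessParallel(object):

    def __init__(self, *threads):
        self.threads = list(threads)

    def fork_threads(self):
        # Daemonize so stray workers cannot keep the interpreter alive.
        for thread in self.threads:
            thread.daemon = True

    def start_all(self):
        for thread in self.threads:
            thread.start()

    def join_all(self):
        # Block until every worker has finished.
        for thread in self.threads:
            thread.join()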
def create_voice(text, alias):
    sounds_dir = Utilities.voices_path()
    save_dir = Utilities.absolute_path(sounds_dir, alias + '.mp3')
    if not os.path.exists(save_dir):
        # Consider loading this key from a config file (e.g. secret.json)
        # rather than hard-coding it here.
        headers = {
            'Content-Type': 'application/json',
            'api_key': 'b5db987b5a944ff78097d435a5a564dc'
        }
        try:
            response = requests.request(
                'POST',
                'http://api.openfpt.vn/text2speech/v3',
                headers=headers,
                data=text
            )
            if response.status_code not in (200, 202):
                print(response.status_code)
            if response.text:
                result = response.json()
                # The service responds with an 'async' URL where the rendered
                # mp3 can be downloaded once synthesis finishes.
                if result['async']:
                    wget.download(result['async'], save_dir)
        except Exception as e:
            print(e)
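# Because the 'async' URL only becomes valid once synthesis finishes, an
# immediate wget.download can race the renderer. A hedged retry wrapper
# (the retry count and delay are assumptions, not part of the original):
import time

import wget


def download_when_ready(url, save_path, retries=5, delay=1.0):
    for _ in range(retries):
        try:
            return wget.download(url, save_path)
        except Exception:
            time.sleep(delay)  # file may still be rendering; back off and retry
    raise IOError('Could not fetch %s after %d attempts' % (url, retries))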