def play_pause():
    """
    Called when the Play/Pause button is pressed. Starts/stops/resumes a song.
    """
    if not Playlist.is_playing():
        play_pause_btn.setText("Pause")
        Playlist.play()
        now_playing_text.show()
        if not current_song_text.isVisible():
            current_song_text.setText("{}".format(Playlist.get_current_song()))
            current_song_text.show()
    else:
        play_pause_btn.setText("Play")
        Playlist.pause()
        now_playing_text.hide()
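# A minimal wiring sketch, not the project's actual hook-up code: assuming
# play_pause_btn is the Qt push button created elsewhere in this module, the
# handler above would be attached via the standard clicked signal:
#
#     play_pause_btn.clicked.connect(play_pause)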
def refresh():
    """
    Called when new images are to be displayed on the screen (i.e. a new
    reaction from the user). Sets a new pixmap for each image object on the
    screen. Finally, repaints the whole window.
    """
    # The latter two arguments of scaled() keep the aspect ratio and smooth the
    # result of the scaling.
    camera_img.setPixmap(QPixmap(CAMERA_IMG_PATH).scaled(camera_img.width(), camera_img.height(), 1, 1))
    live_img.setPixmap(QPixmap(LIVE_IMG_PATH).scaled(live_img.width(), live_img.height(), 1, 1))
    prog_img.setPixmap(QPixmap(PROG_IMG_PATH).scaled(prog_img.width(), prog_img.height(), 1, 1))
    if Playlist.is_playing():
        current_song_text.setText("{}".format(Playlist.get_current_song()))
    window.show()
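# A more readable, behavior-identical alternative to the two trailing 1s above
# is to pass Qt's named enums, whose integer values are both 1:
# Qt.KeepAspectRatio (the aspect-ratio mode) and Qt.SmoothTransformation (the
# transformation mode). A sketch, assuming the project imports Qt from
# PyQt5.QtCore:
#
#     from PyQt5.QtCore import Qt
#     camera_img.setPixmap(QPixmap(CAMERA_IMG_PATH).scaled(
#         camera_img.width(), camera_img.height(),
#         Qt.KeepAspectRatio, Qt.SmoothTransformation))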
def play_entry(label):
    """
    Called to trigger the playing/pausing of a specific entry, given its label.
    This also sets the relevant buttons and labels to appropriate states to
    reflect the change.

    :param label: The label containing the name of the entry to be played or paused.
    """
    Playlist.play_song(label)
    if not Playlist.is_playing():
        user_interface.GUI.play_pause_btn.setText("Pause")
        user_interface.GUI.now_playing_text.show()
        user_interface.GUI.current_song_text.setText("{}".format(label.text()))
        if not user_interface.GUI.current_song_text.isVisible():
            user_interface.GUI.current_song_text.show()
    else:
        user_interface.GUI.play_pause_btn.setText("Play")
        Playlist.pause()
        user_interface.GUI.now_playing_text.hide()
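# Hypothetical hook-up sketch (the real wiring lives in the GUI setup code):
# QLabel exposes no clicked signal, so one common way to make each playlist
# entry clickable is to override mousePressEvent on the label instance:
#
#     label.mousePressEvent = lambda event, lbl=label: play_entry(lbl)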
def main():
    """ Main method. """
    # Choose the running model.
    global args
    args = CLIparser.parseFlags()
    db, sessionID = initialize()

    # Start the camera and the GUI.
    thread = threading.Thread(target=GUI.run)
    thread.daemon = True
    vc = cv2.VideoCapture(0)
    fv = FaceVerification(sessionID, args.test)
    GUI.setSomeVariables(vc, sessionID, fv)
    # vc.set(cv2.CAP_PROP_BUFFERSIZE, 1)
    thread.start()

    start_time = time.time()
    while True:
        frame = get_frame(vc)
        # Query the model once every 3 seconds.
        end_time = time.time()
        if end_time - start_time < 3.1:
            # Uncomment the following two lines for a continuous camera feed.
            # WARNING: a Raspberry Pi 4 Model B with 2 GB of RAM will not support this change.
            # cv2.imwrite('frame.png', frame)
            # GUI.refresh_frame()
            continue
        start_time = time.time()
        if not GUI.dead and not GUI.frozen:
            if args.azure:
                # Query Azure.
                if fv.getStatus():
                    emotions = get_facial_emotion(frame, fv)
                else:
                    emotions = get_facial_emotion(frame, None)
            else:
                # Query our model.
                remove_frame()
                face_isolated = facechop(frame)
                emotions = classify(None, face_isolated)
            if emotions:
                if Playlist.is_playing():
                    # Update global descriptors based on song descriptors and user emotions.
                    current_song = Playlist.get_current_song()
                    current_descriptors = Tracklist.get_song(db, current_song)['descriptors']
                    emotion_list = getEmotionList(emotions)
                    descriptors.update_descriptors(emotion_list, current_descriptors)
                    progress_history.update_progress_log(db, sessionID, emotion_list)
                    current_song_score = descriptors.get_song_score(current_descriptors)
                    if args.test:
                        print('Current song score: %s' % current_song_score)
                    # Change song if the song score is low.
                    if current_song_score <= THRESHOLD:
                        Playlist.skip_song()
                remove_frame("progress_plot")
                remove_frame("emotions_plot")
                if args.azure:
                    plotter.write_plot(emotions)
                else:
                    plotter.write_plot({'': emotions})
                GUI.refresh()
        if GUI.dead:
            if args.test:
                print("GUI is closed, shutting down...")
            break

    print("[EMMA]: Closing the camera...")
    close_camera(vc)
    cleanup()
    thread.join()
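# Standard entry-point guard; a sketch that assumes this module is meant to be
# executed directly as a script:
if __name__ == "__main__":
    main()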