def __init__(self, flask_host=config["output_server_host"],
             flask_port=config["output_server_port"],
             flask_url=config["output_server_url"],
             voice_hash=config["default_voice"],
             mary_host=config["marytts_host"],
             mary_port=config["marytts_port"]):
    """
    Create the main components of Output, which are controlled by this class.
    Also create an "_is_running" flag so that running can be stopped, and an
    RLock object to lock self and the threads of controlled objects when necessary.
    """
    super(OutputController, self).__init__()
    self._lock = threading.RLock()
    self._receiver = CommandReceiver(flask_host, flask_port, flask_url, self._lock)
    self._tts = TTSClient(voice_hash, mary_host, mary_port, self)
    self._audioplayer = AudioPlayer(controller=self)

    # lists for controlling text to speech and its playing
    self.tts_phrases_queue = []
    self.text_to_say = []

    self._is_video_playing = False
    self._is_running = True
    self._cur_filetype = None
    self._zoom_factor = 1

    # it saves volume to set it after turning off mute
    # and to know how to increase/decrease it
    self._speech_volume = 0
    self._music_volume = 0
    self._muted = False
def __init__(self) -> None:
    # Initialize pygame
    pygame.init()
    self.audio_player = AudioPlayer()

    # Setup to draw maps
    self.tile_size_pixels = 20
    desired_win_size_pixels = Point(2560, 1340)
    if desired_win_size_pixels is None:
        self.screen: pygame.surface.Surface = pygame.display.set_mode(
            (0, 0),
            pygame.FULLSCREEN | pygame.NOFRAME | pygame.SRCALPHA | pygame.DOUBLEBUF | pygame.HWSURFACE)
        self.win_size_pixels: Point = Point(self.screen.get_size())
        self.win_size_tiles: Point = (self.win_size_pixels / self.tile_size_pixels).floor()
    else:
        self.win_size_tiles = (desired_win_size_pixels / self.tile_size_pixels).floor()
        self.win_size_pixels = self.win_size_tiles * self.tile_size_pixels
        self.screen = pygame.display.set_mode(
            self.win_size_pixels.getAsIntTuple(),
            pygame.SRCALPHA | pygame.DOUBLEBUF | pygame.HWSURFACE)
    self.image_pad_tiles = self.win_size_tiles // 2 * 4

    # Initialize GameInfo
    import os
    base_path = os.path.split(os.path.abspath(__file__))[0]
    game_xml_path = os.path.join(base_path, 'game.xml')
    self.game_info = GameInfo(base_path, game_xml_path, self.tile_size_pixels, self.win_size_pixels)

    self.is_running = True
def handle_death(self, message_dialog: Optional[GameDialog] = None) -> None:
    if not self.hero_party.has_surviving_members():
        # Player death
        self.hero_party.main_character.hp = 0
        AudioPlayer().stop_music()
        AudioPlayer().play_sound('player_died')
        GameDialog.create_encounter_status_dialog(self.hero_party).blit(self.screen, False)
        gde = GameDialogEvaluator(self.game_info, self)
        if message_dialog is None:
            message_dialog = GameDialog.create_message_dialog()
        else:
            message_dialog.add_message('')
        gde.add_and_wait_for_message('Thou art dead.', message_dialog)
        gde.wait_for_acknowledgement(message_dialog)
        for hero in self.hero_party.members:
            hero.curr_pos_dat_tile = hero.dest_pos_dat_tile = self.game_info.death_hero_pos_dat_tile
            hero.curr_pos_offset_img_px = Point(0, 0)
            hero.direction = self.game_info.death_hero_pos_dir
            hero.hp = hero.level.hp
            hero.mp = hero.level.mp
        gde.update_default_dialog_font_color()
        self.pending_dialog = self.game_info.death_dialog
        self.hero_party.gp = self.hero_party.gp // 2
        self.set_map(self.game_info.death_map, respawn_decorations=True)
def __init__(self):
    # Tkinter
    self.root = Tk()
    self.audioPlayer = AudioPlayer()
    self.initWindow()
    self.queue = queue.Queue()
    self.authenticator = Authenticator()
    self.startAuthentication()
    self.root.mainloop()
class Playlist:
    SIZE = 5
    RECENTLY_PLAYED_TIMEOUT_SEC = 60 * 30

    song_list = list()
    recently_played = dict()
    audioPlayer = None

    def __init__(self):
        self.audioPlayer = AudioPlayer()
        self.update_song_list()

    def update_song_list(self):
        if len(self.song_list) == self.SIZE:
            return
        self.clean_recently_played()
        client = MongoClient()
        coll = client[configurations.DB.NAME][configurations.DB.COLLECTIONS.SONGS]
        songs = list(coll.find({}).sort("upvotes", -1))
        if len(songs) == len(self.recently_played):
            self.recently_played = dict()
        self.song_list = list()
        while len(self.song_list) < self.SIZE and len(songs) > 0:
            song = songs.pop(0)
            if song['_id'] not in self.recently_played:
                self.song_list.append(song)
        client.close()

    def clean_recently_played(self):
        new_list = dict()
        now = int(time.time())
        for song in self.recently_played:
            if (now - self.recently_played[song]) < self.RECENTLY_PLAYED_TIMEOUT_SEC:
                new_list[song] = self.recently_played[song]
        self.recently_played = new_list

    def play_next_song(self):
        if len(self.song_list) == 0:
            self.update_song_list()
        if len(self.song_list) != 0:
            song = self.song_list.pop(0)
            self.recently_played[song['_id']] = int(time.time())
            self.audioPlayer.play(song)
            return True
        return False

    def get_currently_playing(self):
        return self.audioPlayer.song

    def is_initial(self):
        return self.audioPlayer.is_initial()
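A minimal sketch of how this Playlist might be driven from a server loop; the polling interval and the use of is_initial() to detect an idle player are illustrative assumptions, not part of the original code.

import time

def run_playlist_loop(playlist, poll_interval_sec=5.0):
    # Poll the player; whenever it reports the idle/initial state, queue the next song.
    while True:
        if playlist.is_initial():
            if not playlist.play_next_song():
                print('No playable songs available right now')
        time.sleep(poll_interval_sec)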
def __init__(self) -> None:
    self.is_running = True

    # Initialize pygame
    pygame.init()
    self.audio_player = AudioPlayer()
    self.clock = pygame.time.Clock()

    # Setup to draw maps
    self.tile_size_pixels = 32
    desired_win_size_pixels = Point(2560, 1340)
    if desired_win_size_pixels is None:
        self.screen: pygame.surface.Surface = pygame.display.set_mode(
            (0, 0), pygame.FULLSCREEN | pygame.NOFRAME | pygame.SRCALPHA)
        self.win_size_pixels: Point = Point(self.screen.get_size())
        self.win_size_tiles: Point = self.win_size_pixels // self.tile_size_pixels
    else:
        self.win_size_tiles = desired_win_size_pixels // self.tile_size_pixels
        self.win_size_pixels = self.win_size_tiles * self.tile_size_pixels
        self.screen = pygame.display.set_mode(
            self.win_size_pixels.getAsIntTuple(), pygame.SRCALPHA)
    self.image_pad_tiles = self.win_size_tiles // 2 * 4

    # Initialize GameInfo
    import os
    base_path = os.path.split(os.path.abspath(__file__))[0]
    game_xml_path = os.path.join(base_path, 'game.xml')
    from GameInfo import GameInfo
    self.game_info = GameInfo(base_path, game_xml_path, self.tile_size_pixels, self.win_size_pixels)

    # Initialize the hero party
    self.hero_party = HeroParty(
        HeroState(self.game_info.character_types['hero'], Point(), Direction.NORTH, 'Camden', 20000))

    # Setup a mock game state
    from unittest import mock
    from unittest.mock import MagicMock
    self.mock_game_state = mock.create_autospec(spec=GameStateInterface)
    self.mock_game_state.screen = self.screen
    self.mock_game_state.is_running = self.is_running
    self.mock_game_state.get_game_info = MagicMock(return_value=self.game_info)
    self.mock_game_state.get_image_pad_tiles = MagicMock(return_value=self.image_pad_tiles)
    self.mock_game_state.get_hero_party = MagicMock(return_value=self.hero_party)
    self.mock_game_state.check_progress_markers = MagicMock(return_value=True)
def __init__(self):
    # Tkinter
    self.root = Tk()
    self.initWindow()
    self.queue = queue.Queue()
    self.enroller = Enroller()
    self.audioPlayer = AudioPlayer()
    self.audioPlayer.playEnrollWelcome()
    # Blocked from here
    self.root.mainloop()
class MapViewer:
    def __init__(self) -> None:
        # Initialize pygame
        pygame.init()
        self.audio_player = AudioPlayer()

        # Setup to draw maps
        self.tile_size_pixels = 20
        desired_win_size_pixels = Point(2560, 1340)
        if desired_win_size_pixels is None:
            self.screen: pygame.surface.Surface = pygame.display.set_mode(
                (0, 0),
                pygame.FULLSCREEN | pygame.NOFRAME | pygame.SRCALPHA | pygame.DOUBLEBUF | pygame.HWSURFACE)
            self.win_size_pixels: Point = Point(self.screen.get_size())
            self.win_size_tiles: Point = (self.win_size_pixels / self.tile_size_pixels).floor()
        else:
            self.win_size_tiles = (desired_win_size_pixels / self.tile_size_pixels).floor()
            self.win_size_pixels = self.win_size_tiles * self.tile_size_pixels
            self.screen = pygame.display.set_mode(
                self.win_size_pixels.getAsIntTuple(),
                pygame.SRCALPHA | pygame.DOUBLEBUF | pygame.HWSURFACE)
        self.image_pad_tiles = self.win_size_tiles // 2 * 4

        # Initialize GameInfo
        import os
        base_path = os.path.split(os.path.abspath(__file__))[0]
        game_xml_path = os.path.join(base_path, 'game.xml')
        self.game_info = GameInfo(base_path, game_xml_path, self.tile_size_pixels, self.win_size_pixels)

        self.is_running = True

    def __del__(self) -> None:
        # Terminate pygame
        self.audio_player.terminate()
        pygame.quit()

    def view_map(self, map_name: str) -> None:
        if not self.is_running:
            return
        if self.game_info.maps[map_name].tiled_filename is not None:
            print('Skipping tiled map', map_name, flush=True)
            return
        self.audio_player.play_music(self.game_info.maps[map_name].music)
        ScrollTest(self.screen, self.game_info, map_name).run()
def title_screen(self, text: str) -> None:
    # Play title music and display title screen
    AudioPlayer().play_music(self.title_music)
    title_image_size_px = Point(self.title_image.get_size())
    title_image_size_px *= max(1, int(min(self.win_size_pixels.w * 0.8 / title_image_size_px.w,
                                          self.win_size_pixels.h * 0.8 / title_image_size_px.h)))
    title_image = pygame.transform.scale(self.title_image, title_image_size_px.getAsIntTuple())
    title_image_dest_px = Point((self.win_size_pixels.w - title_image_size_px.w) / 2,
                                self.win_size_pixels.h / 2 - title_image_size_px.h)
    screen = pygame.display.get_surface()
    screen.fill('black')
    screen.blit(title_image, title_image_dest_px)

    title_image = GameDialog.font.render(text, GameDialog.anti_alias,
                                         pygame.Color('white'), pygame.Color('black'))
    title_image_dest_px = Point((self.win_size_pixels.w - title_image.get_width()) / 2,
                                3 * self.win_size_pixels.h / 4)
    screen.blit(title_image, title_image_dest_px)
    pygame.display.flip()
def on_click_downloading_table(self):
    print("\n")
    for currentQTableWidgetItem in self.tableFileDownloadingWidget.selectedItems():
        print(currentQTableWidgetItem.row(),
              currentQTableWidgetItem.column(),
              currentQTableWidgetItem.text())
    audio = AudioPlayer(currentQTableWidgetItem.text())
    start_new_thread(audio.start_audio, ())
    print("Sound!")
def open_locked_item(self) -> Optional[MapDecoration]:
    locked_map_decoration = self.get_locked_map_decoration()
    if locked_map_decoration is not None:
        if locked_map_decoration.type is not None and locked_map_decoration.type.remove_sound is not None:
            AudioPlayer().play_sound(locked_map_decoration.type.remove_sound)
        self.remove_decoration(locked_map_decoration)
    return locked_map_decoration
def main():
    cam = UsbWebcam()
    ml = Yolo()
    coord = Coordinate()
    audio = AudioPlayer()

    navigation = Thread(target=CoordinateNavigation, args=(coord, audio, cam, ml))
    navigation.start()

    camera = Thread(target=CameraNavigation, args=(cam, ml, audio))
    camera.start()
def test_deafness(test_trials=5):
    sound_was_heard = True

    # Reduce the volume for as long as the sound was heard
    while sound_was_heard:
        audio_player = AudioPlayer(deafness_test_sound.as_posix(), dummy=dummy_audio_player)
        # Set output line to speaker in the middle
        audio_player.set_output_line(5)
        for j in range(test_trials):
            audio_player.play()

        print(Fore.GREEN + 'Has participant heard the sound? (y / n)' + Style.RESET_ALL)
        # Get input regarding perception
        userAnswer = str(input())
        # Set status
        if userAnswer == 'n' or userAnswer == '0' or userAnswer == 'N':
            sound_was_heard = False
        clear_screen()

    print(Fore.RED + 'Do NOT forget to write down sound level!' + Style.RESET_ALL)
    input()
    clear_screen()
def choose_audio(self):
    path = QtGui.QFileDialog.getOpenFileName(self, u'Selecione um áudio', HOME_DIRECTORY,
                                             model.CONTENT_TYPES[model.Media.AUDIO])
    if path == None:
        return
    self.audio_path = unicode(path)
    self.audio_player = AudioPlayer()
    self.ui.txt_sound.clear()
    self.ui.txt_sound.append(os.path.split(self.audio_path)[1])
    self.audio_player.load_audio(self.audio_path)
    self.audio_player.setVisible(False)
def make_map_transition(self, transition: Optional[OutgoingTransition]) -> bool:
    if transition is None:
        return False
    src_map = self.game_info.maps[self.get_map_name()]
    dest_map = self.game_info.maps[transition.dest_map]

    # Find the destination transition corresponding to this transition
    if transition.dest_name is None:
        try:
            dest_transition = dest_map.transitions_by_map[self.get_map_name()]
        except KeyError:
            print('Failed to find destination transition by dest_map', flush=True)
            return False
    else:
        try:
            dest_transition = dest_map.transitions_by_map_and_name[self.get_map_name()][transition.dest_name]
        except KeyError:
            try:
                dest_transition = dest_map.transitions_by_name[transition.dest_name]
            except KeyError:
                print('Failed to find destination transition by dest_name', flush=True)
                return False

    # If transitioning from outside to inside, save off last outside position
    if src_map.is_outside and not dest_map.is_outside:
        self.hero_party.set_last_outside_pos(self.get_map_name(),
                                             self.hero_party.get_curr_pos_dat_tile(),
                                             self.hero_party.get_direction())

    # Make the transition and draw the map
    AudioPlayer().play_sound('walk_away')
    self.hero_party.set_pos(dest_transition.point, dest_transition.dir)
    self.set_map(transition.dest_map, respawn_decorations=transition.respawn_decorations)
    self.draw_map(True)

    # Slight pause on a map transition
    pygame.time.wait(250)
    return True
def handle_quit(self, force: bool = False) -> None:
    AudioPlayer().play_sound('select')
    if force:
        self.is_running = False

    # Save off initial background image
    background_surface = self.screen.copy()

    menu_dialog = GameDialog.create_yes_no_menu(Point(1, 1), 'Do you really want to quit?')
    menu_dialog.blit(self.screen, flip_buffer=True)
    menu_result = GameDialogEvaluator(self.game_info, self).get_menu_result(menu_dialog, allow_quit=False)
    if menu_result is not None and menu_result == 'YES':
        self.is_running = False

    # Restore initial background image
    menu_dialog.erase(self.screen, background_surface, flip_buffer=True)
def main():
    audio = AudioPlayer()
    audio.Play('CollisionWarning.mp3')
    audio.Play('CollisionWarning.mp3')
    time.sleep(2.3)
    audio.Stop()
    print('Killed')
    audio.Play('CollisionWarning.mp3')
    audio.Play('CollisionWarning.mp3')
    print('wait 5')
    time.sleep(5)
    # audio.KillOtherSounds()
    audio.Play('CollisionWarning.mp3')
    audio.Play('CollisionWarning.mp3')
    time.sleep(5)
    # audio.KillOtherSounds()
    audio.Play('CollisionWarning.mp3')
    audio.Play('CollisionWarning.mp3')
    time.sleep(5)
def main():
    fileToPlay = "audio\\white_noise_300.0ms_1000_bandwidth.wav"
    # fileToPlay = "audio\\wn_long.wav"
    audio_player = AudioPlayer(fileToPlay)
    trials = 1
    try:
        for i in range(13):
            # i = 4
            logging.info("Testing Line: " + str(i + 1))
            audio_player.set_output_line(i + 1)
            # audio_player.set_output_line(9)
            for j in range(trials):
                audio_player.play()
    except KeyboardInterrupt:
        parser.exit('\nInterrupted by user')
    except TypeError as e:
        parser.exit(type(e).__name__ + ': ' + str(e))
    except Exception as e:
        parser.exit(type(e).__name__ + ': ' + str(e))
def title_screen_loop(self, pc_name_or_file_name: Optional[str] = None) -> None:
    self.title_screen('Press any key')

    # Wait for user input - any key press
    while self.game_state.is_running:
        waiting_for_user_input = True
        for event in GameEvents.get_events():
            if event.type == pygame.QUIT:
                self.game_state.handle_quit(force=True)
            elif event.type == pygame.KEYDOWN:
                AudioPlayer().play_sound('select')
                waiting_for_user_input = False
                break
        if waiting_for_user_input:
            pygame.time.wait(25)
        else:
            break

    # Prompt user for new game or to load a saved game
    if pc_name_or_file_name is None:
        # Get a list of the saved games
        saved_game_files = glob.glob(os.path.join(self.game_state.saves_path, '*.xml'))
        saved_games = []
        for saved_game_file in saved_game_files:
            saved_games.append(os.path.basename(saved_game_file)[:-4])

        while self.game_state.is_running:
            menu_options = []
            if 0 < len(saved_games):
                menu_options.append('Continue a Quest')
            menu_options.append('Begin a Quest')
            if 0 < len(saved_games):
                menu_options.append('Delete a Quest')
            if self.game_state.should_add_math_problems_in_combat():
                menu_options.append('Combat Mode: Math')
            else:
                menu_options.append('Combat Mode: Classic')

            message_dialog = GameDialog.create_message_dialog()
            message_dialog.add_menu_prompt(menu_options, 1)
            message_dialog.blit(self.game_state.screen, True)
            menu_result = self.gde.get_menu_result(message_dialog)
            # print('menu_result =', menu_result, flush=True)

            if menu_result == 'Continue a Quest':
                message_dialog.clear()
                message_dialog.add_menu_prompt(saved_games, 1)
                message_dialog.blit(self.game_state.screen, True)
                menu_result = self.gde.get_menu_result(message_dialog)
                if menu_result is not None:
                    pc_name_or_file_name = menu_result
                    break
            if menu_result == 'Delete a Quest':
                message_dialog.clear()
                message_dialog.add_menu_prompt(saved_games, 1)
                message_dialog.blit(self.game_state.screen, True)
                menu_result = self.gde.get_menu_result(message_dialog)
                if menu_result is not None:
                    message_dialog.add_yes_no_prompt('Are you sure?')
                    message_dialog.blit(self.game_state.screen, True)
                    if self.gde.get_menu_result(message_dialog) == 'YES':
                        saved_games.remove(menu_result)

                        # Delete the save game by archiving it off
                        saved_game_file = os.path.join(self.game_state.saves_path, menu_result + '.xml')
                        self.game_state.archive_saved_game_file(saved_game_file, 'deleted')
            elif menu_result == 'Begin a Quest':
                message_dialog.clear()
                pc_name_or_file_name = self.gde.wait_for_user_input(message_dialog, 'What is your name?')[0]

                if pc_name_or_file_name in saved_games:
                    message_dialog.add_message(
                        'Thou hast already started a quest. Dost thou want to start over?')
                    message_dialog.add_yes_no_prompt()
                    message_dialog.blit(self.game_state.screen, True)
                    menu_result = self.gde.get_menu_result(message_dialog)
                    if menu_result == 'YES':
                        # Delete the existing save game by archiving it off
                        saved_game_file = os.path.join(self.game_state.saves_path, pc_name_or_file_name + '.xml')
                        self.game_state.archive_saved_game_file(saved_game_file, 'deleted')
                    elif menu_result != 'NO':
                        continue
                break
            elif menu_result is not None and menu_result.startswith('Combat Mode:'):
                self.game_state.toggle_should_add_math_problems_in_combat()

    # Load the saved game
    self.game_state.load(pc_name_or_file_name)
    self.gde.refresh_game_state()
from AudioPlayer import AudioPlayer

lol = AudioPlayer()
lol.play_music('organ_background.wav')
lol.play_speech('process')
lol.run()
def scroll_tile(self) -> None:
    transition: Optional[OutgoingTransition] = None

    # Determine the destination tile and pixel count for the scroll
    hero_dest_dat_tile = self.game_state.hero_party.members[0].dest_pos_dat_tile

    # Validate if the destination tile is navigable
    movement_allowed = self.game_state.can_move_to_tile(hero_dest_dat_tile)

    # Play a walking sound or bump sound based on whether the movement was allowed
    audio_player = AudioPlayer()
    movement_hp_penalty = 0
    if movement_allowed:
        dest_tile_type = self.game_state.get_tile_info(hero_dest_dat_tile)

        for hero_idx in range(1, len(self.game_state.hero_party.members)):
            hero = self.game_state.hero_party.members[hero_idx]
            hero.dest_pos_dat_tile = self.game_state.hero_party.members[hero_idx - 1].curr_pos_dat_tile
            if hero.curr_pos_dat_tile != hero.dest_pos_dat_tile:
                hero.direction = Direction.get_direction(hero.dest_pos_dat_tile - hero.curr_pos_dat_tile)

        # Determine if the movement should result in a transition to another map
        map_size = self.game_state.game_map.size()
        leaving_transition = self.game_state.game_info.maps[self.game_state.get_map_name()].leaving_transition
        if leaving_transition is not None:
            if leaving_transition.bounding_box:
                if not leaving_transition.bounding_box.collidepoint(hero_dest_dat_tile.getAsIntTuple()):
                    transition = leaving_transition
            elif (hero_dest_dat_tile[0] == 0
                    or hero_dest_dat_tile[1] == 0
                    or hero_dest_dat_tile[0] == map_size[0] - 1
                    or hero_dest_dat_tile[1] == map_size[1] - 1):
                transition = leaving_transition
        if transition is None:
            # TODO: Uncomment following two statements to disable coordinate logging
            # encounter_background = self.game_state.get_encounter_background(hero_dest_dat_tile)
            # print('Check for transitions at', hero_dest_dat_tile, encounter_background, flush=True)

            # See if this tile has any associated transitions
            transition = self.game_state.get_point_transition(hero_dest_dat_tile,
                                                              filter_to_automatic_transitions=True)
        else:
            # Map leaving transition
            # print('Leaving map', self.gameState.mapState.mapName, flush=True)
            pass

        # Check for tile penalty effects
        if dest_tile_type.hp_penalty > 0 and not self.game_state.hero_party.is_ignoring_tile_penalties():
            audio_player.play_sound('hit_lvl_1')
            movement_hp_penalty = dest_tile_type.hp_penalty

        # Check for any status effect changes or healing to occur as the party moves
        has_low_health = self.game_state.hero_party.has_low_health()
        dialog_from_inc_step_count = self.game_state.hero_party.inc_step_counter()
        if has_low_health != self.game_state.hero_party.has_low_health():
            # Change default dialog font color
            self.gde.update_default_dialog_font_color()

            # Redraw the map
            self.game_state.draw_map(True)
        if dialog_from_inc_step_count is not None:
            self.gde.dialog_loop(dialog_from_inc_step_count)
    else:
        self.game_state.hero_party.members[0].dest_pos_dat_tile = \
            self.game_state.hero_party.members[0].curr_pos_dat_tile
        audio_player.play_sound('blocked')

    first_frame = True
    while self.game_state.hero_party.members[0].curr_pos_dat_tile != \
            self.game_state.hero_party.members[0].dest_pos_dat_tile:
        # Redraws the characters when movement_allowed is True
        # print('advancing one tick in scroll_tile', flush=True)
        if movement_allowed and movement_hp_penalty > 0 and first_frame:
            flicker_surface = pygame.surface.Surface(self.game_state.screen.get_size())
            flicker_surface.fill('red')
            flicker_surface.set_alpha(128)
            self.game_state.advance_tick(update_map=True, draw_map=True, advance_time=False, flip_buffer=False)
            self.game_state.screen.blit(flicker_surface, (0, 0))
            self.game_state.advance_tick(update_map=False, draw_map=False, advance_time=True, flip_buffer=True)
            pygame.time.wait(20)
            first_frame = False
        else:
            self.game_state.advance_tick()

    if movement_allowed:
        # Apply health penalty and check for player death
        for hero in self.game_state.hero_party.members:
            if not hero.is_ignoring_tile_penalties():
                hero.hp -= movement_hp_penalty
        self.gde.update_default_dialog_font_color()
        self.game_state.handle_death()

        # At destination - now determine if an encounter should start
        if not self.game_state.make_map_transition(transition):
            # Check for special monster encounters
            if (self.game_state.get_special_monster() is not None
                    or (len(self.game_state.get_tile_monsters()) > 0
                        and random.uniform(0, 1) < dest_tile_type.spawn_rate)):
                # NOTE: Comment out the following line to disable encounters
                self.game_state.initiate_encounter()
    else:
        for x in range(CharacterSprite.get_tile_movement_steps()):
            self.game_state.advance_tick()
def exploring_loop(self) -> None:
    map_name = ''

    while self.game_state.is_running:
        # Generate the map state on a mode or map change
        if map_name != self.game_state.get_map_name():
            map_name = self.game_state.get_map_name()

            # Play the music for the map
            AudioPlayer().play_music(self.game_state.game_info.maps[self.game_state.get_map_name()].music)

            # Draw the map to the screen
            self.game_state.draw_map()

        if self.game_state.pending_dialog is not None:
            self.gde.dialog_loop(self.game_state.pending_dialog)
            self.game_state.pending_dialog = None

        # Process events
        # print(datetime.datetime.now(), 'exploring_loop: Getting events...', flush=True)
        events = GameEvents.get_events(True)
        changed_direction = False

        for event in events:
            # print('exploring_loop: Processing event', event, flush=True)
            move_direction: Optional[Direction] = None
            menu = False
            talking = False
            searching = False
            opening = False

            if event.type == pygame.QUIT:
                self.game_state.handle_quit(force=True)
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    self.game_state.handle_quit()
                elif event.key == pygame.K_RETURN:
                    menu = True
                elif event.key == pygame.K_SPACE:
                    # Fast, smart interactions - skip launching the menu
                    if self.game_state.get_npc_to_talk_to() is not None:
                        talking = True
                    elif self.game_state.is_facing_openable_item():
                        opening = True
                    else:
                        searching = True
                elif event.key == pygame.K_F1:
                    AudioPlayer().play_sound('select')
                    self.game_state.save(quick_save=True)
                else:
                    move_direction = Direction.get_optional_direction(event.key)
            else:
                # print('exploring_loop: Ignoring event', event, flush=True)
                continue

            # print(datetime.datetime.now(), 'exploring_loop: Processed event', event, flush=True)

            # Clear queued events upon launching the menu
            GameEvents.clear_events()
            events = []

            if move_direction:
                if changed_direction or self.game_state.hero_party.members[0].curr_pos_dat_tile != \
                        self.game_state.hero_party.members[0].dest_pos_dat_tile:
                    # print('Ignoring move as another move is already in progress', flush=True)
                    continue
                if move_direction != self.game_state.hero_party.members[0].direction:
                    self.game_state.hero_party.members[0].direction = move_direction
                    changed_direction = True
                else:
                    self.game_state.hero_party.members[0].dest_pos_dat_tile = \
                        self.game_state.hero_party.members[0].curr_pos_dat_tile + move_direction.get_vector()

            if menu:
                AudioPlayer().play_sound('select')
                GameDialog.create_exploring_status_dialog(
                    self.game_state.hero_party).blit(self.game_state.screen, False)
                menu_dialog = GameDialog.create_exploring_menu()
                menu_dialog.blit(self.game_state.screen, True)
                menu_result = self.gde.get_menu_result(menu_dialog)
                # print('menu_result =', menu_result, flush=True)
                if menu_result == 'TALK':
                    talking = True
                elif menu_result == 'SEARCH':
                    searching = True
                elif menu_result == 'OPEN':
                    opening = True
                elif menu_result == 'STAIRS':
                    if not self.game_state.make_map_transition(self.game_state.get_point_transition()):
                        self.gde.dialog_loop('There are no stairs here.')
                elif menu_result == 'STATUS':
                    GameDialog.create_full_status_dialog(
                        self.game_state.hero_party).blit(self.game_state.screen, True)
                    self.gde.wait_for_acknowledgement()
                elif menu_result == 'SPELL':
                    # TODO: Need to choose the actor (spellcaster)
                    actor = self.game_state.hero_party.main_character
                    self.gde.set_actor(actor)
                    available_spell_names = actor.get_available_spell_names()
                    if len(available_spell_names) == 0:
                        self.gde.dialog_loop('Thou hast not yet learned any spells.')
                    else:
                        menu_dialog = GameDialog.create_menu_dialog(
                            Point(-1, menu_dialog.pos_tile.y + menu_dialog.size_tiles.h + 1),
                            None,
                            'SPELLS',
                            available_spell_names,
                            1)
                        menu_dialog.blit(self.game_state.screen, True)
                        menu_result = self.gde.get_menu_result(menu_dialog)
                        # print('menu_result =', menu_result, flush=True)
                        if menu_result is not None:
                            spell = self.game_state.game_info.spells[menu_result]
                            if actor.mp >= spell.mp:
                                # TODO: Depending on the spell may need to select the target(s)
                                targets = [actor]
                                actor.mp -= spell.mp
                                self.gde.set_targets(cast(List[CombatCharacterState], targets))
                                self.gde.dialog_loop(spell.use_dialog)
                                GameDialog.create_exploring_status_dialog(
                                    self.game_state.hero_party).blit(self.game_state.screen, False)
                            else:
                                self.gde.dialog_loop('Thou dost not have enough magic to cast the spell.')
                    # Restore the default actor and targets after calling the spell
                    self.gde.restore_default_actor_and_targets()
                elif menu_result == 'ITEM':
                    # TODO: Need to choose the hero to use an item
                    actor = self.game_state.hero_party.main_character
                    self.gde.set_actor(actor)
                    item_cols = 2
                    item_row_data = actor.get_item_row_data()
                    if len(item_row_data) == 0:
                        self.gde.dialog_loop('Thou dost not have any items.')
                    else:
                        menu_dialog = GameDialog.create_menu_dialog(
                            Point(-1, menu_dialog.pos_tile.y + menu_dialog.size_tiles.h + 1),
                            None,
                            'ITEMS',
                            item_row_data,
                            item_cols,
                            GameDialogSpacing.OUTSIDE_JUSTIFIED)
                        menu_dialog.blit(self.game_state.screen, True)
                        item_result = self.gde.get_menu_result(menu_dialog)
                        # print('item_result =', item_result, flush=True)
                        if item_result is not None:
                            item_options = self.game_state.hero_party.main_character.get_item_options(item_result)
                            if len(item_row_data) == 0:
                                self.gde.dialog_loop("The item vanished in [ACTOR]'s hands.")
                            else:
                                menu_dialog = GameDialog.create_menu_dialog(
                                    Point(-1, menu_dialog.pos_tile.y + menu_dialog.size_tiles.h + 1),
                                    None,
                                    None,
                                    item_options,
                                    len(item_options))
                                menu_dialog.blit(self.game_state.screen, True)
                                action_result = self.gde.get_menu_result(menu_dialog)
                                # print('action_result =', action_result, flush=True)
                                if action_result == 'DROP':
                                    # TODO: Add an are you sure prompt here
                                    self.game_state.hero_party.lose_item(item_result)
                                elif action_result == 'EQUIP':
                                    self.game_state.hero_party.main_character.equip_item(item_result)
                                elif action_result == 'UNEQUIP':
                                    self.game_state.hero_party.main_character.unequip_item(item_result)
                                elif action_result == 'USE':
                                    item = self.game_state.hero_party.get_item(item_result)
                                    if item is not None and isinstance(item, Tool) and item.use_dialog is not None:
                                        # TODO: Depending on the item may need to select the target(s)
                                        targets = [actor]
                                        self.gde.set_targets(cast(List[CombatCharacterState], targets))
                                        self.gde.dialog_loop(item.use_dialog)
                                    else:
                                        self.gde.dialog_loop('[ACTOR] studied the object and was confounded by it.')
                    # Restore the default actor and targets after using the item
                    self.gde.restore_default_actor_and_targets()
                elif menu_result is not None:
                    print('ERROR: Unsupported menu_result =', menu_result, flush=True)

                # Erase menu
                self.game_state.draw_map()
                pygame.display.flip()

            if talking:
                npc = self.game_state.get_npc_to_talk_to()
                if npc:
                    if npc.npc_info.dialog is not None:
                        dialog = npc.npc_info.dialog
                        self.game_state.draw_map()
                    else:
                        dialog = ['They pay you no mind.']
                else:
                    dialog = ['There is no one there.']
                self.gde.dialog_loop(dialog, npc)

            if searching or opening:
                decorations = self.game_state.get_decorations()
                if searching:
                    dialog = ['[NAME] searched the ground and found nothing.']
                else:
                    dialog = ['[NAME] found nothing to open.']
                dest_tile = self.game_state.hero_party.members[0].curr_pos_dat_tile \
                    + self.game_state.hero_party.members[0].direction.get_vector()
                decorations += self.game_state.get_decorations(dest_tile)
                for decoration in decorations:
                    requires_removal = False
                    if decoration.type is not None:
                        requires_removal = (decoration.type.remove_with_search
                                            or decoration.type.remove_with_open
                                            or decoration.type.remove_with_key)
                    if requires_removal:
                        if ((searching and decoration.type.remove_with_search)
                                or (opening and decoration.type.remove_with_open)):
                            if decoration.type.remove_sound is not None:
                                AudioPlayer().play_sound(decoration.type.remove_sound)
                            self.game_state.remove_decoration(decoration)
                            self.game_state.draw_map()
                            if decoration.dialog is not None:
                                dialog = decoration.dialog
                            else:
                                dialog = []
                            break
                        elif decoration.type.remove_with_key:
                            key_item = self.game_state.game_info.items['Key']
                            if self.game_state.hero_party.has_item(key_item.name) \
                                    and isinstance(key_item, Tool) \
                                    and key_item.use_dialog is not None:
                                dialog = ['It is locked. Do you want to open it with a key?',
                                          {'Yes': key_item.use_dialog,
                                           'No': None}]
                            else:
                                dialog = ['It is locked.']
                            break
                    if not requires_removal and decoration.dialog is not None:
                        dialog = decoration.dialog
                self.gde.dialog_loop(dialog)

        if self.game_state.hero_party.members[0].curr_pos_dat_tile != \
                self.game_state.hero_party.members[0].dest_pos_dat_tile:
            self.scroll_tile()
        else:
            # print('advancing one tick in exploring_loop', flush=True)
            self.game_state.advance_tick()
def load_annotation(self):
    if self.project is not None and \
            self.annotation is not None:
        self.ui.time_begin.setTime(self.annotation.annotation_time)
        self.ui.textEdit.append(self.annotation.description)
        if self.annotation.interaction is not None:
            show_content = self.annotation.interaction
            self.ui.ckb_compulsory.setChecked(show_content.compulsory)
            self.ui.ckb_interactive.setChecked(show_content.interactive)
            self.ui.ckb_allows_end_content.setChecked(show_content.allow_end_content)
            self.ui.ckb_show_on_tv.setChecked(show_content.tv)
            self.ui.ckb_show_on_mobile.setChecked(show_content.mobile)
            self.ui.ckb_pause_main_video.setChecked(show_content.pause_main_video)
            self.ui.ckb_viber.setChecked(show_content.viber_alert)
            if show_content.sound_alert is not None:
                self.ui.ckb_audio.setChecked(True)
                self.audio_path = unicode(show_content.sound_alert)
                self.audio_player = AudioPlayer()
                self.ui.txt_sound.clear()
                self.ui.txt_sound.append(os.path.split(self.audio_path)[1])
                self.audio_player.load_audio(self.audio_path)
                self.audio_player.setVisible(False)
            if show_content.icon.image == model.Icon.INFO:
                self.ui.radio_info.setChecked(True)
            elif show_content.icon.image == model.Icon.SEXUAL:
                self.ui.radio_sexual.setChecked(True)
            elif show_content.icon.image == model.Icon.VIOLENCE:
                self.ui.radio_violence.setChecked(True)
            elif show_content.icon.image == model.Icon.YES:
                self.ui.radio_yes.setChecked(True)
            elif show_content.icon.image == model.Icon.NO:
                self.ui.radio_no.setChecked(True)
            else:
                self.ui.radio_personalized.setChecked(True)
                self.icon_path = show_content.icon.image
                self.ui.radio_personalized.setIcon(QtGui.QIcon(self.icon_path))
                self.ui.cmb_button.setEnabled(True)
                if show_content.button is not None:
                    self.ui.cmb_button.setCurrentIndex(self.buttons.index(
                        show_content.button))
            if show_content.icon.position == model.Icon.BOT_LEFT:
                self.ui.radio_bl.setChecked(True)
            elif show_content.icon.position == model.Icon.BOT_RIGHT:
                self.ui.radio_br.setChecked(True)
            elif show_content.icon.position == model.Icon.TOP_LEFT:
                self.ui.radio_tl.setChecked(True)
            elif show_content.icon.position == model.Icon.TOP_RIGHT:
                self.ui.radio_tr.setChecked(True)
            elif show_content.icon.position == model.Icon.PERSONALIZED:
                self.ui.radio_free_position.setChecked(True)
                self.set_icon_boundaries(show_content.icon.bondaries)
            before_str = str(show_content.icon.relative_time)
            for index in xrange(self.ui.cmb_icon_before.count()):
                str_index = str(self.ui.cmb_icon_before.itemText(index))
                if before_str == str_index:
                    self.ui.cmb_icon_before.setCurrentIndex(index)
                    break
            duration_str = str(show_content.icon.duration_time)
            for index in xrange(self.ui.cmb_icon_duration.count()):
                str_index = str(self.ui.cmb_icon_duration.itemText(index))
                if duration_str == str_index:
                    self.ui.cmb_icon_duration.setCurrentIndex(index)
                    break
            for content in show_content.contents:
                self.add_media_widget.add_media_item(content)
            if show_content.interaction_type == model.ShowContent.SHOW_CONTENT:
                self.ui.radio_show.setChecked(True)
            elif show_content.interaction_type == model.ShowContent.SKIP:
                self.ui.radio_skip.setChecked(True)
                self.ui.time_skip_point.setTime(util.sec_to_qtime(show_content.skip_point))
            elif show_content.interaction_type == model.ShowContent.BACK_5:
                self.ui.radio_back.setChecked(True)
            elif show_content.interaction_type == model.ShowContent.BACK_TO:
                self.ui.radio_back_to.setChecked(True)
                self.ui.time_back_point.setTime(util.sec_to_qtime(show_content.back_point))
                self.ui.time_back_limite.setTime(util.sec_to_qtime(show_content.back_limite))
            self.ui.btn_preview.setEnabled(True)
        self.add_media_widget.update_media_list()
# initialize input system
inputProcess = InputWorker()
inputProcess.start()

# initialize scanner
scannerProcess = ScannerWorker()
scannerProcess.start()

# merge the outputs into a single queue
qm = QueueMerge()
qm.add_input_queue(inputProcess.output_queue)
qm.add_input_queue(scannerProcess.output_queue)

# initialize audio player
player = AudioPlayer()

while True:
    # Endless work loop. We read an event and act on it
    event = qm.outputQueue.get()
    # print(event.__str__())
    if isinstance(event, ScannerEvent):
        # Scanner event handler block
        if isinstance(event, RootPathAppeared):
            mdb.add_root_path(event.rootPath)
        elif isinstance(event, RootPathRemoved):
            mdb.remove_root_path(event.rootPath)
        elif isinstance(event, AudioFileFound):
            mdb.add_entry(event.path, event.gain_level)
            print(event.path.__str__() + ": " + event.gain_level.__str__())
from EventParser import EventParser
from Commentator import Commentator
from AudioRenderer import AudioRenderer
from AudioPlayer import AudioPlayer
from threading import Thread
from Queue import Queue
import time

eventQueue = Queue()
commentatorQueue = Queue()
rendererQueue = Queue()

eventParser = EventParser(eventQueue)
commentator = Commentator(eventQueue, commentatorQueue)
audioRenderer = AudioRenderer(commentatorQueue, rendererQueue)
audioPlayer = AudioPlayer(rendererQueue)

eventParser.daemon = True
eventParser.start()
commentator.daemon = True
commentator.start()
audioRenderer.daemon = True
audioRenderer.start()
audioPlayer.daemon = True
audioPlayer.start()

while True:
    time.sleep(100)
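The internals of this project's AudioPlayer are not shown in the snippet; as a rough sketch of what the consumer end of such a queue pipeline typically looks like (the class name, the playback callback, and the None shutdown sentinel are assumptions for illustration):

import threading

class QueueDrainingPlayer(threading.Thread):
    """Illustrative stand-in for the player stage: it drains its input queue
    and hands each rendered item to a playback callback, in order."""

    def __init__(self, input_queue, playback_fn):
        threading.Thread.__init__(self)
        self.input_queue = input_queue
        self.playback_fn = playback_fn

    def run(self):
        while True:
            item = self.input_queue.get()  # blocks until the renderer produces something
            if item is None:               # None used as a shutdown sentinel (assumption)
                break
            self.playback_fn(item)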
def auralizer_thread(player, frame, cont):
    prev_frame = frame[0]
    while cont[0]:
        t0 = time.time()
        video_data = frame[0]
        audio_data = auralizer.auralize(video_data, prev_frame)
        print(audio_data)
        player.play_multiple_chords(audio_data)
        prev_frame = video_data
        time.sleep(max(0, DELTA - (time.time() - t0)))


DELTA = 60 / AudioPlayer.BPM

if __name__ == "__main__":
    player = AudioPlayer()
    prev_frame = None
    cont = [True]
    video_stream = XiaoYiActionCamera().open_stream()
    # video_stream = VideoStream(cv2.VideoCapture(0))
    frame = video_stream.read_frame()
    filters.registerMotionFilter(MotionFilter(frame.shape))
    frame = [frame]
    t1 = threading.Thread(target=stream_thread, args=(video_stream, frame, cont))
    t1.start()
# initialize pygame
pygame.init()
pygame.mouse.set_visible(False)

# init rendering screen
displaymode = (IMAGE_WIDTH, IMAGE_HEIGHT)
screen = pygame.display.set_mode(displaymode)
pygame.display.toggle_fullscreen()

# load cover image
cover = pygame.image.load(IMAGE_NAME).convert()

# set cover position
position = pygame.Rect((0, -IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_HEIGHT))
screen.blit(cover, position)

audioPlayer = AudioPlayer(AUDIO_NAME)
videoPlayer = VideoPlayer(VIDEO_NAME, IMAGE_WIDTH, IMAGE_HEIGHT, USE_VLC, audioPlayer, pygame)

inputLen = getInputDevices()
prevInputLen = inputLen

# MAIN LOOP
while True:
    pygame.mouse.set_cursor((8, 8), (0, 0), (0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0))
    try:
        if areThereNewInputsDevices():
            hardKill()
            killAll()
def initiate_encounter(self,
                       monster_info: Optional[MonsterInfo] = None,
                       approach_dialog: Optional[DialogType] = None,
                       victory_dialog: Optional[DialogType] = None,
                       run_away_dialog: Optional[DialogType] = None,
                       encounter_music: Optional[str] = None,
                       message_dialog: Optional[GameDialog] = None) -> None:
    # TODO: Make the conditions for no monsters configurable
    if self.hero_party.has_item('Ball of Light'):
        return

    # Determine the monster party for the encounter
    if monster_info is None:
        # Check for special monsters
        special_monster_info = self.get_special_monster()
        if special_monster_info is not None:
            monster_party = MonsterParty([MonsterState(special_monster_info)])
            approach_dialog = special_monster_info.approach_dialog
            victory_dialog = special_monster_info.victory_dialog
            run_away_dialog = special_monster_info.run_away_dialog
        else:
            monster_info = self.game_info.monsters[random.choice(self.get_tile_monsters())]
            monster_party = MonsterParty([MonsterState(monster_info)])
    else:
        monster_party = MonsterParty([MonsterState(monster_info)])

    if self.hero_party.is_monster_party_repelled(monster_party, self.is_outside()):
        return

    # A combat encounter requires an encounter background
    encounter_background = self.game_info.maps[self.get_map_name()].encounter_background
    if encounter_background is None:
        encounter_background = self.get_encounter_background()
        if encounter_background is None:
            print('Failed to initiate combat encounter due to lack of encounter image in map '
                  + self.get_map_name(), flush=True)
            return

    # Perform the combat encounter
    CombatEncounter.static_init('combat')
    self.combat_encounter = CombatEncounter(
        game_info=self.game_info,
        game_state=self,
        monster_party=monster_party,
        encounter_background=encounter_background,
        message_dialog=message_dialog,
        approach_dialog=approach_dialog,
        victory_dialog=victory_dialog,
        run_away_dialog=run_away_dialog,
        encounter_music=encounter_music)
    self.combat_encounter.encounter_loop()
    self.combat_encounter = None

    # Play the music for the current map
    AudioPlayer().play_music(self.game_info.maps[self.get_map_name()].music)

    # Clear event queue
    GameEvents.clear_events()
def main():
    # set path where the results are stored
    results_path = Path('./results_inear_exp/')
    results_path.mkdir(parents=False, exist_ok=True)

    # We have 2 conditions (monaural, binaural). In each condition, two different sounds are randomly played
    conditions = ['mono', 'bin']

    # set number of trials per condition. each sound is then played N_TRIALS/2 times
    # make sure this number is divisible by 2 (sounds) and 13 (number of speakers)
    assert (N_TRIALS % 2 == 0 and N_TRIALS % N_SPEAKERS == 0)

    # ask for user id
    print(Fore.GREEN + 'Please enter participant id: ' + Style.RESET_ALL)
    user_id = input()

    # create the path to the data file
    date = datetime.now()
    resultsFile = 'userid_' + user_id + '_date_' + date.strftime('%d.%m.%Y') + \
        '_time_' + date.strftime('%H.%M') + '.csv'
    resultsStoredIn = results_path / resultsFile

    # start by creating a new data file to store the data.
    # data is stored continuously, so in case of a crash the data is not lost.
    with open(resultsStoredIn.as_posix(), mode='w', newline='') as resFile:
        # create the file
        res_file_writer = csv.writer(resFile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        # add headers
        res_file_writer.writerow([
            'trial',          # Trial
            'line_number',    # line number (speaker number)
            'user_estimate',  # perceived elevation in degree
            'sound_type',     # type of the sound
            'condition',      # condition
            'reaction_time',  # time for participant to respond
            'user_id'         # id of the user
        ])

        # Ask for dominant ear
        # left channel is 0, right channel is 1
        print(Fore.GREEN + 'Which is the dominant ear (0 for right, 1 for left): ' + Style.RESET_ALL)
        dominant_ear = int(input())
        assert (dominant_ear == 0 or dominant_ear == 1)

        ### Monaural condition is the first ###
        clear_screen()
        print(Fore.GREEN + 'All set. Experiment is about to start...' + Style.RESET_ALL)

        # Initialize AudioPlayer
        audio_player = AudioPlayer(dummy=dummy_audio_player)
        # Initialize Arduino Reader
        arduino_reader = ArduinoReader(port=ARDUINO_PORT, dummy=dummy_arduino_reader)

        # Zeroing of the angle encoder
        print(Fore.RED + 'Confirm that the handle is in zero position (pointing upwards)' + Style.RESET_ALL)
        input()
        arduino_reader.zeroing()
        clear_screen()

        print(Back.RED + '###### Experiment is starting NOW ######' + Style.RESET_ALL)
        print(Back.RED + '########################################' + Style.RESET_ALL)
        print(Back.RED + '########################################' + Style.RESET_ALL)
        print(Back.RED + '----------------------------------------' + Style.RESET_ALL)
        print(Back.RED + 'Recording of sounds will start when participant presses button once' + Style.RESET_ALL)

        # Wait for the participant to press button
        arduino_reader.get_data()

        # Start the recording process
        # recording_data_path = record_sounds(N_SPEAKERS, audio_player=audio_player,
        #                                     results_path=results_path, user_id=user_id)
        recording_data_path = results_path / ('participant_' + user_id)
        clear_screen()
        print(Back.GREEN + 'Recording successful!' + Style.RESET_ALL)
        input()
        clear_screen()

        print(Back.RED + 'Provide participant with headphones.' + Style.RESET_ALL)
        print(Back.RED + 'Make sure that left and right headphones are placed correctly.' + Style.RESET_ALL)
        input()
        clear_screen()

        ###### Test correct working of headphones ######
        print(Back.RED + 'Testing headphones!' + Style.RESET_ALL)

        ###### Play recorded sound ######
        file_name = 'userid_' + str(user_id) + '_speakerNum_' + str(5) + '_soundType_white.wav'
        file_to_play = recording_data_path / file_name
        # Extract data and sampling rate from file
        data, fs = sf.read(file_to_play.as_posix(), dtype='int16')
        sd.play(data, fs)

        print(Back.RED + 'Ask participant if sound from headphones can be heard well (y) or (n)?' + Style.RESET_ALL)
        hear_well = input()
        if hear_well == 'y':
            print(Back.GREEN + 'Very Well...' + Style.RESET_ALL)
        else:
            print(Back.RED + 'Aborting the experiment....' + Style.RESET_ALL)
            print(Back.RED + 'Redo the recording procedure!' + Style.RESET_ALL)
            exit()
        clear_screen()

        print(Back.RED + '###### Experiment is starting NOW ######' + Style.RESET_ALL)
        print(Back.RED + '########################################' + Style.RESET_ALL)
        print(Back.RED + '########################################' + Style.RESET_ALL)
        print(Back.RED + '----------------------------------------' + Style.RESET_ALL)
        print(Fore.GREEN + 'Participant starts experiment by pressing the button \n' + Style.RESET_ALL)

        # wait for button press of participant
        arduino_reader.get_data()

        # create tuples of all speakers with all sound types: N_SPEAKERS speakers * 2 sounds * 2 conditions
        stimulus_sequence = [(i, j, k) for i in np.arange(N_SPEAKERS) for j in np.arange(2) for k in conditions]
        # We need to walk over this sequence to ensure that we tested all speakers, conditions and sounds
        random_sequence = create_rand_balanced_order(n_items=N_SPEAKERS * 2 * 2, n_trials=N_TRIALS * 2)

        print(stimulus_sequence)
        print(random_sequence)
        print(len(random_sequence))

        # walk over random sequence
        for i_trial, i_tuple in enumerate(random_sequence):
            # decode the stimulus sequence
            num_speaker = stimulus_sequence[i_tuple][0]
            sound_type = stimulus_sequence[i_tuple][1]
            cond = stimulus_sequence[i_tuple][2]
            if sound_type == 1:
                sound_type_name = 'rippled'
            else:
                sound_type_name = 'white'

            print(Fore.GREEN + 'The following condition is tested: ' + cond + '\n' + Style.RESET_ALL)

            ###### Play recorded sound here ######
            file_name = 'userid_' + str(user_id) + '_speakerNum_' + str(num_speaker) + \
                '_soundType_' + sound_type_name + '.wav'
            file_to_play = recording_data_path / file_name
            # Extract data and sampling rate from file
            data, fs = sf.read(file_to_play.as_posix(), dtype='int16')

            # remove one side in mono condition
            if cond == 'mono':
                # left channel is 0, right channel is 1
                data[:, dominant_ear] = 0
            sd.play(data, fs)

            # start measuring the time
            ts = datetime.now()

            # get participant response
            print('Waiting for participant response...')
            user_estimate = arduino_reader.get_data()

            # calculate reaction time
            reaction_time = (datetime.now() - ts).total_seconds()

            # create data entry and add it to file
            result_item = [
                i_trial,          # Trial
                num_speaker,      # line number (speaker number)
                user_estimate,    # perceived elevation in degree
                sound_type_name,  # type of the sound
                cond,             # condition
                reaction_time,    # time for participant to respond
                user_id           # id of the user
            ]
            # write it to file
            res_file_writer.writerow(result_item)

            # wait some time until playing the next sound
            time.sleep(0.5)

        print(Back.GREEN + 'Experiment has finished' + Style.RESET_ALL)
def __init__(self):
    super().__init__()
    self.setupUi(self)
    self.text_to_speach_list = [self.Text_to_Speech, self.Text_to_Speech_2,
                                self.Text_to_Speech_3, self.Text_to_Speech_4]

    self.canvas = Canvas()
    self.canvas_comp = QtWidgets.QVBoxLayout(self.Mel_spec)
    self.canvas_comp.addWidget(self.canvas)
    self.toolbar = NavigationToolbar(self.canvas, self)
    self.canvas_comp.addWidget(self.toolbar)

    self.canvas1 = Canvas_spectr(self, width=21, height=4)
    self.Spectrum.setStyleSheet(" background-color: none; selection-background-color: none;")
    self.canvas1.setStyleSheet("background-color: none; selection-background-color: none;")
    self.SpectrAudio = QtWidgets.QStackedLayout(self.Spectrum)
    self.SpectrAudio.insertWidget(0, self.canvas1)

    self._Syntethis = Synthesis()
    self.player = AudioPlayer()

    self.speakerlist = SpeakerList()
    self.speakerlist.setStyleSheet("border: 0; height: 50px;")
    self.speakerlist_layout = QtWidgets.QVBoxLayout(self.SpeakerList)
    self.speakerlist_layout.addWidget(self.speakerlist)
    self.speakerlist.set_model.connect(self._Syntethis.T2LoadModel)
    self.speakerlist.load_data()

    self.LoadWaveGlowModel.clicked.connect(self.WGSelectModel)
    self.Syntethis.clicked.connect(self.SyntethisFunc)
    self.PlayWav.clicked.connect(self.playSpeeck)
    self.output_directory_button.clicked.connect(self._output_directory)
    self.log_directory_button.clicked.connect(self._log_directory)
    self.checkpoint_path_Box.stateChanged.connect(self._path_checkpoint_enabled)
    self.path_checkpoint_button.clicked.connect(self._path_checkpoint)
    self.training_files_button.clicked.connect(self._training_files)
    self.validation_files_button.clicked.connect(self._validation_files)
    self.AudioParametersBox.currentIndexChanged.connect(self._Syntethis.SetCurrentAudioParameters)
    self.AudioParametersBox.currentIndexChanged.connect(self.SetAudioParameters)
    self.StartTrain.clicked.connect(self._start_train)
    self.SaveAudio.clicked.connect(self.saveFile)
    self.Stop.clicked.connect(self._stop_train)
    self.VolumeSlider.valueChanged.connect(self.player.set_voulume)
    self.PauseWav.clicked.connect(self.player.pause)
    self.StopWav.clicked.connect(self.player.stop)

    self.double_slider = DoubleSlider()
    self.double_slider.setOrientation(QtCore.Qt.Horizontal)
    self.SpectrAudio.insertWidget(0, self.double_slider)
    self.SpectrAudio.setStackingMode(self.SpectrAudio.StackAll)
    self.SpectrAudio.setCurrentWidget(self.double_slider)
    self.double_slider.setStyleSheet(
        "QSlider{ border: 0; background-color: none; selection-background-color: none;}"
        "QSlider::groove:horizontal {border: 1px solid #000000; }"
        "QSlider::handle:horizontal{ border: 10px solide #5c5c5c;"
        "background: qlineargradient(x1:0, y1:0, x2:1, y2:1, stop:0 #b4b4b4, stop:1 #8f8f8f); }"
        " }"
        "QSlider::add-page:horizontal{ background: rgba(255, 255, 255, 10%);}"
        "QSlider::sub-page:horizontal{ background: rgba(255,140,0, 40%);}")

    self.player.positionChanged.connect(self.setPosMax)

    self.StartTrain.setToolTip(
        "checkpoint_path + warm_start - if the model is already trained and you want to start training a new model from it \n"
        " only checkpoint_path - not a trained model but a stopping point; training resumes from it \n "
        "without checkpoint_path and warm_start - train the model from scratch \n ")

    self.SetAudioParameters()
    self.load_data()
""" This Script Tests the functionality of the AudioPlayer module. Warning: there is a very real danger of screaming feedback, so lower speaker volume before using! """ from AudioPlayer import AudioPlayer player = AudioPlayer.AudioPlayer() player.load( "/Users/nolanmeske/Documents/Music Projects/4280/Orbital/renders/Orbital.wav" ) player.play() del player
def __init__(self):
    self.audioPlayer = AudioPlayer()
    self.update_song_list()
class ShowContent(QtGui.QDialog):
    def __init__(self, project=None, annotation=None, interaction=None, parent=None):
        super(ShowContent, self).__init__(parent)
        self.is_editing_time = False
        self.project = project
        self.annotation = annotation
        self.interaction = interaction
        self.ui = Ui_ShowContent()
        self.ui.setupUi(self)
        self.ui.btn_preview.setVisible(False)
        self.init_ui()
        self.audio_path = None
        self.buttons = ["RED", "GREEN", "BLUE", "YELLOW"]
        self.load_annotation()
        self.result = None

    def timer_focus_in(self):
        self.player.pause()
        self.is_editing_time = True

    def load_annotation(self):
        if self.project is not None and \
                self.annotation is not None:
            self.ui.time_begin.setTime(self.annotation.annotation_time)
            self.ui.textEdit.append(self.annotation.description)
            if self.annotation.interaction is not None:
                show_content = self.annotation.interaction
                self.ui.ckb_compulsory.setChecked(show_content.compulsory)
                self.ui.ckb_interactive.setChecked(show_content.interactive)
                self.ui.ckb_allows_end_content.setChecked(show_content.allow_end_content)
                self.ui.ckb_show_on_tv.setChecked(show_content.tv)
                self.ui.ckb_show_on_mobile.setChecked(show_content.mobile)
                self.ui.ckb_pause_main_video.setChecked(show_content.pause_main_video)
                self.ui.ckb_viber.setChecked(show_content.viber_alert)
                if show_content.sound_alert is not None:
                    self.ui.ckb_audio.setChecked(True)
                    self.audio_path = unicode(show_content.sound_alert)
                    self.audio_player = AudioPlayer()
                    self.ui.txt_sound.clear()
                    self.ui.txt_sound.append(os.path.split(self.audio_path)[1])
                    self.audio_player.load_audio(self.audio_path)
                    self.audio_player.setVisible(False)
                if show_content.icon.image == model.Icon.INFO:
                    self.ui.radio_info.setChecked(True)
                elif show_content.icon.image == model.Icon.SEXUAL:
                    self.ui.radio_sexual.setChecked(True)
                elif show_content.icon.image == model.Icon.VIOLENCE:
                    self.ui.radio_violence.setChecked(True)
                elif show_content.icon.image == model.Icon.YES:
                    self.ui.radio_yes.setChecked(True)
                elif show_content.icon.image == model.Icon.NO:
                    self.ui.radio_no.setChecked(True)
                else:
                    self.ui.radio_personalized.setChecked(True)
                    self.icon_path = show_content.icon.image
                    self.ui.radio_personalized.setIcon(QtGui.QIcon(self.icon_path))
                    self.ui.cmb_button.setEnabled(True)
                    if show_content.button is not None:
                        self.ui.cmb_button.setCurrentIndex(self.buttons.index(
                            show_content.button))
                if show_content.icon.position == model.Icon.BOT_LEFT:
                    self.ui.radio_bl.setChecked(True)
                elif show_content.icon.position == model.Icon.BOT_RIGHT:
                    self.ui.radio_br.setChecked(True)
                elif show_content.icon.position == model.Icon.TOP_LEFT:
                    self.ui.radio_tl.setChecked(True)
                elif show_content.icon.position == model.Icon.TOP_RIGHT:
                    self.ui.radio_tr.setChecked(True)
                elif show_content.icon.position == model.Icon.PERSONALIZED:
                    self.ui.radio_free_position.setChecked(True)
                    self.set_icon_boundaries(show_content.icon.bondaries)
                before_str = str(show_content.icon.relative_time)
                for index in xrange(self.ui.cmb_icon_before.count()):
                    str_index = str(self.ui.cmb_icon_before.itemText(index))
                    if before_str == str_index:
                        self.ui.cmb_icon_before.setCurrentIndex(index)
                        break
                duration_str = str(show_content.icon.duration_time)
                for index in xrange(self.ui.cmb_icon_duration.count()):
                    str_index = str(self.ui.cmb_icon_duration.itemText(index))
                    if duration_str == str_index:
                        self.ui.cmb_icon_duration.setCurrentIndex(index)
                        break
                for content in show_content.contents:
                    self.add_media_widget.add_media_item(content)
                if show_content.interaction_type == model.ShowContent.SHOW_CONTENT:
                    self.ui.radio_show.setChecked(True)
                elif show_content.interaction_type == model.ShowContent.SKIP:
                    self.ui.radio_skip.setChecked(True)
                    self.ui.time_skip_point.setTime(util.sec_to_qtime(show_content.skip_point))
                elif show_content.interaction_type == model.ShowContent.BACK_5:
                    self.ui.radio_back.setChecked(True)
                elif show_content.interaction_type == model.ShowContent.BACK_TO:
                    self.ui.radio_back_to.setChecked(True)
                    self.ui.time_back_point.setTime(util.sec_to_qtime(show_content.back_point))
                    self.ui.time_back_limite.setTime(util.sec_to_qtime(show_content.back_limite))
                self.ui.btn_preview.setEnabled(True)
            self.add_media_widget.update_media_list()

    @QtCore.pyqtSlot(int)
    def update_time_edit(self, time):
        if not self.is_editing_time:
            time_edit = self.ui.time_media
            time_edit.setTime(QtCore.QTime().addMSecs(time))

    @QtCore.pyqtSlot(QtCore.QTime)
    def editing_time(self, time):
        '''if self.is_editing_time:
            total_time = self.player.player.mediaObject().totalTime()
            max_time = QtCore.QTime(0,0,0).addMSecs(total_time)
            self.ui.time_edit.setTimeRange(QtCore.QTime(0,0,0), max_time)
            msec = time.hour()
            msec = time.minute() + msec*60
            msec = time.second() + msec*60
            msec = msec*1000 + time.msec()
            self.ui.video_player.seek(msec)'''
        pass

    def get_result(self):
        return self.result

    def set_icon_boundaries(self, bound):
        self.lbl_icon.move(bound.left, bound.top)
        self.lbl_icon.resize(bound.width, bound.height)

    def get_icon_bondaries(self):
        bound = Bondaries()
        qpoint = self.lbl_icon.pos()
        qsize = self.lbl_icon.size()
        bound.width = qsize.width()
        bound.height = qsize.height()
        bound.left = qpoint.x()
        bound.top = qpoint.y()
        qsize = self.lbl_screen.size()
        bound.screen_width, bound.screen_height = qsize.width(), qsize.height()
        print bound
        return bound

    def init_ui(self):
        self.setFixedSize(self.size())
        self.ui.radio_info.setChecked(True)
        self.ui.radio_tl.setChecked(True)
        self.ui.btn_choose_icon.setEnabled(False)
        self.ui.tabs.setCurrentIndex(0)

        layout = QtGui.QHBoxLayout()
        self.ui.tab_content.setLayout(layout)
        self.add_media_widget = AddMediaWidget(self.project, self.annotation)
        layout.addWidget(self.add_media_widget)

        self.ui.radio_personalized.toggled.connect(self.personalized_choosed)
        self.ui.btn_choose_icon.clicked.connect(self.choose_icon)

        self.lbl_screen = QtGui.QLabel(self.ui.widget_icon)
        self.lbl_screen.resize(self.ui.widget_icon.size())
        self.lbl_screen.setStyleSheet('background-color: rgb(0, 0, 0); \
            border-color: rgb(255, 255, 255);\
            font: 75 28pt "Ubuntu";\
            color: rgb(255, 255, 127);')
        self.lbl_screen.setAlignment(QtCore.Qt.AlignCenter)
        self.lbl_screen.setText("Tela")

        self.lbl_icon = MovebleLabel(u"", self.ui.widget_icon)
        self.lbl_icon.resize(30, 30)
        self.lbl_icon.move(10, 10)
        self.lbl_icon.setStyleSheet('background-color: rgb(255, 255, 0);\
            border-color: rgb(255, 255, 255);\
            font: 75 8pt "Ubuntu";\
            color: rgb(0, 0, 0);')
        self.lbl_icon.setEnabled(False)

        self.ui.radio_bl.toggled.connect(self.selected_bl)
        self.ui.radio_br.toggled.connect(self.selected_br)
        self.ui.radio_tr.toggled.connect(self.selected_tr)
        self.ui.radio_tl.toggled.connect(self.selected_tl)
        self.ui.radio_free_position.toggled.connect(self.selected_free_position)

        self.ui.btn_ok.clicked.connect(self.ok_pressed)
        self.ui.btn_cancel.clicked.connect(self.cancel_pressed)
        self.ui.btn_preview.clicked.connect(self.preview)
        self.ui.btn_preview.setEnabled(False)

        # player_holder = self.ui.player_widget
        # layout = QtGui.QVBoxLayout()
        self.player = VideoPlayer(parent=self.ui.player_widget)
        # layout.addWidget(self.player)
        # self.ui.tab_interaction.setLayout(layout)
        self.player.load_video(self.project.main_media)
        self.player.player.mediaObject().tick.connect(self.update_time_edit)
        # self.player.move(50,0)

        self.ui.radio_show.toggled.connect(self.selected_show)
        self.ui.radio_back.toggled.connect(self.selected_back)
        self.ui.radio_back_to.toggled.connect(self.selected_back_to)
        self.ui.radio_skip.toggled.connect(self.selected_skip)
        self.ui.btn_back_limite.clicked.connect(self.use_time_for_back_limite)
        self.ui.btn_back_point.clicked.connect(self.use_time_for_back_point)
        self.ui.btn_skip.clicked.connect(self.use_time_for_skip)
        self.ui.time_media.timeChanged.connect(self.editing_time)
        self.ui.time_media.installEventFilter(self)
        self.ui.time_media.setEnabled(False)
        self.ui.radio_show.setChecked(True)

        self.ui.ckb_audio.toggled.connect(self.toogled_audio_alert)
        self.ui.btn_choose_audio.clicked.connect(self.choose_audio)
        self.ui.btn_play_audio.clicked.connect(self.play_audio)

    @QtCore.pyqtSlot(bool)
    def toogled_audio_alert(self, value):
        if value:
            self.ui.btn_choose_audio.setEnabled(True)
            self.ui.btn_play_audio.setEnabled(True)
            self.ui.txt_sound.setEnabled(True)
        else:
            self.ui.btn_choose_audio.setEnabled(False)
            self.ui.btn_play_audio.setEnabled(False)
            self.ui.txt_sound.setEnabled(False)

    @QtCore.pyqtSlot()
    def choose_audio(self):
        path = QtGui.QFileDialog.getOpenFileName(self, u'Selecione um áudio', HOME_DIRECTORY,
                                                 model.CONTENT_TYPES[model.Media.AUDIO])
        if path == None:
            return
        self.audio_path = unicode(path)
        self.audio_player = AudioPlayer()
        self.ui.txt_sound.clear()
        self.ui.txt_sound.append(os.path.split(self.audio_path)[1])
        self.audio_player.load_audio(self.audio_path)
        self.audio_player.setVisible(False)

    @QtCore.pyqtSlot()
    def play_audio(self):
        self.audio_player.play()

    def eventFilter(self, myObject, event):
        if myObject == self.ui.time_media:
            if event.type() == QtCore.QEvent.FocusIn:
                self.timer_focus_in()
            elif event.type() == QtCore.QEvent.FocusOut:
                pass
        return False

    @QtCore.pyqtSlot()
    def use_time_for_back_limite(self):
        self.ui.time_back_limite.setTime(self.ui.time_media.time())

    def use_time_for_back_point(self):
        self.ui.time_back_point.setTime(self.ui.time_media.time())

    def use_time_for_skip(self):
        self.ui.time_skip_point.setTime(self.ui.time_media.time())

    @QtCore.pyqtSlot(bool)
    def selected_show(self, value):
        self.player.stop()
        self.player.setVisible(False)
        self.ui.time_back_limite.setVisible(False)
        self.ui.time_back_point.setVisible(False)
        self.ui.time_skip_point.setVisible(False)
        self.ui.time_media.setVisible(False)
        self.ui.lbl_back_limite.setVisible(False)
        self.ui.lbl_back_point.setVisible(False)
        self.ui.lbl_current_time.setVisible(False)
        self.ui.lbl_skip.setVisible(False)
        self.ui.btn_skip.setVisible(False)
        self.ui.btn_back_limite.setVisible(False)
        self.ui.btn_back_point.setVisible(False)

    @QtCore.pyqtSlot(bool)
    def selected_back(self, value):
        self.player.stop()
        self.player.setVisible(False)
        self.ui.time_back_limite.setVisible(False)
        self.ui.time_back_point.setVisible(False)
        self.ui.time_skip_point.setVisible(False)
        self.ui.time_media.setVisible(False)
        self.ui.lbl_back_limite.setVisible(False)
        self.ui.lbl_back_point.setVisible(False)
        self.ui.lbl_current_time.setVisible(False)
        self.ui.lbl_skip.setVisible(False)
        self.ui.btn_skip.setVisible(False)
        self.ui.btn_back_limite.setVisible(False)
        self.ui.btn_back_point.setVisible(False)

    @QtCore.pyqtSlot(bool)
    def selected_back_to(self, value):
        self.player.stop()
        self.player.setVisible(True)
        self.ui.time_back_limite.setVisible(True)
        self.ui.time_back_point.setVisible(True)
        self.ui.time_skip_point.setVisible(False)
        self.ui.time_media.setVisible(True)
        self.ui.lbl_back_limite.setVisible(True)
        self.ui.lbl_back_point.setVisible(True)
        self.ui.lbl_current_time.setVisible(True)
        self.ui.lbl_skip.setVisible(False)
        self.ui.btn_skip.setVisible(False)
        self.ui.btn_back_limite.setVisible(True)
        self.ui.btn_back_point.setVisible(True)

    @QtCore.pyqtSlot(bool)
    def selected_skip(self, value):
        self.player.stop()
        self.player.setVisible(True)
        self.ui.time_back_limite.setVisible(False)
        self.ui.time_back_point.setVisible(False)
        self.ui.time_skip_point.setVisible(True)
        self.ui.time_media.setVisible(True)
        self.ui.lbl_back_limite.setVisible(False)
        self.ui.lbl_back_point.setVisible(False)
        self.ui.lbl_current_time.setVisible(True)
        self.ui.lbl_skip.setVisible(True)
        self.ui.btn_skip.setVisible(True)
        self.ui.btn_back_limite.setVisible(False)
        self.ui.btn_back_point.setVisible(False)

    @QtCore.pyqtSlot()
    def ok_pressed(self):
        show_content = model.ShowContent()
        show_content.compulsory = self.ui.ckb_compulsory.isChecked()
        show_content.interactive = self.ui.ckb_interactive.isChecked()
        show_content.allow_end_content = self.ui.ckb_allows_end_content.isChecked()
        show_content.tv = self.ui.ckb_show_on_tv.isChecked()
        show_content.mobile = self.ui.ckb_show_on_mobile.isChecked()
        show_content.pause_main_video = self.ui.ckb_pause_main_video.isChecked()
        show_content.viber_alert = self.ui.ckb_viber.isChecked()

        icon = model.Icon()
        if self.ui.radio_personalized.isChecked():
            self.icon_path = util.copy_to_directory(self.project, unicode(self.icon_path))
            icon.image = self.icon_path
            show_content.button = str(self.ui.cmb_button.currentText())
        elif self.ui.radio_info.isChecked():
            icon.image = model.Icon.INFO
            show_content.button = "GREEN"
        elif self.ui.radio_sexual.isChecked():
            icon.image = model.Icon.SEXUAL
            show_content.button = "RED"
        elif self.ui.radio_violence.isChecked():
            icon.image = model.Icon.VIOLENCE
            show_content.button = "RED"
        elif self.ui.radio_yes.isChecked():
            icon.image = model.Icon.YES
            show_content.button = "GREEN"
        elif self.ui.radio_no.isChecked():
            icon.image = model.Icon.NO
            show_content.button = "RED"

        icon.relative_time = int(self.ui.cmb_icon_before.itemText(self.ui.cmb_icon_before.currentIndex()))
        icon.duration_time = int(self.ui.cmb_icon_duration.itemText(self.ui.cmb_icon_duration.currentIndex()))
        icon.bondaries = self.get_icon_bondaries()

        if self.ui.radio_bl.isChecked():
            icon.position = model.Icon.BOT_LEFT
        elif self.ui.radio_br.isChecked():
            icon.position = model.Icon.BOT_RIGHT
        elif self.ui.radio_tl.isChecked():
            icon.position = model.Icon.TOP_LEFT
        elif self.ui.radio_tr.isChecked():
            icon.position = model.Icon.TOP_RIGHT
        elif self.ui.radio_free_position.isChecked():
            icon.position = model.Icon.PERSONALIZED
        show_content.icon = icon

        if self.ui.ckb_audio.isChecked():
            if self.audio_path is not None:
                realpath = util.copy_to_directory(self.project, self.audio_path)
                show_content.sound_alert = realpath

        for media in self.add_media_widget.medias:
            show_content.add_content(media)

        self.annotation.description = unicode(self.ui.textEdit.toPlainText())
        self.annotation.interaction = show_content
        self.annotation.annotation_time = self.ui.time_begin.time()
        self.annotation.description = self.ui.textEdit.toPlainText()

        if self.ui.radio_show.isChecked():
            show_content.interaction_type = model.ShowContent.SHOW_CONTENT
        elif self.ui.radio_skip.isChecked():
            show_content.interaction_type = model.ShowContent.SKIP
            show_content.skip_point = util.qtime_to_sec(self.ui.time_skip_point.time())
        elif self.ui.radio_back.isChecked():
            show_content.interaction_type = model.ShowContent.BACK_5
        elif
self.ui.radio_back_to.isChecked(): print 'back_to' show_content.interaction_type = model.ShowContent.BACK_TO show_content.back_point = util.qtime_to_sec(self.ui.time_back_point.time()) show_content.back_limite = util.qtime_to_sec(self.ui.time_back_limite.time()) self.result = show_content self.close() @QtCore.pyqtSlot() def preview(self): import generation try: begintime = util.qtime_to_sec(self.annotation.annotation_time) begintime = begintime - int(self.ui.cmb_icon_before.currentText()) - 3 nclgenerator = generation.NclGenerator(self.project, generation.GenerationOptions()) print self.project.directory nclgenerator.dump_file(os.path.join(self.project.directory, 'medias', 'main.ncl'), begintime) current_path = os.path.dirname(os.path.realpath(__file__)) src = os.path.join(os.path.split(current_path)[0], 'files', 'medias', "connBase.ncl") dst = os.path.join(self.project.directory, "medias", "connBase.ncl") import shutil shutil.copy(src, dst) except: logging.exception('Error Generating the NCL') QtGui.QMessageBox.warning(self, u'Gerando NCL', u"Aconteceu um erro ao gerar o documento multimídia", QtGui.QMessageBox.Ok) return import subprocess as sp html = os.path.join(self.project.directory, "index.html") current_path = os.path.dirname(os.path.realpath(__file__)) script_path = os.path.join(os.path.split(current_path)[0], 'browser.py') sp.Popen(['python', script_path, html]) @QtCore.pyqtSlot() def cancel_pressed(self): self.close() @QtCore.pyqtSlot(bool) def selected_bl(self, value): self.lbl_icon.resize(30,30) self.lbl_icon.move(10,150) self.lbl_icon.setEnabled(False) @QtCore.pyqtSlot(bool) def selected_br(self, value): self.lbl_icon.resize(30,30) self.lbl_icon.move(300,150) self.lbl_icon.setEnabled(False) @QtCore.pyqtSlot(bool) def selected_tr(self, value): self.lbl_icon.resize(30,30) self.lbl_icon.move(300,10) self.lbl_icon.setEnabled(False) @QtCore.pyqtSlot(bool) def selected_tl(self, value): self.lbl_icon.resize(30,30) self.lbl_icon.move(10,10) self.lbl_icon.setEnabled(False) @QtCore.pyqtSlot(bool) def selected_free_position(self, value): self.lbl_icon.setEnabled(True) @QtCore.pyqtSlot() def choose_icon(self): path = QtGui.QFileDialog.getOpenFileName(self, u'Selecione uma imagem', HOME_DIRECTORY, model.CONTENT_TYPES[model.Media.IMAGE]) if path == None: return self.icon_path = path self.ui.radio_personalized.setIcon(QtGui.QIcon(path)) self.ui.radio_personalized.setText("...") @QtCore.pyqtSlot(bool) def personalized_choosed(self, checked): if checked: self.ui.btn_choose_icon.setEnabled(True) else: self.ui.btn_choose_icon.setEnabled(False)
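The ShowContent dialog above round-trips its skip and back points through util.qtime_to_sec and util.sec_to_qtime, whose definitions are not included in this document. The following is only a minimal sketch of those helpers, assuming PyQt4 (consistent with the QtGui.QDialog usage above) and whole-second precision; the project's real util module may differ.

from PyQt4 import QtCore


def qtime_to_sec(qtime):
    # Sketch: collapse a QtCore.QTime into whole seconds past 00:00:00.
    return qtime.hour() * 3600 + qtime.minute() * 60 + qtime.second()


def sec_to_qtime(seconds):
    # Sketch: inverse conversion, used when re-populating the time edits.
    return QtCore.QTime(0, 0, 0).addSecs(int(seconds))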
def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument('-g', '--gamepad',
                        help='Gamepad (if present) will be used for providing user inputs',
                        action='store_true',
                        default=None)
    parser.add_argument('-k', '--keyboard',
                        dest='gamepad',
                        help='Keyboard will be used for providing user inputs',
                        action='store_false')
    parser.add_argument('-u', '--force-use-unlicensed-assets',
                        help='Force using the unlicensed assets',
                        action='store_true',
                        default=False)
    parser.add_argument('-p', '--skip_pip_install',
                        dest='perform_pip_install',
                        help='Skip performing a pip install',
                        action='store_false',
                        default=True)
    parser.add_argument('save', nargs='?', help='Load a specific saved game file')
    args = parser.parse_args()
    # print('args =', args, flush=True)

    # Determine if application is a script file or frozen exe
    if getattr(sys, 'frozen', False):
        # Executing as a pyinstaller binary executable
        application_path = os.path.dirname(sys.executable)
    elif __file__:
        # Normal execution
        application_path = os.path.dirname(__file__)

    # Load required Python libraries
    if args.perform_pip_install:
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-U', '-r',
                               os.path.join(application_path, 'requirements.txt')],
                              stdout=subprocess.DEVNULL)

    application_name = 'pyDragonWarrior'
    saves_path = get_saves_path(application_path, application_name)

    os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = '1'  # Silence pygame outputs to standard out
    import pygame
    from AudioPlayer import AudioPlayer
    from GameDialog import GameDialog
    from GameLoop import GameLoop

    # Set the current working directory to the location of this file so that the game can be run from any path
    os.chdir(os.path.dirname(__file__))
    base_path = os.path.split(os.path.abspath(__file__))[0]
    icon_image_filename = os.path.join(base_path, 'icon.png')

    pygame.init()
    pygame.mouse.set_visible(False)
    pygame.display.set_caption(application_name)
    if os.path.exists(icon_image_filename):
        try:
            icon_image = pygame.image.load(icon_image_filename)
            pygame.display.set_icon(icon_image)
        except:
            print('ERROR: Failed to load', icon_image_filename, flush=True)

    GameDialog.force_use_menus_for_text_entry = args.gamepad

    # Initialize the game
    if not args.force_use_unlicensed_assets and os.path.exists(
            os.path.join(base_path, 'data', 'licensed_assets')):
        game_xml_path = os.path.join(base_path, 'game_licensed_assets.xml')
    else:
        game_xml_path = os.path.join(base_path, 'game.xml')
    win_size_pixels = None  # Point(2560, 1340)
    tile_size_pixels = 16 * 3
    game_loop = GameLoop(saves_path, base_path, game_xml_path, win_size_pixels, tile_size_pixels)

    # Run the game
    game_loop.run(args.save)

    # Exit the game
    AudioPlayer().terminate()
    pygame.joystick.quit()
    pygame.quit()
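main() obtains a per-user save directory from get_saves_path, which is defined elsewhere in pyDragonWarrior and not shown in this document. The sketch below is a hypothetical, standard-library-only illustration of such a helper, not the project's actual implementation.

import os
import sys


def get_saves_path(application_path: str, application_name: str) -> str:
    # Hypothetical sketch: pick a per-user data root, fall back to the
    # application directory, and make sure a 'saves' subdirectory exists.
    if sys.platform == 'win32':
        root = os.environ.get('APPDATA', application_path)
    elif sys.platform == 'darwin':
        root = os.path.expanduser('~/Library/Application Support')
    else:
        root = os.environ.get('XDG_DATA_HOME', os.path.expanduser('~/.local/share'))
    saves_path = os.path.join(root, application_name, 'saves')
    os.makedirs(saves_path, exist_ok=True)
    return saves_path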
class MainInterface(QtWidgets.QMainWindow, interface.Ui_MainWindow): def __init__(self): super().__init__() self.setupUi(self) self.text_to_speach_list = [ self.Text_to_Speech, self.Text_to_Speech_2, self.Text_to_Speech_3, self.Text_to_Speech_4 ] self.canvas = Canvas() self.canvas_comp = QtWidgets.QVBoxLayout(self.Mel_spec) self.canvas_comp.addWidget(self.canvas) self.toolbar = NavigationToolbar(self.canvas, self) self.canvas_comp.addWidget(self.toolbar) self.canvas1 = Canvas_spectr(self, width=21, height=4) self.Spectrum.setStyleSheet( " background-color: none; selection-background-color: none;") self.canvas1.setStyleSheet( "background-color: none; selection-background-color: none;") self.SpectrAudio = QtWidgets.QStackedLayout(self.Spectrum) self.SpectrAudio.insertWidget(0, self.canvas1) self._Syntethis = Synthesis() self.player = AudioPlayer() self.speakerlist = SpeakerList() self.speakerlist.setStyleSheet("border: 0; height: 50px;") self.speakerlist_layout = QtWidgets.QVBoxLayout(self.SpeakerList) self.speakerlist_layout.addWidget(self.speakerlist) self.speakerlist.set_model.connect(self._Syntethis.T2LoadModel) self.speakerlist.load_data() self.LoadWaveGlowModel.clicked.connect(self.WGSelectModel) self.Syntethis.clicked.connect(self.SyntethisFunc) self.PlayWav.clicked.connect(self.playSpeeck) self.output_directory_button.clicked.connect(self._output_directory) self.log_directory_button.clicked.connect(self._log_directory) self.checkpoint_path_Box.stateChanged.connect( self._path_checkpoint_enabled) self.path_checkpoint_button.clicked.connect(self._path_checkpoint) self.training_files_button.clicked.connect(self._training_files) self.validation_files_button.clicked.connect(self._validation_files) self.AudioParametersBox.currentIndexChanged.connect( self._Syntethis.SetCurrentAudioParameters) self.AudioParametersBox.currentIndexChanged.connect( self.SetAudioParameters) self.StartTrain.clicked.connect(self._start_train) self.SaveAudio.clicked.connect(self.saveFile) self.Stop.clicked.connect(self._stop_train) self.VolumeSlider.valueChanged.connect(self.player.set_voulume) self.PauseWav.clicked.connect(self.player.pause) self.StopWav.clicked.connect(self.player.stop) self.double_slider = DoubleSlider() self.double_slider.setOrientation(QtCore.Qt.Horizontal) self.SpectrAudio.insertWidget(0, self.double_slider) self.SpectrAudio.setStackingMode(self.SpectrAudio.StackAll) self.SpectrAudio.setCurrentWidget(self.double_slider) self.double_slider.setStyleSheet( "QSlider{ border: 0; background-color: none; selection-background-color: none;}" "QSlider::groove:horizontal {border: 1px solid #000000; }" "QSlider::handle:horizontal{ border: 10px solide #5c5c5c;" "background: qlineargradient(x1:0, y1:0, x2:1, y2:1, stop:0 #b4b4b4, stop:1 #8f8f8f); }" " }" "QSlider::add-page:horizontal{ background: rgba(255, 255, 255, 10%);}" "QSlider::sub-page:horizontal{ background: rgba(255,140,0, 40%);}") self.player.positionChanged.connect(self.setPosMax) self.StartTrain.setToolTip( "checkpoint_path + warm_start - если модель уже обученная и начать обучение новой модели \n" " только checkpoint_path - не модель обученная модель а точка остановки, продолжит \n " "без checkpoint_path и warm_start - модель с нуля \n ") self.SetAudioParameters() self.load_data() def SetAudioParameters(self): self.max_wav_value.setValue(self._Syntethis.AudioParameters[ self._Syntethis.currentAudioParameters][0]) self.sampling_rate.setValue(self._Syntethis.AudioParameters[ self._Syntethis.currentAudioParameters][1]) 
self.filter_length.setValue(self._Syntethis.AudioParameters[ self._Syntethis.currentAudioParameters][2]) self.hop_length.setValue(self._Syntethis.AudioParameters[ self._Syntethis.currentAudioParameters][3]) self.win_length.setValue(self._Syntethis.AudioParameters[ self._Syntethis.currentAudioParameters][4]) self.n_mel_channels.setValue(self._Syntethis.AudioParameters[ self._Syntethis.currentAudioParameters][5]) self.mel_fmin.setValue(self._Syntethis.AudioParameters[ self._Syntethis.currentAudioParameters][6]) self.mel_fmax.setValue(self._Syntethis.AudioParameters[ self._Syntethis.currentAudioParameters][7]) self.batch_size.setValue(8) def _train_param_load(self): if os.path.isfile("train_param.txt"): start_param = Serializ.load("train_param.txt") self.epoch.setValue(start_param[0][0]) self.iters_per_checkpoint.setValue(start_param[0][1]) self.cudnn_enabled.setChecked(start_param[0][2]) self.cudnn_benchmark.setChecked(start_param[0][3]) self.load_mel_from_disk.setChecked(start_param[0][4]) self.training_files.setText(start_param[0][5]) self.validation_files.setText(start_param[0][6]) self.encoder_kernel_size.setValue(start_param[1][0]) self.encoder_n_convolutions.setValue(start_param[1][1]) self.encoder_embedding_dim.setValue(start_param[1][2]) self.decoder_rnn_dim.setValue(start_param[1][3]) self.prenet_dim.setValue(start_param[1][4]) self.max_decoder_steps.setValue(start_param[1][5]) self.gate_threshold.setValue(start_param[1][6]) self.p_attention_dropout.setValue(start_param[1][7]) self.p_decoder_dropout.setValue(start_param[1][8]) self.attention_rnn_dim.setValue(start_param[2][0]) self.attention_dim.setValue(start_param[2][1]) self.attention_location_n_filters.setValue(start_param[2][2]) self.attention_location_kernel_size.setValue(start_param[2][3]) self.postnet_embedding_dim.setValue(start_param[3][0]) self.postnet_kernel_size.setValue(start_param[3][1]) self.postnet_n_convolutions.setValue(start_param[3][2]) self.use_saved_learning_rate.setChecked(start_param[4][0]) self.mask_padding.setChecked(start_param[4][1]) self.learning_rate.setValue(start_param[4][2]) self.weight_decay.setValue(start_param[4][3]) self.batch_size.setValue(start_param[4][4]) def _start_train(self): ExperimentDataParameters = [ self.epoch.value(), self.iters_per_checkpoint.value(), self.cudnn_enabled.isChecked(), self.cudnn_benchmark.isChecked(), self.load_mel_from_disk.isChecked(), self.training_files.text(), self.validation_files.text(), self.text_cleaners_box.currentText() ] EncoderDecoderParameters = [ self.encoder_kernel_size.value(), self.encoder_n_convolutions.value(), self.encoder_embedding_dim.value(), self.decoder_rnn_dim.value(), self.prenet_dim.value(), self.max_decoder_steps.value(), self.gate_threshold.value(), self.p_attention_dropout.value(), self.p_decoder_dropout.value() ] AttentionLocationLayerParameters = [ self.attention_rnn_dim.value(), self.attention_dim.value(), self.attention_location_n_filters.value(), self.attention_location_kernel_size.value() ] MelProcessingNetworkNarameters = [ self.postnet_embedding_dim.value(), self.postnet_kernel_size.value(), self.postnet_n_convolutions.value() ] OptimizationHyperparameters = [ self.use_saved_learning_rate.isChecked(), self.mask_padding.isChecked(), self.learning_rate.value(), self.weight_decay.value(), self.batch_size.value() ] Serializ.dump("train_param.txt", [ ExperimentDataParameters, EncoderDecoderParameters, AttentionLocationLayerParameters, MelProcessingNetworkNarameters, OptimizationHyperparameters ]) hpr = create_hparams( 
ExperimentDataParameters, self._Syntethis.AudioParameters[ self.AudioParametersBox.currentIndex()], EncoderDecoderParameters, AttentionLocationLayerParameters, MelProcessingNetworkNarameters, OptimizationHyperparameters) self._train_param_load() self.train = Train() self.train.log_signal.connect(self.setLog) self.x = Thread(target=self.train.train, args=( self.output_directory.text(), self.log_directory.text(), [ None, [None, self.path_checkpoint.text() ][os.path.isfile(self.path_checkpoint.text())] ][self.checkpoint_path_Box.isChecked()], self.warm_start.isChecked(), False, False, False, hpr, )) self.logBox.setPlainText("") self.x.start() def _stop_train(self): self.train.check_train = True self.x.join() self.x._stop() self.setLog("train stopped") def _output_directory(self): self.out_dir = QtWidgets.QFileDialog.getExistingDirectory( self, "Выберите папку вывода модели") self.output_directory.setText(self.out_dir) def _log_directory(self): self.log_dir = QtWidgets.QFileDialog.getExistingDirectory( self, "Выберите папку вывода логов") self.log_directory.setText(self.log_dir) def _path_checkpoint_enabled(self, state): if state == 2: self.path_checkpoint.setEnabled(True) self.path_checkpoint_button.setEnabled(True) else: self.path_checkpoint.setEnabled(False) self.path_checkpoint_button.setEnabled(False) def _path_checkpoint(self): self.checkpoint_model = QtWidgets.QFileDialog.getOpenFileName( self, "Выберите файл модели/контрольной точки от которой продолжить обучение" )[0] self.path_checkpoint.setText(self.checkpoint_model) def _training_files(self): self.train_file = QtWidgets.QFileDialog.getOpenFileName( self, "Выберите файл модели/контрольной точки от которой продолжить обучение" )[0] self.training_files.setText(self.train_file) def _validation_files(self): self.valid_file = QtWidgets.QFileDialog.getOpenFileName( self, "Выберите файл модели/контрольной точки от которой продолжить обучение" )[0] self.validation_files.setText(self.valid_file) def load_data(self): self._train_param_load() data_load = Serializ.load("config.txt") if data_load != False: self._WGModel = data_load[0] self._Syntethis.WGLoadModel(self._WGModel) self.WaveGlowModel.setText(self._WGModel) def WGSelectModel(self): self._WGModel = QtWidgets.QFileDialog.getOpenFileName( self, "Выберите файл модели/контрольной вокодера")[0] self._Syntethis.WGLoadModel(self._WGModel) self.WaveGlowModel.setText(self._WGModel) self.LoadWaveGlowModel.setEnabled(True) def SyntethisFunc(self): if self.text_to_speach_list[ self.tabWidget.currentIndex()].toPlainText() != "": model_param = [self._WGModel] Serializ.dump("config.txt", model_param) self.player._stop() self.canvas.update_g( self._Syntethis.SyntethisFunc( self.text_to_speach_list[ self.tabWidget.currentIndex()].toPlainText(), int(self.SampleRate.currentText()), self.AddSpeech.isChecked())) self.player.set_file('TempFile/test.wav') self.canvas1.print_spec('TempFile/test.wav') self.canvas_comp.update() self.Mel_spec.repaint() self.repaint() def playSpeeck(self): print(self.player.duration()) self.double_slider.setMaximum(self.player.duration() / 1000) self.player.play() def saveFile(self): file_name = QtWidgets.QFileDialog.getSaveFileName( self, "Сохранить речь", '', 'WAV (*.wav)')[0] if file_name != "": if self.AddSpeech.isChecked() == False: self._Syntethis.SaveFile(file_name, int(self.SampleRate.currentText())) else: self._Syntethis.SaveFile(file_name, int(self.SampleRate.currentText()), add=1) def setLog(self, text): temp = self.logBox.toPlainText() self.logBox.setText(temp + text) 
self.logBox.moveCursor(QtGui.QTextCursor.End) def setPosMax(self, MAX): self.double_slider.setValue(MAX / 1000) print(MAX / 1000)
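The MainInterface class above constructs an AudioPlayer whose definition is not included here. As one illustration, the minimal sketch below implements just the surface MainInterface relies on (set_file, play, pause, stop, set_voulume, duration and a positionChanged signal) on top of PyQt5's QMediaPlayer; it is an assumption made for clarity, not the project's real AudioPlayer class.

from PyQt5 import QtCore, QtMultimedia


class AudioPlayerSketch(QtCore.QObject):
    # Playback position in milliseconds, mirroring QMediaPlayer.positionChanged.
    positionChanged = QtCore.pyqtSignal(int)

    def __init__(self):
        super().__init__()
        self._player = QtMultimedia.QMediaPlayer()
        self._player.positionChanged.connect(self.positionChanged)

    def set_file(self, path):
        # Load a local audio file for playback.
        self._player.setMedia(QtMultimedia.QMediaContent(QtCore.QUrl.fromLocalFile(path)))

    def set_voulume(self, value):
        # Name kept as spelled in MainInterface (target of VolumeSlider.valueChanged).
        self._player.setVolume(int(value))

    def duration(self):
        # Total duration of the loaded media in milliseconds.
        return self._player.duration()

    def play(self):
        self._player.play()

    def pause(self):
        self._player.pause()

    def stop(self):
        self._player.stop()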