Example #1
File: main.py Project: siahuat0727/tirf
def main():
    args = parse()
    match args.task:
        case 'play':
            play(args)
        case 'gen':
            generate(args)
Example #2
    def ask_operation(chunk):
        'ask the operator to classify this chunk'

        question = Menu(TRUTH_OPTIONS,
                        title="Who's speaking in the audio you just heard?")
        if chunk.truth not in TRUTH_OPTIONS:
            play(chunk.cut(audio))
            chunk.truth = question.ask()
Example #3
 def _user_key(self, key):
     """ user pressed this key or button """
     if (key > 0) and (key <= vivi_defines.CATEGORIES_NUMBER):
         self.judged_cat.emit(key - vivi_defines.CATEGORIES_CENTER_OFFSET)
     elif key == 8:
         utils.play(self.judge_filename + '.wav')
     elif key == 9:
         self.judged_cat.emit(JUDGEMENT_CANCEL)
     elif key == 0:
         self.judged_cat.emit(vivi_defines.CATEGORY_WEIRD)
Example #4
 def _user_key(self, key):
     """ user pressed this key or button """
     if (key > 0) and (key <= vivi_defines.CATEGORIES_NUMBER):
         self.judged_cat.emit(key - vivi_defines.CATEGORIES_CENTER_OFFSET)
     elif key == 8:
         utils.play(self.judge_filename+'.wav')
     elif key == 9:
         self.judged_cat.emit(JUDGEMENT_CANCEL)
     elif key == 0:
         self.judged_cat.emit(vivi_defines.CATEGORY_WEIRD)
Example #5
def get_interplay_data(agent1, agent2, num_episode):
    experiences = []
    for i in range(num_episode//2):
        experience, z = utils.play(agent1, agent2)
        for i, e in enumerate(experience):
            experiences.append(e + [z if i % 2 == 0 else -z])

        experience, z = utils.play(agent2, agent1)
        for i, e in enumerate(experience):
            experiences.append(e + [z if i % 2 == 0 else -z])

    return experiences
Example #6
def get_win_rate(agent1, agent2, num):
    sum_z = 0
    for i in range(num//2):
        _, z = utils.play(agent1, agent2)
        sum_z += z
        _, z = utils.play(agent2, agent1)
        sum_z -= z
    if num%2 == 1:
        if np.random.random() > 0.5:
            _, z = utils.play(agent1, agent2)
            sum_z += z
        else:
            _, z = utils.play(agent2, agent1)
            sum_z -= z
    return sum_z / num / 2 + 0.5
Example #7
def playLive(name, quality):
    """
    :param name: string: stream/channel name
    :param quality: string: qualities[quality]
    0 = Source, 1 = 1080p60, 2 = 1080p30, 3 = 720p60, 4 = 720p30, 5 = 540p30, 6 = 480p30, 7 = 360p30, 8 = 240p30, 9 = 144p30
    -1 = Choose quality dialog
    * any other value for quality will use addon setting
    """
    videoQuality = utils.getVideoQuality(quality)
    if videoQuality != -1:
        # videoQuality == -1 if quality dialog was cancelled
        stream = CONVERTER.convertStreamToPlayItem(TWITCHTV.getStreamInfo(name))
        stream['path'] = TWITCHTV.getLiveStream(name, videoQuality)
        utils.play(stream['path'], stream)
        utils.execIrcPlugin(name)
Example #8
def playLive(name, quality):
    """
    :param name: string: stream/channel name
    :param quality: string: qualities[quality]
    qualities = {'-1': -1, '0': 0, '1': 1, '2': 2, '3': 3, '4': 4}
    0 = Best, 1 = 720, 2 = 480, 3 = 360, 4 = 226,
    -1 = Choose quality dialog
    * any other value for quality will use addon setting
    """
    videoQuality = utils.getVideoQuality(quality)
    if videoQuality != -1:
        # videoQuality == -1 if quality dialog was cancelled
        stream = CONVERTER.convertStreamToPlayItem(TWITCHTV.getStreamInfo(name))
        stream['path'] = TWITCHTV.getLiveStream(name, videoQuality)
        utils.play(stream['path'], stream)
        utils.execIrcPlugin(name)
Example #9
def get_selfplay_data(agent, num_episode):
    experiences = []
    for i in range(num_episode):
        experience, z = utils.play(agent, agent)
        for i, e in enumerate(experience):
            experiences.append(e + [z if i % 2 == 0 else -z])
    return experiences
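
Note: Examples #5, #6, and #9 all rely on the same assumed contract for utils.play(agent_a, agent_b): it plays one full game with agent_a moving first and returns (experience, z), where experience holds one record per move and z is the final outcome from the first mover's perspective. The snippet below is a runnable illustration of the outcome-labelling convention they share; the records and the value of z are made up for the demonstration.

# Illustration only: a fake three-move game that the first mover won (z = 1).
experience = [['s0', 'a0'], ['s1', 'a1'], ['s2', 'a2']]  # moves 0, 1, 2
z = 1
labelled = [e + [z if i % 2 == 0 else -z] for i, e in enumerate(experience)]
print(labelled)
# Even-indexed moves (first mover) are labelled +1, odd-indexed moves -1:
# [['s0', 'a0', 1], ['s1', 'a1', -1], ['s2', 'a2', 1]]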
Example #10
def playLive(name, quality):
    """
    :param name: string: stream/channel name
    :param quality: string: qualities[quality]
    qualities = {'-1': -1, '0': 0, '1': 1, '2': 2, '3': 3, '4': 4}
    0 = Best, 1 = 720, 2 = 480, 3 = 360, 4 = 226,
    -1 = Choose quality dialog
    * any other value for quality will use addon setting
    """
    videoQuality = utils.getVideoQuality(quality)
    if videoQuality != -1:
        # videoQuality == -1 if quality dialog was cancelled
        stream = CONVERTER.convertStreamToPlayItem(TWITCHTV.getStreamInfo(name))
        stream['path'] = TWITCHTV.getLiveStream(name, videoQuality)
        utils.play(stream['path'], stream)
        utils.execIrcPlugin(name)
Example #11
 def play(self):
     if not self.examine_note.basename:
         return
     if self.plot_actions.has_selection():
         start, dur = self.get_zoom_seconds()
         #print "play zoom in on: ", start, dur
         #utils.play(self.examine_note.wavfile,
         #    start, dur)
         #return
     else:
         start = self.examine_note.note_start
         dur = self.examine_note.note_length
         # to avoid icky blips in the big grid display
         start += CONVOLUTION_DELAY
         dur -= CONVOLUTION_DELAY
         #print "play note: ", start, dur
     utils.play(self.examine_note.basename + '.wav', start, dur)
Example #12
def minimizer(state: dict, depth: int):
    if is_final(state) or depth == 0:
        return heur(state)

    value = inf

    for x in range(0, LINE_SIZE):
        if pos_is_playable(state, x, PLAYER_0):
            played = play(state, x, PLAYER_0)
            value = min(value, maximizer(played, depth - 1))

    return value
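
Note: Example #12 quotes only the minimizing half of the search. A maximizer counterpart consistent with it, reconstructed by symmetry (it is not part of the quoted source), would look like this:

def maximizer(state: dict, depth: int):
    # Reconstructed by symmetry with minimizer above; not from the source.
    if is_final(state) or depth == 0:
        return heur(state)

    value = -inf

    for x in range(0, LINE_SIZE):
        if pos_is_playable(state, x, PLAYER_1):
            played = play(state, x, PLAYER_1)
            value = max(value, minimizer(played, depth - 1))

    return value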
Example #13
def playVideo(_id, quality):
    """
    :param _id: string: video id
    :param quality: string: qualities[quality]
    qualities = {'-1': -1, '0': 0, '1': 1, '2': 2, '3': 3, '4': 4}
    0 = Best, 1 = 720, 2 = 480, 3 = 360, 4 = 226,
    -1 = Choose quality dialog
    * any other value for quality will use addon setting
    """
    videoQuality = utils.getVideoQuality(quality)
    if videoQuality != -1:
        # videoQuality == -1 if quality dialog was cancelled
        videoInfo = CONVERTER.getVideoInfo(TWITCHTV.getVideo(_id))
        simplePlaylist = TWITCHTV.getVideoPlaylist(_id, videoQuality)
        playlistItems = PLAYLIST_CONVERTER.convertToXBMCPlaylist(simplePlaylist, videoInfo.get('title', ''),
                                                                 videoInfo.get('thumbnail', ''))
        if playlistItems != ():
            (playlist, listItem) = playlistItems
            utils.play(listItem.get('path', ''), listItem)
        else:
            raise TwitchException(TwitchException.NO_PLAYABLE)
Example #14
def playVideo(_id, quality):
    """
    :param _id: string: video id
    :param quality: string: qualities[quality]
    0 = Source, 1 = 1080p60, 2 = 1080p30, 3 = 720p60, 4 = 720p30, 5 = 540p30, 6 = 480p30, 7 = 360p30, 8 = 240p30, 9 = 144p30
    -1 = Choose quality dialog
    * any other value for quality will use addon setting
    """
    videoQuality = utils.getVideoQuality(quality)
    oauthtoken = utils.getOauthToken()
    if videoQuality != -1:
        # videoQuality == -1 if quality dialog was cancelled
        videoInfo = CONVERTER.getVideoInfo(TWITCHTV.getVideo(_id))
        simplePlaylist = TWITCHTV.getVideoPlaylist(_id, videoQuality, oauthtoken)
        playlistItems = PLAYLIST_CONVERTER.convertToXBMCPlaylist(simplePlaylist, videoInfo.get('title', ''),
                                                                 videoInfo.get('thumbnail', ''))
        if playlistItems != ():
            (playlist, listItem) = playlistItems
            utils.play(listItem.get('path', ''), listItem)
        else:
            raise TwitchException(TwitchException.NO_PLAYABLE)
Example #15
def playVideo(_id, quality):
    """
    :param _id: string: video id
    :param quality: string: qualities[quality]
    qualities = {'-1': -1, '0': 0, '1': 1, '2': 2, '3': 3, '4': 4}
    0 = Best, 1 = 720, 2 = 480, 3 = 360, 4 = 226,
    -1 = Choose quality dialog
    * any other value for quality will use addon setting
    """
    videoQuality = utils.getVideoQuality(quality)
    if videoQuality != -1:
        # videoQuality == -1 if quality dialog was cancelled
        videoInfo = CONVERTER.getVideoInfo(TWITCHTV.getVideo(_id))
        simplePlaylist = TWITCHTV.getVideoPlaylist(_id, videoQuality)
        playlistItems = PLAYLIST_CONVERTER.convertToXBMCPlaylist(
            simplePlaylist, videoInfo.get('title', ''),
            videoInfo.get('thumbnail', ''))
        if playlistItems != ():
            (playlist, listItem) = playlistItems
            utils.play(listItem.get('path', ''), listItem)
        else:
            raise TwitchException(TwitchException.NO_PLAYABLE)
Example #16
def minimax(state: dict, depth: int):
    to_play = -1
    value = -inf

    for x in range(0, LINE_SIZE):

        if pos_is_playable(state, x, PLAYER_1):
            played = play(state, x, PLAYER_1)
            game_value = minimizer(played, depth)
            if game_value > value:
                value = game_value
                to_play = x

    return to_play
Example #17
 def post(self):
     args = self.reqparse.parse_args()
     token = args['token']
     move = args['move']
     register = game['register']
     status = game['status']
     player = utils.get_key(register, token)
     if player != -1:
         game['status'][player] = move
         if status['player1'] != None and status['player2'] != None:
             previousGame = utils.play(status['player1'], status['player2'])
             status['previousGame'] = previousGame
         print(game)
         return 1
     else:
         return -1
Example #18
File: alphabeta.py Project: YK0L0DIY/IA
def minimizer(state: dict, depth: int, alpha: float, beta: float):
    if is_final(state) or depth == 0:
        return heur(state)

    value = inf

    for x in range(0, LINE_SIZE):
        if pos_is_playable(state, x, PLAYER_0):
            played = play(state, x, PLAYER_0)
            value = min(value, maximizer(played, depth - 1, alpha, beta))

            if value <= alpha:
                return value

            beta = min(beta, value)

    return value
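
Note: as with Example #12, only the minimizer is quoted here. The matching alpha-beta maximizer, again reconstructed by symmetry rather than taken from the source, prunes when value reaches beta and raises alpha as better moves are found:

def maximizer(state: dict, depth: int, alpha: float, beta: float):
    # Reconstructed by symmetry; not present in the quoted source.
    if is_final(state) or depth == 0:
        return heur(state)

    value = -inf

    for x in range(0, LINE_SIZE):
        if pos_is_playable(state, x, PLAYER_1):
            played = play(state, x, PLAYER_1)
            value = max(value, minimizer(played, depth - 1, alpha, beta))

            if value >= beta:
                return value

            alpha = max(alpha, value)

    return value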
Example #19
        1j * phase)  # that fixes the abs() operation previously done

    features = np.transpose(features, (1, 0))
    return feature_extractor.get_audio_from_stft_spectrogram(features)


for pred, target, phase in train_dataset:

    # pred = np.transpose(pred, (1, 0))
    # target = np.transpose(target, (1, 0))
    print("Min:", np.min(pred), "Max:", np.max(pred))
    print("Min:", np.min(target), "Max:", np.max(target))
    print("Min:", np.min(phase), "Max:", np.max(phase))

    phase = np.transpose(phase.numpy(), (1, 0))
    print("Pred:", pred.shape)
    print("Phase:", phase.shape)
    print("target:", target.shape)
    audio = revert_features_to_audio(target.numpy(), phase)
    break

print("Audio length:", len(audio))
play(audio, sample_rate=16000)

# Min: -0.5883574 Max: 10.728247
# Min: -4.8901606 Max: 7.3664904
# Min: -3.1415927 Max: 3.1415927
# Phase: (129, 201)
# target: (201, 129, 1, 1)
# Audio length: 12800
Example #20
from utils import play

if __name__ == '__main__':
    play()
Example #21
from TicTacToe import TicTacToe
from Players.Human import Human
from Players.AI import WeakAI, SmartAI
from utils import play

if __name__ == '__main__':
  x_player = Human('X')
  o_player = SmartAI('O')
  t = TicTacToe()
  play(t, x_player, o_player, print_game=True)
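
Note: the play helper Example #21 imports is not shown. A minimal sketch of such a turn-based game loop, assuming a TicTacToe-style interface (available_moves, make_move, current_winner, print_board, and a get_move method on each player are all assumptions, not confirmed by the source):

def play(game, x_player, o_player, print_game=True):
    # Hypothetical sketch; method names are assumptions.
    players = {'X': x_player, 'O': o_player}
    letter = 'X'  # X conventionally moves first
    while game.available_moves():
        square = players[letter].get_move(game)
        game.make_move(square, letter)
        if print_game:
            game.print_board()
        if game.current_winner:
            return letter  # the winner's letter
        letter = 'O' if letter == 'X' else 'X'
    return None  # draw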
Example #22
 def user_judge(self, judge_filename):
     """ prompt user to judge this audio file """
     self.judge_filename = judge_filename
     self.display()
     utils.play(self.judge_filename + '.wav')
Example #23
 def user_judge(self, judge_filename):
     """ prompt user to judge this audio file """
     self.judge_filename = judge_filename
     self.display()
     utils.play(self.judge_filename+'.wav')
Example #24
 def table_play(self):
     row = self.table.currentRow()
     if row >= 0:
         wavfile = self.data[row][0]
         utils.play(wavfile)
Example #25
def connect4(path='data/connect4', seed=161831415):
    env = envs.Connect4()

    rand_epochs = 1000
    ai_epochs = 0

    test_games = 500
    mem_size = 200
    log_freq = 100

    # 3 states per position
    depth = 3
    # The state is preprocessed and has this shape now
    dim_state = [depth, *env.n_state]
    log = Logger(log_freq)
    # Simple dqn
    net = dqn.Conn(depth, env.n_action)
    ai = agents.DQNAgent(env.n_state,
                         env.n_action,
                         net,
                         logger=log,
                         lr=1e-3,
                         discount_factor=.98,
                         exploration_decay=.98,
                         exploration_min=.1,
                         state_preprocessor=f_one_hot_state(depth,
                                                            -1,
                                                            new_size=[1] +
                                                            dim_state))
    mem = LinearMemory(dim_state, mem_size, ai.learn)
    # Train first against random agent
    rand_act = envs.Connect4.random_act()

    # Loading
    # TODO : ai.load(path)

    # Training
    print('Training vs random')
    train(ai, rand_act, mem, env, rand_epochs, log, False)
    # print('Training vs ai')
    # TODO : train(ai, ai.act, mem, env, ai_epochs, log, True)

    # Saving
    # TODO : ai.save(path)

    # Testing
    ai.exploration_rate = 0
    win, draw = test(ai.act,
                     rand_act,
                     env,
                     games=test_games,
                     state_preprocessor=ai.state_preprocessor)

    print(f'Test on {test_games} games : Victories : {win} Draws : {draw}')
    print(f'Win or draw rate : {(win + draw) / test_games * 100:.1f} %')

    # Playing
    while 1:
        print('New Game')
        p1, p2 = play(ai.act,
                      user_act(env.n_action),
                      env,
                      state_preprocessor=ai.state_preprocessor)
        if p1 > 0:
            print('AI won')
        elif p2 > 0:
            print('You won')
        else:
            print('Error / Draw')
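
Note: the play helper this driver calls is not quoted. A minimal sketch of the assumed interface, alternating the two act callables in env and returning each side's accumulated reward (the env.reset and env.step signatures below are assumptions):

def play(act1, act2, env, state_preprocessor=lambda s: s):
    # Hypothetical sketch of the assumed helper; not from the source.
    state = env.reset()
    rewards = [0, 0]
    acts = (act1, act2)
    turn, done = 0, False
    while not done:
        action = acts[turn % 2](state_preprocessor(state))
        state, reward, done = env.step(action)
        rewards[turn % 2] += reward
        turn += 1
    return rewards[0], rewards[1]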
Example #26
def confirm_truth(clf,
                  audio,
                  chunk_group_or_voice,
                  group=10,
                  limit=10,
                  speed=1):
    '''Successively confirm truth suggestions of groups of chunks.
    Spawns refit and repredict on new ground truth
    whenever possible, on a separate thread.'''

    features = get_features(audio)
    chunks = get_chunks(audio)
    print('Confirm label classifications.')
    print('Type:')
    print('      * just ENTER to confirm\n'
          '      * "s" to set SPEAKER as ground truth\n'
          '      * "t" to set TRANSLATOR as ground truth\n'
          '      * "b" to set BOTH as ground truth\n'
          '      * "a" to hear it again\n'
          '      * "/" to inspect one by one\n'
          '      * and anything else to stop.')

    def _refit_and_predict():
        training_chunks = {
            voice: [c for c in chunks if c.truth == voice]
            for voice in VOICES
        }
        spawn_refit_and_predict(clf, features, training_chunks, chunks)

    while (limit):
        limit = limit - 1  # we need to explicitly decrement to enable repeat
        if chunk_group_or_voice in VOICES:
            unknown = [
                c for c in chunks
                if not c.truth and c.label[0] == chunk_group_or_voice
            ]
        else:
            unknown = [c for c in chunk_group_or_voice if not c.truth]

        best_first = get_best_labeled(unknown, group, 1000)

        if not best_first:
            # give up min audible length
            best_first = get_best_labeled(unknown, group, 0)
            if not best_first:
                # really done
                break

        for best in best_first:
            print('#' * 30, best.label, chunks.index(best))
        play(sum(best.cut(audio) for best in best_first), speed)

        typed = raw_input().strip().lower()
        truth_option = {'s': SPEAKER, 't': TRANSLATOR, 'b': BOTH}.get(typed)

        if not typed:
            # default to label as ground truth
            for best in best_first:
                best.truth = best.label[0]
            _refit_and_predict()
        elif truth_option:
            # truth value set explicitly
            for best in best_first:
                best.truth = truth_option
            _refit_and_predict()
        elif typed == 'a':
            # play again
            limit = limit + 1  # restore limit
            continue
        elif typed == '/':
            # start inspecting one by one
            # increase limit to inspect at least all group
            limit = limit + group
            group = 1
            speed = 1  # slow down
            continue
        else:
            break
Example #27
            ax.set_xlabel('Time step [frame]')
            ax.legend(['Threshold', 'Likelihood', 'Mask'])

        ax = axes[0 + has_mask]
        ax.plot(batch_np['loudness_db'][:TRIM])
        ax.plot(audio_features_mod['loudness_db'][:TRIM])
        ax.set_ylabel('loudness_db')
        ax.legend(['Original', 'Adjusted'])

        ax = axes[1 + has_mask]
        ax.plot(librosa.hz_to_midi(batch_np['f0_hz'][:TRIM]))
        ax.plot(librosa.hz_to_midi(audio_features_mod['f0_hz'][:TRIM]))
        ax.set_ylabel('f0 [midi]')
        _ = ax.legend(['Original', 'Adjusted'])

        plt.show()

    else:
        print(
            '\nSkipping auto-adjust (no notes detected or ADJUST box empty).')

# Resynthesize audio.
af = batch if audio_features_mod is None else audio_features_mod
outputs = model(af, training=False)

audio_gen = model.get_audio_from_outputs(outputs)

play(audio_gen, savefile=jbprompt.selected_filename)

print("Finished Playing Sound")
Example #28
            if not ate: self.body.pop(0)

        self.update_state()
        return self.state, reward, False, {}


if __name__ == "__main__":
    env = Snake()
    agent = Rainbow_DQN_Agent(environment=env,
                              model_class=SnakeModel,
                              learning_rate=0.001,
                              gamma=0.99,
                              replay_buffer_size=20000,
                              minimum_buffer_size=5000,
                              noisy_net=True,
                              epsilon=1,
                              epsilon_decay=0.999,
                              epsilon_min=0.01,
                              prioritized_sample=True,
                              alpha=0.5,
                              beta0=0.6,
                              beta_iters=100000,
                              transfer_frequency=1000,
                              device=torch.device('cuda:0'))
    agent.train(num_episodes=2000,
                save_path='models/snake_model.pth',
                batch_size=128)
    play(environment=env,
         model_class=SnakeModel,
         model_path='models/snake_model.pth',
         num_episodes=1)
Example #29
import fileinput
import re

from utils import play


if __name__ == "__main__":

    m = re.match(f"(.+) players; last marble is worth (.+) points", list(fileinput.input())[0])
    nplayers, nmarbles = int(m.group(1)), int(m.group(2))
    score = play(nplayers, nmarbles, list)

    players = list(score.keys())
    players.sort(key=lambda x: score[x], reverse=True)
    print(f"Player {players[0]} has highest score with {score[players[0]]}")
Example #30
def play(request):
    return utils.play(request)
Example #31
        x = torch.relu(self.fc2(x))
        x = self.fc3(x)
        return x

if __name__ == "__main__":
    env = gym.make('MountainCar-v0')
    agent = Rainbow_DQN_Agent(
        environment = env,
        model_class = CartPoleModel,
        learning_rate = 0.01,
        gamma = 0.95,
        replay_buffer_size = 10000,
        minimum_buffer_size = 1000,
        prioritized_sample = False,
        alpha = 0,
        beta0 = 0,
        beta_iters = 0,
        transfer_frequency = 200,
        device = torch.device('cpu')
    )
    agent.train(
        num_episodes = 500,
        save_path = 'models/mountaincar_model.pth',
        batch_size = 128
    )
    play(
        environment = env,
        model_class = CartPoleModel,
        model_path = 'models/mountaincar_model.pth',
        num_episodes = 1
    )
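
Note: Examples #28 and #31 share a play(environment=..., model_class=..., model_path=..., num_episodes=...) helper that is not quoted. A minimal sketch of a greedy rollout under the assumed interface (the no-argument model_class constructor and the gym-style step signature are assumptions):

import torch

def play(environment, model_class, model_path, num_episodes):
    # Hypothetical sketch; constructor and step signatures are assumed.
    model = model_class()
    model.load_state_dict(torch.load(model_path))
    model.eval()
    for _ in range(num_episodes):
        state, done = environment.reset(), False
        while not done:
            with torch.no_grad():
                q_values = model(torch.as_tensor(state, dtype=torch.float32))
            state, _, done, _ = environment.step(int(q_values.argmax()))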
Example #32
 def table_play(self):
     row = self.table.currentRow()
     if row >= 0:
         wavfile = self.data[row][0]
         utils.play(wavfile)
Example #33
 def inner(*_, **__):
     nonlocal id
     play(id)
Example #34
heuristic1 = heuristic_1  # Red
heuristic2 = random_heuristic  # Blue

timeouts = sorted(
    set([float(x) if (x % 50 == 0) else 50 for x in range(50, 1001)]))
search_depths = list([x for x in range(1, 6)])

plt.ylabel('maximum depth')
plt.xlabel('maximum timeout')
plt.title('Heuristics results')

for depth in search_depths:
    for timeout in timeouts:
        print(' depth: ', depth, ' timeout: ', timeout)
        player1 = QuixoPlayer(depth, timeout, heuristic1)  #  X
        player2 = QuixoPlayer(depth, timeout, heuristic2)  #  O
        try:
            game = play(player1, player2)
        except Exception:
            plt.plot(timeout, depth, c='black', marker='o', linewidth=1.0)
            continue
        if (game.game_over('X')):  # If X is the winner
            plt.plot(timeout, depth, c='red', marker='o', linewidth=1.0)
        else:
            if (game.game_over('O')):  # If O is the winner
                plt.plot(timeout, depth, c='blue', marker='o', linewidth=1.0)
            else:  # Tie
                plt.plot(timeout, depth, c='black', marker='o', linewidth=1.0)

plt.show()
Example #35
    global current_synthesized_model, synthesizer
    encoder.load_model("Real_Time_Voice_Cloning/pretrained/encoder/saved_models/pretrained.pt", "cpu")  # what is this used for
    vocoder.load_model("Real_Time_Voice_Cloning/pretrained/vocoder/saved_models/pretrained.pt", verbose=False)
    # todo: figure out how the multiple utterances work
    synthesizer = get_synthesizer("Real_Time_Voice_Cloning/pretrained/synthesizer/saved_models/logs-pretrained")
    if len(text) > 4 and text[-4:] == ".txt":  # check if file
        words = ""
        with open(text) as file:
            for line in file:
                words += line
        text = words
        del words
    if isinstance(audio_samples, str):
        utterance = Utterance("name", "speaker_name", None, None, np.load(audio_samples), None, None)
    else:
        utterance = create_utterance(audio_samples)
    current_synthesized_model = generate_spectrogram(text, utterance)
    audio_file = decode_spectrogram(*current_synthesized_model)
    return audio_file, utterance.embed


if __name__ == '__main__':
    sample_rate = syn_params.hparams.sample_rate
    while True:
        input("Hit enter to record:")
        wav = utils.record(sample_rate, 5)
        input("Hit enter to play")
        utils.play(wav, sample_rate)
        print(wav.shape)
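
Note: utils.record and utils.play in Example #35 are not shown. A minimal sketch of that pair, assuming the sounddevice library (the project may implement them differently):

import sounddevice as sd

def record(sample_rate, seconds):
    # Capture mono audio and block until the recording completes.
    wav = sd.rec(int(seconds * sample_rate), samplerate=sample_rate, channels=1)
    sd.wait()
    return wav.flatten()

def play(wav, sample_rate):
    # Play the buffer and block until playback completes.
    sd.play(wav, samplerate=sample_rate)
    sd.wait()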