def evaluate(model, neval_batches):
    # Synthesize up to `neval_batches` validation utterances and report the
    # total inference time.
    model.eval()
    cnt = 0
    elapsed = 0
    filename = config.validation_files
    with open(filename, 'r') as file:
        lines = file.readlines()

    # Each validation line is pipe-separated; the second field holds the text
    # to synthesize, one utterance per iteration.
    for line in lines:
        tokens = line.strip().split('|')
        text = tokens[1]
        sequence = np.array(text_to_sequence(text))[None, :]
        sequence = torch.from_numpy(sequence).long()

        start = time.time()
        mel_outputs, mel_outputs_postnet, _, alignments = model.inference(
            sequence)
        end = time.time()
        elapsed = elapsed + (end - start)
        cnt += 1

        print('.', end='')
        if cnt >= neval_batches:
            break

    print('\nElapsed: {}{:.5f}{} sec'.format(bcolors.OKGREEN, elapsed,
                                             bcolors.ENDC))
    return
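For orientation, a minimal sketch of how this helper might be driven. `load_model`, `hparams`, and the checkpoint filename are hypothetical stand-ins for whatever the surrounding project provides to build and restore the Tacotron 2 model.

# Hypothetical driver; the names below are assumptions, not part of the example.
if __name__ == '__main__':
    model = load_model(hparams)                                   # assumed project helper
    model.load_state_dict(torch.load('tacotron2.pt')['state_dict'])
    with torch.no_grad():
        evaluate(model, neval_batches=10)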
Example #2
    def get_text(self, text):
        # Convert raw text to a sequence of symbol IDs and wrap it in an IntTensor.
        text_norm = torch.IntTensor(text_to_sequence(text))
        return text_norm
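A hedged sketch of where such a `get_text` helper typically lives: a text-only dataset built from pipe-separated metadata lines. The class name, field layout, and two-field assumption are illustrative, and `text_to_sequence` is assumed to come from the surrounding project.

import torch
from torch.utils.data import Dataset

class TextDataset(Dataset):
    """Illustrative only: pairs get_text with "<audio_path>|<text>" metadata lines."""
    def __init__(self, lines):
        self.entries = [line.strip().split('|') for line in lines]

    def get_text(self, text):
        return torch.IntTensor(text_to_sequence(text))

    def __getitem__(self, index):
        _, text = self.entries[index]          # assumes exactly two fields per line
        return self.get_text(text)

    def __len__(self):
        return len(self.entries)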
Example #3
    # Load the trained Tacotron 2 checkpoint; the full model object is stored
    # under the 'model' key.
    checkpoint = torch.load('BEST_checkpoint.tar')
    model = checkpoint['model']
    model.eval()

    # Load the WaveGlow vocoder and run it in half precision, keeping the
    # invertible 1x1 convolutions in float32.
    waveglow = torch.load('waveglow_256channels.pt')['model']
    waveglow.cuda().eval().half()
    for k in waveglow.convinv:
        k.float()
    denoiser = Denoiser(waveglow)

    text = "相对论直接和间接的催生了量子力学的诞生 也为研究微观世界的高速运动确立了全新的数学模型"
    text = pinyin.get(text, format="numerical", delimiter=" ")
    print(text)
    sequence = np.array(text_to_sequence(text))[None, :]
    sequence = torch.autograd.Variable(
        torch.from_numpy(sequence)).cuda().long()

    # Synthesize the mel spectrogram and visualize the outputs and the
    # attention alignment.
    mel_outputs, mel_outputs_postnet, _, alignments = model.inference(sequence)
    plot_data((mel_outputs.float().data.cpu().numpy()[0],
               mel_outputs_postnet.float().data.cpu().numpy()[0],
               alignments.float().data.cpu().numpy()[0].T))

    ensure_folder('images')
    plt.savefig('images/mel_spec.jpg')

    # WaveGlow was cast to half precision above, so feed it a float16 mel.
    mel_outputs_postnet = mel_outputs_postnet.type(torch.float16)
    with torch.no_grad():
        audio = waveglow.infer(mel_outputs_postnet, sigma=0.666)
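A short follow-up sketch for persisting the generated waveform. It assumes the 22050 Hz sampling rate commonly used with the public WaveGlow checkpoints; the output filename is arbitrary.

from scipy.io.wavfile import write

# Move the synthesized audio to the CPU, cast back to float32, and write a WAV.
# The 22050 Hz sample rate is an assumption about the training data.
waveform = audio[0].data.cpu().float().numpy()
write('output.wav', 22050, waveform)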
Example #4
if __name__ == '__main__':
    print('Reading vocab...')
    in_vocab = read_vocab()
    in_vocab += ['<unk>', '<EOS>', '<mask>']   # special tokens appended at the end
    out_vocab = ['(', ')', '<TOK>', '<EOS>']   # output side is a small bracketing alphabet
    print('Done.')

    print('Reading test data...')
    BATCH_SIZE = 128
    # WSJ section 23 of the Penn Treebank: column 0 holds the input token
    # sequences, column 1 the corresponding target sequences.
    _, X_test = ptb(section='wsj_23', directory='data/', column=0)
    _, y_test = ptb(section='wsj_23', directory='data/', column=1)
    X_test, y_test = sort_by_len(X_test, y_test)
    X_test_raw, _ = batch(X_test, batch_size=BATCH_SIZE, mask='<mask>')
    y_test_raw, _ = batch(y_test, batch_size=BATCH_SIZE, mask='<mask>')
    # Map tokens to indices, then batch, padding with index len(in_vocab) - 1
    # ('<mask>' in the input vocabulary).
    X_test_seq, word_to_n, n_to_word = text_to_sequence(X_test, in_vocab)
    y_test_seq, _, _ = text_to_sequence(y_test, out_vocab)
    X_test_seq, X_test_masks = batch(X_test_seq, batch_size=BATCH_SIZE, mask=len(in_vocab) - 1)
    y_test_seq, y_test_masks = batch(y_test_seq, batch_size=BATCH_SIZE, mask=len(in_vocab) - 1)
    print('Done.')

    print('Building model...')
    collection = dy.ParameterCollection()
    seq2seq = Seq2SeqAttention(collection, len(in_vocab), len(out_vocab))
    print('Done.')

    print('Loading model...')
    RUN = 'runs/baseline'
    checkpoint = os.path.join(RUN, 'baseline.model')
    print('Loading from %s.' % checkpoint)
    collection.populate(checkpoint)