示例#1
0
def evaluate_model(model, data_loader, checkpoint_dir, limit_eval_to=5):
    """Evaluate the model on held-out mel spectrograms.

    For up to ``limit_eval_to`` test mel files, generate a waveform with
    ``model.generate`` and save both the wav and a plot of it under
    ``<checkpoint_dir>/eval``.

    Args:
        model: object exposing ``generate(mel) -> waveform``.
        data_loader: loader whose ``dataset`` exposes ``test_mel_path``.
        checkpoint_dir: directory under which the ``eval`` subdir is written.
        limit_eval_to: stop after this many files (default 5).

    Note:
        Relies on module-level names ``global_step``, ``hp``, ``soundsc``,
        ``wavfile``, ``np`` and ``plt`` — TODO confirm they are defined in
        the enclosing module.
    """
    # BUG FIX: the original also read dataset.test_mel_path into
    # `test_wav_path` (copy-paste error) and listed that directory; the
    # resulting locals were never used, so they are removed entirely.
    test_mel_path = data_loader.dataset.test_mel_path
    test_mel_files = sorted(os.listdir(test_mel_path))
    output_dir = os.path.join(checkpoint_dir, 'eval')
    # Create the eval directory up front; writes below fail otherwise.
    os.makedirs(output_dir, exist_ok=True)
    for counter, f in enumerate(test_mel_files):
        # stop evaluation early via limit_eval_to
        if counter >= limit_eval_to:
            break
        mel = np.load(os.path.join(test_mel_path, f))
        wav = model.generate(mel)
        # save wav
        wav_path = os.path.join(
            output_dir,
            "checkpoint_step{:09d}_wav_{}.wav".format(global_step, counter))
        wavfile.write(wav_path, hp.sample_rate, soundsc(wav))
        # save wav plot
        fig_path = os.path.join(
            output_dir,
            "checkpoint_step{:09d}_wav_{}.png".format(global_step, counter))
        plt.plot(wav.reshape(-1))
        plt.savefig(fig_path)
        # clear the current figure so successive plots don't overlay
        plt.clf()
示例#2
0
def evaluate_model(model, data_loader, checkpoint_dir, limit_eval_to=5):
    """Evaluate the model on held-out mel spectrograms (resampled variant).

    For up to ``limit_eval_to`` test mel files: load the mel, take the
    reduced part via ``dataset.split``, linearly interpolate it back to the
    original timescale when ``hp.time_resample_factor != 1``, generate a
    waveform, and save both the wav and a plot under
    ``<checkpoint_dir>/eval``.

    Args:
        model: object exposing ``generate(mel) -> waveform``.
        data_loader: loader whose ``dataset`` exposes ``test_mel_path`` and
            ``split(mel)``.
        checkpoint_dir: directory under which the ``eval`` subdir is written.
        limit_eval_to: stop after this many files (default 5).

    Note:
        Relies on module-level names ``global_step``, ``hp``, ``soundsc``,
        ``wavfile``, ``np`` and ``plt`` — TODO confirm they are defined in
        the enclosing module.
    """
    # BUG FIX: the original also read dataset.test_mel_path into
    # `test_wav_path` (copy-paste error) and listed that directory; the
    # resulting locals were never used, so they are removed entirely.
    test_mel_path = data_loader.dataset.test_mel_path
    test_mel_files = sorted(os.listdir(test_mel_path))
    output_dir = os.path.join(checkpoint_dir, 'eval')
    # Create the eval directory up front; writes below fail otherwise.
    os.makedirs(output_dir, exist_ok=True)
    for counter, f in enumerate(test_mel_files):
        # stop evaluation early via limit_eval_to
        if counter >= limit_eval_to:
            break
        mel = np.load(os.path.join(test_mel_path, f))
        mel_r, _ = data_loader.dataset.split(mel)
        m_in = mel_r
        rs = hp.time_resample_factor
        if rs != 1:
            # Per-row linear interpolation back to the original timescale:
            # each of the m_in.shape[0] rows is stretched along axis 1 by
            # the factor rs.
            upsampled = [
                np.interp(np.arange(rs * m_in.shape[1]) / rs,
                          np.arange(m_in.shape[1]),
                          m_in[i, :])
                for i in range(m_in.shape[0])
            ]
            m_in = np.vstack(upsampled)
        wav = model.generate(m_in)
        # save wav
        wav_path = os.path.join(
            output_dir,
            "checkpoint_step{:09d}_wav_{}.wav".format(global_step, counter))
        wavfile.write(wav_path, hp.sample_rate, soundsc(wav))
        # save wav plot
        fig_path = os.path.join(
            output_dir,
            "checkpoint_step{:09d}_wav_{}.png".format(global_step, counter))
        plt.plot(wav.reshape(-1))
        plt.savefig(fig_path)
        # clear the current figure so successive plots don't overlay
        plt.clf()
示例#3
0
# Timing accumulators for the sampling loop below.
# BUG FIX: `pre_time` was never initialized, yet the loop below does
# `pre_time += elapsed` — that raised NameError on the first iteration.
pre_time = 0
post_time = 0
joint_time = 0
for jj in range(n_plot):
    # use extra steps from earlier
    pjj = preds[:(finished_step[jj] + extra_steps), jj]
    spectrogram = pjj * saved_std + saved_mean
    mel_dump = "sample_{}_mels.npz".format(jj)
    np.savez(mel_dump, mels=spectrogram)
    prename = "sample_{}_pre.wav".format(jj)

    this_time = time.time()

    reconstructed_waveform = sonify(spectrogram,
                                    len(spectrogram) * step, logmel)
    end_this_time = time.time()
    wavfile.write(prename, sample_rate, soundsc(reconstructed_waveform))
    elapsed = end_this_time - this_time

    print("Elapsed pre sampling time {} s".format(elapsed))
    pre_time += elapsed
    joint_time += elapsed

    fftsize = 512
    substep = 32
    postname = "sample_{}_post.wav".format(jj)
    this_time = time.time()

    rw_s = np.abs(
        stft(reconstructed_waveform,
             fftsize=fftsize,
             step=substep,