# Example #1
def inv_spectrogram_tensorflow(spectrogram):
    '''Builds a TensorFlow graph converting a normalized dB spectrogram to a waveform.

    Unlike inv_spectrogram, this does NOT invert the preemphasis. The caller should call
    inv_preemphasis on the output after running the graph.
    '''
    hp = get_hparams()
    # Undo normalization / dB scaling to recover the linear magnitude spectrogram.
    linear = _db_to_amp_tensorflow(_denormalize_tensorflow(spectrogram) + hp.ref_level_db)
    # Sharpen the magnitudes before Griffin-Lim phase reconstruction.
    return _griffin_lim_tensorflow(tf.pow(linear, hp.power))
# Example #2
def find_endpoint(wav, threshold_db=-40, min_silence_sec=0.8):
    '''Returns the sample index just past the last audible content in `wav`.

    Slides a window of `min_silence_sec` over the waveform; the first window whose
    peak falls below `threshold_db` marks the end. Falls back to len(wav).
    '''
    win = int(get_hparams().sample_rate * min_silence_sec)
    hop = int(win / 4)
    silence_level = _db_to_amp(threshold_db)
    pos = hop
    while pos < len(wav) - win:
        if np.max(wav[pos:pos + win]) < silence_level:
            return pos + hop
        pos += hop
    return len(wav)
# Example #3
def _griffin_lim(S):
    '''librosa implementation of Griffin-Lim
    Based on https://github.com/librosa/librosa/issues/434

    Iteratively estimates a phase for the magnitude spectrogram S and returns
    the reconstructed time-domain waveform.
    '''
    # Start from a random phase; each iteration keeps |S| fixed and refines phase.
    angles = np.exp(2j * np.pi * np.random.rand(*S.shape))
    # np.complex was a deprecated alias of the builtin `complex` and was removed
    # in NumPy 1.20+/1.24; use the explicit complex128 dtype instead.
    S_complex = np.abs(S).astype(np.complex128)
    y = _istft(S_complex * angles)
    for _ in range(get_hparams().griffin_lim_iters):
        # Re-estimate phase from the current waveform, keep the target magnitude.
        angles = np.exp(1j * np.angle(_stft(y)))
        y = _istft(S_complex * angles)
    return y
# Example #4
def _griffin_lim_tensorflow(S):
    '''TensorFlow implementation of Griffin-Lim
    Based on https://github.com/Kyubyong/tensorflow-exercises/blob/master/Audio_Processing.ipynb
    '''
    with tf.variable_scope('griffinlim'):
        # tf.signal stft/istft operate on batches; wrap the single spectrogram.
        batched = tf.expand_dims(S, 0)
        magnitudes = tf.identity(tf.cast(batched, dtype=tf.complex64))
        waveform = _istft_tensorflow(magnitudes)
        for _ in range(get_hparams().griffin_lim_iters):
            estimate = _stft_tensorflow(waveform)
            # Normalize the estimate to unit magnitude (epsilon guards division by zero),
            # keeping only its phase, then re-apply the target magnitudes.
            phase = estimate / tf.cast(tf.maximum(1e-8, tf.abs(estimate)), tf.complex64)
            waveform = _istft_tensorflow(magnitudes * phase)
        return tf.squeeze(waveform, 0)
# Example #5
def inv_spectrogram(spectrogram):
    '''Converts spectrogram to waveform using librosa'''
    hp = get_hparams()
    # Undo normalization and dB scaling to get the linear magnitude spectrogram.
    linear = _db_to_amp(_denormalize(spectrogram) + hp.ref_level_db)
    # Reconstruct phase via Griffin-Lim, then invert the preemphasis filter.
    return inv_preemphasis(_griffin_lim(linear ** hp.power))
# Example #6
def inv_preemphasis(x):
    '''Inverts the preemphasis filter: applies the IIR filter 1 / (1 - k*z^-1).'''
    k = get_hparams().preemphasis
    return signal.lfilter([1], [1, -k], x)
# Example #7
def spectrogram(y):
    '''Computes a normalized dB-scale linear spectrogram from waveform y.'''
    hp = get_hparams()
    magnitudes = np.abs(_stft(preemphasis(y)))
    # Convert to dB relative to the reference level, then squash into [0, 1].
    db = _amp_to_db(magnitudes) - hp.ref_level_db
    return _normalize(db).astype(np.float32)
# Example #8
def _denormalize(S):
    '''Maps a [0, 1]-normalized spectrogram back to dB in [min_level_db, 0].'''
    min_db = get_hparams().min_level_db
    return np.clip(S, 0, 1) * -min_db + min_db
# Example #9
def _denormalize_tensorflow(S):
    '''TensorFlow counterpart of _denormalize: maps [0, 1] back to [min_level_db, 0] dB.'''
    min_db = get_hparams().min_level_db
    return tf.clip_by_value(S, 0, 1) * -min_db + min_db
# Example #10
def _normalize(S):
    '''Maps a dB spectrogram from [min_level_db, 0] into [0, 1], clipping outliers.'''
    min_db = get_hparams().min_level_db
    return np.clip((S - min_db) / -min_db, 0, 1)
# Example #11
def save_wav(wav, path):
    '''Writes waveform `wav` to `path` at the hparams sample rate.

    The signal is rescaled so its peak maps to 32767 (16-bit full scale);
    the 0.01 floor on the peak avoids amplifying near-silent audio to full scale.
    '''
    # Scale a copy rather than the caller's array: the original `wav *= ...`
    # mutated the input buffer in place as a side effect.
    scaled = wav * (32767 / max(0.01, np.max(np.abs(wav))))
    # NOTE(review): librosa.output.write_wav was removed in librosa 0.8+;
    # if upgrading librosa, switch to soundfile.write or scipy.io.wavfile.write.
    librosa.output.write_wav(path, scaled, get_hparams().sample_rate)
# Example #12
def _build_mel_basis():
    '''Builds the mel filterbank matrix used to project linear spectrograms to mel scale.'''
    hp = get_hparams()
    # num_freq is the number of FFT bins (n_fft // 2 + 1), so invert that relation.
    n_fft = (hp.num_freq - 1) * 2
    # Pass arguments by keyword: librosa.filters.mel made sr/n_fft keyword-only
    # in librosa 0.10, and keywords also work on older versions.
    return librosa.filters.mel(sr=hp.sample_rate, n_fft=n_fft, n_mels=hp.num_mels)
# Example #13
def _stft_parameters():
    '''Derives (n_fft, hop_length, win_length) for the STFT from hparams.'''
    hp = get_hparams()
    # num_freq is the number of FFT bins (n_fft // 2 + 1); invert that relation.
    n_fft = (hp.num_freq - 1) * 2
    # Convert frame shift/length from milliseconds to samples.
    hop_length = int(hp.frame_shift_ms / 1000 * hp.sample_rate)
    win_length = int(hp.frame_length_ms / 1000 * hp.sample_rate)
    return n_fft, hop_length, win_length
# Example #14
def load_wav(path, offset=0.0, duration=None):
    '''Loads audio from `path`, resampled to hparams.sample_rate.

    Returns only the waveform; the sample rate librosa reports is discarded.
    '''
    audio, _ = librosa.core.load(path, sr=get_hparams().sample_rate, offset=offset, duration=duration)
    return audio