def _process_utterance(wav_path, hparams):
    try:
        # Load the audio as a numpy array and scale int16 PCM to [-1, 1]
        sr, wav = read(wav_path)
        wav = wav.astype(np.float32)
        MAX_WAV_VALUE = 32768.0
        wav = wav / MAX_WAV_VALUE
    except FileNotFoundError:
        # Catch missing wav exception
        print('file {} present in csv metadata is not present in wav folder. skipping!'.format(wav_path))
        return None
    except Exception:
        # Fall back to soundfile, which already returns float data in [-1, 1]
        wav, sr = sf.read(wav_path)

    # Mel spectrogram, compressed/normalized according to the target vocoder
    D = audio._stft(wav, hparams)
    mel_spectrogram = audio._linear_to_mel(np.abs(D), hparams)
    if hparams.vocoder == 'waveglow':
        mel_spectrogram = audio.dynamic_range_compression(mel_spectrogram)
    else:
        mel_spectrogram = audio.amp_to_db(mel_spectrogram)
        mel_spectrogram = audio.normalize(mel_spectrogram, hparams)
        mel_spectrogram = (mel_spectrogram * 8.) - 4.
    mel_spectrogram = mel_spectrogram.astype(np.float32)
    mel_frames = mel_spectrogram.shape[1]

    constant_values = 0.
    if hparams.use_lws:
        # Ensure time resolution adjustment between audio and mel-spectrogram
        fft_size = hparams.n_fft if hparams.win_size is None else hparams.win_size
        l, r = audio.pad_lr(wav, fft_size, audio.get_hop_size(hparams))

        # Zero pad audio signal
        out = np.pad(wav, (l, r), mode='constant', constant_values=constant_values)
    else:
        # Ensure time resolution adjustment between audio and mel-spectrogram
        l_pad, r_pad = audio.librosa_pad_lr(wav, hparams.n_fft, audio.get_hop_size(hparams), 1)

        # Pad audio signal (librosa-style left/right padding to avoid frame inconsistency)
        out = np.pad(wav, (l_pad, r_pad), mode='constant', constant_values=constant_values)

    assert len(out) >= mel_frames * audio.get_hop_size(hparams)

    # Time resolution adjustment:
    # ensure the length of the raw audio is a multiple of the hop size so that we can
    # use transposed convolution to upsample
    out = out[:mel_frames * audio.get_hop_size(hparams)]
    assert len(out) % audio.get_hop_size(hparams) == 0

    return out, mel_spectrogram
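# Usage sketch (illustrative only, not from the original source): how the in-memory
# variant above might be driven over a folder of wavs. The folder layout, the helper
# name, and the module-level `hparams` object are assumptions for this example.
def _example_preprocess_folder(wav_dir, out_dir, hparams):
    import glob

    os.makedirs(out_dir, exist_ok=True)
    metadata = []
    for path in sorted(glob.glob(os.path.join(wav_dir, '*.wav'))):
        result = _process_utterance(path, hparams)
        if result is None:
            # Missing file: already reported and skipped inside _process_utterance
            continue
        out, mel = result
        # `out` is padded/trimmed so that len(out) == mel.shape[1] * hop_size, which is
        # what a transposed-convolution upsampler in the vocoder expects.
        name = os.path.splitext(os.path.basename(path))[0]
        np.save(os.path.join(out_dir, name + '-wave.npy'), out, allow_pickle=False)
        np.save(os.path.join(out_dir, name + '-mel.npy'), mel, allow_pickle=False)
        metadata.append((name, mel.shape[1]))
    return metadata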
def _process_utterance(out_dir, index, wav_path, text, trim_silence=False):
    # Load the audio to a numpy array:
    wav = audio.load_wav(wav_path)

    # Trim begin/end silences
    # NOTE: the threshold was chosen for clean signals
    # TODO: Remove, get this out of here.
    if trim_silence:
        wav, _ = librosa.effects.trim(wav, top_db=60, frame_length=2048, hop_length=512)

    if hparams.highpass_cutoff > 0.0:
        wav = audio.low_cut_filter(wav, hparams.sample_rate, hparams.highpass_cutoff)

    # Mu-law quantize
    if is_mulaw_quantize(hparams.input_type):
        # Trim silences in the mu-law quantized domain
        silence_threshold = 0
        if silence_threshold > 0:
            # [0, quantize_channels)
            out = P.mulaw_quantize(wav, hparams.quantize_channels - 1)
            start, end = audio.start_and_end_indices(out, silence_threshold)
            wav = wav[start:end]
        constant_values = P.mulaw_quantize(0, hparams.quantize_channels - 1)
        out_dtype = np.int16
    elif is_mulaw(hparams.input_type):
        # [-1, 1]
        constant_values = P.mulaw(0.0, hparams.quantize_channels - 1)
        out_dtype = np.float32
    else:
        # [-1, 1]
        constant_values = 0.0
        out_dtype = np.float32

    # Compute a mel-scale spectrogram from the trimmed wav:
    # (N, D)
    mel_spectrogram = audio.logmelspectrogram(wav).astype(np.float32).T

    if hparams.global_gain_scale > 0:
        wav *= hparams.global_gain_scale

    # Time domain preprocessing
    if hparams.preprocess is not None and hparams.preprocess not in ["", "none"]:
        f = getattr(audio, hparams.preprocess)
        wav = f(wav)

    # Clip
    if np.abs(wav).max() > 1.0:
        print("Warning: abs max value exceeds 1.0: {}".format(np.abs(wav).max()))
        # ignore this sample
        return ("dummy", "dummy", -1, "dummy")
    wav = np.clip(wav, -1.0, 1.0)

    # Set waveform target (out)
    if is_mulaw_quantize(hparams.input_type):
        out = P.mulaw_quantize(wav, hparams.quantize_channels - 1)
    elif is_mulaw(hparams.input_type):
        out = P.mulaw(wav, hparams.quantize_channels - 1)
    else:
        out = wav

    # Zero pad: needed to adjust time resolution between audio and mel-spectrogram
    l, r = audio.pad_lr(out, hparams.fft_size, audio.get_hop_size())
    if l > 0 or r > 0:
        out = np.pad(out, (l, r), mode="constant", constant_values=constant_values)
    N = mel_spectrogram.shape[0]
    assert len(out) >= N * audio.get_hop_size()

    # Time resolution adjustment:
    # ensure the length of the raw audio is a multiple of hop_size so that we can use
    # transposed convolution to upsample
    out = out[:N * audio.get_hop_size()]
    assert len(out) % audio.get_hop_size() == 0
    assert_ready_for_upsampling(out, mel_spectrogram, cin_pad=0, debug=True)

    # Write the waveform and spectrogram to disk:
    name = splitext(basename(wav_path))[0]
    audio_filename = "%s-wave.npy" % name
    mel_filename = "%s-feats.npy" % name
    np.save(os.path.join(out_dir, audio_filename), out.astype(out_dtype), allow_pickle=False)
    np.save(os.path.join(out_dir, mel_filename), mel_spectrogram.astype(np.float32), allow_pickle=False)

    # Return a tuple describing this training example:
    return (audio_filename, mel_filename, N, text)
def _process_utterance(out_dir, wav_path, sp2ind_dir, text):
    # Map the speaker prefix of the file name to a speaker index (-1 if unknown)
    with open(sp2ind_dir, 'r') as sp_f:
        sp2ind = json.load(sp_f)
    sp = wav_path.split('/')[-1].split('.')[0].split('_')[0]
    if sp in sp2ind:
        sp_ind = sp2ind[sp]
    else:
        sp_ind = -1

    wav = audio.load_wav(wav_path)

    # Trim begin/end silences (skipped for test utterances)
    if 'test' not in wav_path:
        wav, _ = librosa.effects.trim(wav, top_db=60, frame_length=2048, hop_length=512)
    if hparams.highpass_cutoff > 0.0:
        wav = audio.low_cut_filter(wav, hparams.sample_rate, hparams.highpass_cutoff)

    # Mu-law quantize
    if is_mulaw_quantize(hparams.input_type):
        # Trim silences in the mu-law quantized domain
        silence_threshold = 0
        if silence_threshold > 0:
            # [0, quantize_channels)
            out = P.mulaw_quantize(wav, hparams.quantize_channels - 1)
            start, end = audio.start_and_end_indices(out, silence_threshold)
            wav = wav[start:end]
        constant_values = P.mulaw_quantize(0, hparams.quantize_channels - 1)
        out_dtype = np.int16
    elif is_mulaw(hparams.input_type):
        # [-1, 1]
        constant_values = P.mulaw(0.0, hparams.quantize_channels - 1)
        out_dtype = np.float32
    else:
        # [-1, 1]
        constant_values = 0.0
        out_dtype = np.float32

    # Compute mel-scale spectrogram and MFCCs from the trimmed wav:
    # (N, D)
    mel_spectrogram = audio.logmelspectrogram(wav).astype(np.float32).T
    mfcc = audio.mfcc(wav).astype(np.float32).T

    if hparams.global_gain_scale > 0:
        wav *= hparams.global_gain_scale

    # Time domain preprocessing
    if hparams.preprocess is not None and hparams.preprocess not in ["", "none"]:
        f = getattr(audio, hparams.preprocess)
        wav = f(wav)

    # Clip
    if np.abs(wav).max() > 1.0:
        print("Warning: abs max value exceeds 1.0: {}".format(np.abs(wav).max()))
        # The sample is not skipped here; it is clipped below instead.
    wav = np.clip(wav, -1.0, 1.0)

    # Set waveform target (out)
    if is_mulaw_quantize(hparams.input_type):
        out = P.mulaw_quantize(wav, hparams.quantize_channels - 1)
    elif is_mulaw(hparams.input_type):
        out = P.mulaw(wav, hparams.quantize_channels - 1)
    else:
        out = wav

    # Zero pad: needed to adjust time resolution between audio and mel-spectrogram
    l, r = audio.pad_lr(out, hparams.fft_size, audio.get_hop_size())
    if l > 0 or r > 0:
        out = np.pad(out, (l, r), mode="constant", constant_values=constant_values)
    N = mel_spectrogram.shape[0]
    assert len(out) >= N * audio.get_hop_size()

    # Time resolution adjustment:
    # ensure the length of the raw audio is a multiple of hop_size so that we can use
    # transposed convolution to upsample
    out = out[:N * audio.get_hop_size()]
    assert len(out) % audio.get_hop_size() == 0

    # Write the waveform and features to disk:
    audio_filename = f'{out_dir}wave.npy'
    mel_filename = f'{out_dir}mel.npy'
    mfcc_filename = f'{out_dir}mfcc.npy'
    assert mfcc.shape[0] == N
    np.save(audio_filename, out.astype(out_dtype), allow_pickle=False)
    np.save(mel_filename, mel_spectrogram.astype(np.float32), allow_pickle=False)
    np.save(mfcc_filename, mfcc.astype(np.float32), allow_pickle=False)

    # Return a tuple describing this training example:
    return (out_dir, N, sp_ind, text)
def _process_song(out_dir, index, wav_path, text):
    # Load the audio to a numpy array:
    wav = audio.load_wav(wav_path)

    # Trim begin/end silences
    # NOTE: the threshold was chosen for clean signals
    wav, _ = librosa.effects.trim(wav, top_db=60, frame_length=2048, hop_length=512)

    if hparams.highpass_cutoff > 0.0:
        wav = audio.low_cut_filter(wav, hparams.sample_rate, hparams.highpass_cutoff)

    # Mu-law quantize
    if is_mulaw_quantize(hparams.input_type):
        # Trim silences in the mu-law quantized domain
        silence_threshold = 0
        if silence_threshold > 0:
            # [0, quantize_channels)
            out = P.mulaw_quantize(wav, hparams.quantize_channels - 1)
            start, end = audio.start_and_end_indices(out, silence_threshold)
            wav = wav[start:end]
        constant_values = P.mulaw_quantize(0, hparams.quantize_channels - 1)
        out_dtype = np.int16
    elif is_mulaw(hparams.input_type):
        # [-1, 1]
        constant_values = P.mulaw(0.0, hparams.quantize_channels - 1)
        out_dtype = np.float32
    else:
        # [-1, 1]
        constant_values = 0.0
        out_dtype = np.float32

    # Write a 16 kHz copy of the wav for the external chromagram extractor
    wav_name = os.path.splitext(os.path.basename(wav_path))[0]
    os.makedirs('./pwavs', exist_ok=True)
    pwav_path = './pwavs/{0}.wav'.format(wav_name)
    scipy.io.wavfile.write(pwav_path, 16000, wav)

    # Make the chord directory if it does not exist
    chord_dir = "chord_dir"
    os.makedirs(chord_dir, exist_ok=True)

    # Run the chromagram extractor; it writes one csv of chroma frames (with timestamps)
    # per wav into chord_dir.
    os.system('./extract_chromagram.sh {0} {1} > /dev/null 2>&1'.format(pwav_path, chord_dir))
    note_filename = '{0}/{1}.csv'.format(chord_dir, wav_name)

    # Instead of computing a mel spectrogram, build a time series of chromagram frames:
    # 24 chroma bins per frame, one frame per 2048 audio samples.
    note_samples = int(len(wav) / 2048)
    chords_time_series = np.zeros((24, note_samples))
    with open(note_filename, newline='\n') as csvfile:
        chordreader = csvfile.readlines()
        for idx, row in enumerate(chordreader):
            row = row.split(",")
            # Drop the leading timestamp column, keep the 24 chroma values
            chromagram_samples = np.array(row).astype(float)[1:]
            chords_time_series[:, idx] = chromagram_samples
    chords_time_series = chords_time_series.T

    # Global gain scaling and clipping are intentionally skipped for songs:
    # if hparams.global_gain_scale > 0:
    #     wav *= hparams.global_gain_scale
    # wav = np.clip(wav, -1.0, 1.0)

    # Time domain preprocessing
    if hparams.preprocess is not None and hparams.preprocess not in ["", "none"]:
        f = getattr(audio, hparams.preprocess)
        wav = f(wav)

    # Set waveform target (out)
    if is_mulaw_quantize(hparams.input_type):
        out = P.mulaw_quantize(wav, hparams.quantize_channels - 1)
    elif is_mulaw(hparams.input_type):
        out = P.mulaw(wav, hparams.quantize_channels - 1)
    else:
        out = wav

    # Zero pad: needed to adjust time resolution between audio and chord features
    l, r = audio.pad_lr(out, hparams.fft_size, audio.get_hop_size())
    if l > 0 or r > 0:
        out = np.pad(out, (l, r), mode="constant", constant_values=constant_values)
    N = chords_time_series.shape[0]
    assert len(out) >= N * audio.get_hop_size()

    # Time resolution adjustment:
    # ensure the length of the raw audio is a multiple of hop_size so that we can use
    # transposed convolution to upsample
    out = out[:N * audio.get_hop_size()]
    assert len(out) % audio.get_hop_size() == 0

    # Write the waveform and chord features to disk:
    name = splitext(basename(wav_path))[0]
    audio_filename = '%s-wave.npy' % name
    chords_filename = '%s-feats.npy' % name
    np.save(os.path.join(out_dir, audio_filename), out.astype(out_dtype), allow_pickle=False)
    np.save(os.path.join(out_dir, chords_filename), chords_time_series.astype(out_dtype), allow_pickle=False)

    # Return a tuple describing this training example:
    return (audio_filename, chords_filename, N, text)
def _process_utterance(out_dir, index, wav_path, text):
    # Load the audio to a numpy array:
    wav = audio.load_wav(wav_path)

    # Silence trimming and high-pass filtering are intentionally disabled here:
    # wav, _ = librosa.effects.trim(wav, top_db=60, frame_length=2048, hop_length=512)
    # if hparams.highpass_cutoff > 0.0:
    #     wav = audio.low_cut_filter(wav, hparams.sample_rate, hparams.highpass_cutoff)

    # Mu-law quantize
    if is_mulaw_quantize(hparams.input_type):
        # Silence trimming in the mu-law quantized domain is disabled
        # (silence_threshold is fixed at 0).
        silence_threshold = 0
        constant_values = P.mulaw_quantize(0, hparams.quantize_channels - 1)
        out_dtype = np.int16
    elif is_mulaw(hparams.input_type):
        # [-1, 1]
        constant_values = P.mulaw(0.0, hparams.quantize_channels - 1)
        out_dtype = np.float32
    else:
        # [-1, 1]
        constant_values = 0.0
        out_dtype = np.float32

    # Compute a mel-scale spectrogram from the wav:
    # (N, D)
    mel_spectrogram = audio.logmelspectrogram(wav).astype(np.float32).T

    if hparams.global_gain_scale > 0:
        wav *= hparams.global_gain_scale

    # Time domain preprocessing
    if hparams.preprocess is not None and hparams.preprocess not in ["", "none"]:
        f = getattr(audio, hparams.preprocess)
        wav = f(wav)

    # Clip
    if np.abs(wav).max() > 1.0:
        print("Warning: abs max value exceeds 1.0: {}".format(np.abs(wav).max()))
        # ignore this sample
        return ("dummy", "dummy", -1, "dummy")
    wav = np.clip(wav, -1.0, 1.0)

    # Set waveform target (out)
    if is_mulaw_quantize(hparams.input_type):
        out = P.mulaw_quantize(wav, hparams.quantize_channels - 1)
    elif is_mulaw(hparams.input_type):
        out = P.mulaw(wav, hparams.quantize_channels - 1)
    else:
        out = wav

    # Zero pad: needed to adjust time resolution between audio and mel-spectrogram
    l, r = audio.pad_lr(out, hparams.fft_size, audio.get_hop_size())
    if l > 0 or r > 0:
        out = np.pad(out, (l, r), mode="constant", constant_values=constant_values)
    N = mel_spectrogram.shape[0]
    assert len(out) >= N * audio.get_hop_size()

    # Time resolution adjustment:
    # ensure the length of the raw audio is a multiple of hop_size so that we can use
    # transposed convolution to upsample
    out = out[:N * audio.get_hop_size()]
    assert len(out) % audio.get_hop_size() == 0

    # Write the waveform, mel features, and a PNG rendering of the mel to disk:
    name = splitext(basename(wav_path))[0]
    audio_filename = '%s-wave.npy' % name
    mel_filename = '%s-feats.npy' % name
    spectrogram = '%s-img.png' % name
    np.save(os.path.join(out_dir, audio_filename), out.astype(out_dtype), allow_pickle=False)
    np.save(os.path.join(out_dir, mel_filename), mel_spectrogram.astype(np.float32), allow_pickle=False)

    print("mel_max: " + str(np.max(mel_spectrogram)))
    print("mel_min: " + str(np.min(mel_spectrogram)))
    print("mel_shape: " + str(mel_spectrogram.shape))

    # Save the mel spectrogram as a PNG image
    img = audio.mel2png(mel_spectrogram.astype(np.float32))
    spec_path = os.path.join(out_dir, spectrogram)
    io.imsave(spec_path, img, check_contrast=False)

    # Round-trip sanity check: decode the PNG back to a mel spectrogram (result unused)
    mel_back = audio.png2mel(io.imread(spec_path))

    # Return a tuple describing this training example:
    return (audio_filename, mel_filename, N, text)
def _process_utterance(mel_dir, linear_dir, wav_dir, index, wav_path, text, hparams, step_factor=1):
    """
    Preprocesses a single utterance wav/text pair.

    This writes the mel and linear scale spectrograms to disk and returns a tuple
    to write to the train.txt file.

    Args:
        - mel_dir: the directory to write the mel spectrograms into
        - linear_dir: the directory to write the linear spectrograms into
        - wav_dir: the directory to write the preprocessed wav into
        - index: the numeric index to use in the spectrogram filename
        - wav_path: path to the audio file containing the speech input
        - text: text spoken in the input audio file
        - hparams: hyper parameters
        - step_factor: optional integer decimation factor; the audio is loaded at
          sample_rate * step_factor and decimated back to sample_rate

    Returns:
        - A tuple: (wav_path, audio_filename, mel_filename, linear_filename,
          time_steps, mel_frames, audio_time, text, len(text))
    """
    try:
        # Load the audio as a numpy array
        wav = audio.load_wav(wav_path, sr=hparams.sample_rate * step_factor)
        if step_factor > 1:
            wav = wav[::step_factor]
        audio_time = len(wav) / hparams.sample_rate
    except FileNotFoundError:
        # Catch missing wav exception
        print('file {} present in csv metadata is not present in wav folder. skipping!'.format(wav_path))
        return None

    # Trim lead/trail silences
    if hparams.trim_silence:
        wav = audio.trim_silence(wav, hparams)

    # Pre-emphasize
    preem_wav = audio.preemphasis(wav, hparams.preemphasis, hparams.preemphasize)

    # Rescale wav
    if hparams.rescale:
        wav = wav / np.abs(wav).max() * hparams.rescaling_max
        preem_wav = preem_wav / np.abs(preem_wav).max() * hparams.rescaling_max

    # Assert all audio is in [-1, 1]
    if (wav > 1.).any() or (wav < -1.).any():
        raise RuntimeError('wav has invalid value: {}'.format(wav_path))
    if (preem_wav > 1.).any() or (preem_wav < -1.).any():
        raise RuntimeError('wav has invalid value: {}'.format(wav_path))

    # Mu-law quantize
    if is_mulaw_quantize(hparams.input_type):
        # [0, quantize_channels)
        out = mulaw_quantize(wav, hparams.quantize_channels)

        # Trim silences
        start, end = audio.start_and_end_indices(out, hparams.silence_threshold)
        wav = wav[start:end]
        preem_wav = preem_wav[start:end]
        out = out[start:end]

        constant_values = mulaw_quantize(0, hparams.quantize_channels)
        out_dtype = np.int16
    elif is_mulaw(hparams.input_type):
        # [-1, 1]
        out = mulaw(wav, hparams.quantize_channels)
        constant_values = mulaw(0., hparams.quantize_channels)
        out_dtype = np.float32
    else:
        # [-1, 1]
        out = wav
        constant_values = 0.
        out_dtype = np.float32

    # Compute the mel scale spectrogram from the pre-emphasized wav
    mel_spectrogram = audio.melspectrogram(preem_wav, hparams).astype(np.float32)
    mel_frames = mel_spectrogram.shape[1]
    if mel_frames > hparams.max_mel_frames and hparams.clip_mels_length:
        return None

    # Compute the linear scale spectrogram from the pre-emphasized wav
    linear_spectrogram = audio.linearspectrogram(preem_wav, hparams).astype(np.float32)
    linear_frames = linear_spectrogram.shape[1]

    # Sanity check
    assert linear_frames == mel_frames

    if hparams.use_lws:
        # Ensure time resolution adjustment between audio and mel-spectrogram
        fft_size = hparams.n_fft if hparams.win_size is None else hparams.win_size
        l, r = audio.pad_lr(wav, fft_size, audio.get_hop_size(hparams))

        # Zero pad audio signal
        out = np.pad(out, (l, r), mode='constant', constant_values=constant_values)
    else:
        # Ensure time resolution adjustment between audio and mel-spectrogram
        l_pad, r_pad = audio.librosa_pad_lr(wav, hparams.n_fft, audio.get_hop_size(hparams), hparams.wavenet_pad_sides)

        # Pad audio signal on the right (librosa-style framing, to avoid frame inconsistency)
        out = np.pad(out, (l_pad, r_pad), mode='constant', constant_values=constant_values)

    assert len(out) >= mel_frames * audio.get_hop_size(hparams)

    # Time resolution adjustment:
    # ensure the length of the raw audio is a multiple of the hop size so that we can
    # use transposed convolution to upsample
    out = out[:mel_frames * audio.get_hop_size(hparams)]
    assert len(out) % audio.get_hop_size(hparams) == 0
    time_steps = len(out)

    # Write the spectrograms and audio to disk
    audio_filename = 'audio-{}.npy'.format(index)
    mel_filename = 'mel-{}.npy'.format(index)
    linear_filename = 'linear-{}.npy'.format(index)
    np.save(os.path.join(wav_dir, audio_filename), out.astype(out_dtype), allow_pickle=False)
    np.save(os.path.join(mel_dir, mel_filename), mel_spectrogram.T, allow_pickle=False)
    np.save(os.path.join(linear_dir, linear_filename), linear_spectrogram.T, allow_pickle=False)

    # Return a tuple describing this training example
    return (wav_path, audio_filename, mel_filename, linear_filename, time_steps,
            mel_frames, audio_time, text, len(text))
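# Usage sketch (illustrative only): the Tacotron-style variant above is typically driven
# from a metadata loop. Everything below (the driver name, the metadata.csv "file|text"
# layout, the executor setup) is an assumption for this example, not code from the
# original repositories.
def _example_build_from_path(in_dir, mel_dir, linear_dir, wav_dir, hparams):
    from concurrent.futures import ProcessPoolExecutor
    from functools import partial

    futures = []
    with ProcessPoolExecutor(max_workers=4) as executor:
        with open(os.path.join(in_dir, 'metadata.csv'), encoding='utf-8') as f:
            for index, line in enumerate(f, start=1):
                parts = line.strip().split('|')  # assumed row format: "file|text"
                wav_path = os.path.join(in_dir, 'wavs', parts[0] + '.wav')
                text = parts[-1]
                futures.append(executor.submit(
                    partial(_process_utterance, mel_dir, linear_dir, wav_dir,
                            index, wav_path, text, hparams)))

    # Drop utterances that were skipped (missing files or too-long mels return None)
    results = [fut.result() for fut in futures]
    return [r for r in results if r is not None]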