# Imports assumed for these helpers; the exact module paths below follow the
# usual Tacotron2/WaveGlow project layout and may differ in your tree.
import torch
from argparse import Namespace

from tacotron2.data_function import TextMelLoader
from waveglow.data_function import MelAudioLoader
from common.utils import load_filepaths_and_text


def get_data_loader(model_name, audiopaths_and_text, args):
    """Return the dataset wrapper matching the requested model."""
    if model_name == 'Tacotron2':
        data_loader = TextMelLoader(audiopaths_and_text, args)
    elif model_name == 'WaveGlow':
        data_loader = MelAudioLoader(audiopaths_and_text, args)
    else:
        raise NotImplementedError(
            "unknown data loader requested: {}".format(model_name))
    return data_loader
# Variant that takes a dataset root and optionally forwards speaker IDs for
# multi-speaker Tacotron2 training.
def get_data_loader(model_name, dataset_path, audiopaths_and_text, args,
                    speaker_ids=None):
    if model_name == 'Tacotron2':
        if speaker_ids is not None:
            data_loader = TextMelLoader(dataset_path, audiopaths_and_text,
                                        args, speaker_ids=speaker_ids)
        else:
            data_loader = TextMelLoader(dataset_path, audiopaths_and_text,
                                        args)
    elif model_name == 'WaveGlow':
        data_loader = MelAudioLoader(dataset_path, audiopaths_and_text, args)
    else:
        raise NotImplementedError(
            "unknown data loader requested: {}".format(model_name))
    return data_loader
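# Illustrative usage (not part of the original source): wrapping the dataset
# returned by get_data_loader in a torch DataLoader. The args fields
# (dataset_path, training_files, batch_size) and the collate_fn parameter are
# assumptions for this sketch; Tacotron2 batches need the project's
# model-specific collate function to pad variable-length items.
def _example_build_train_loader(args, collate_fn):
    from torch.utils.data import DataLoader

    train_set = get_data_loader('Tacotron2', args.dataset_path,
                                args.training_files, args)
    return DataLoader(train_set, shuffle=True, batch_size=args.batch_size,
                      drop_last=True, collate_fn=collate_fn)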
def audio2mel(audiopaths_and_text, melpaths_and_text, args):
    melpaths_and_text_list = load_filepaths_and_text(melpaths_and_text)
    audiopaths_and_text_list = load_filepaths_and_text(audiopaths_and_text)

    data_loader = TextMelLoader(audiopaths_and_text, args)

    for i in range(len(melpaths_and_text_list)):
        if i % 100 == 0:
            print("done", i, "/", len(melpaths_and_text_list))
        mel = data_loader.get_mel(audiopaths_and_text_list[i][0])
        torch.save(mel, melpaths_and_text_list[i][0])
def audio2mel(dataset_path: str, audiopaths_and_text: str,
              melpaths_and_text: str, args: Namespace) -> None:
    """Create mel spectrograms on disk from audio files.

    Args:
        dataset_path (str): Path to dataset
        audiopaths_and_text (str): Path to filelist with audio paths and text
        melpaths_and_text (str): Path to filelist with mel paths and text
        args (Namespace): Namespace with parsed arguments
    """
    melpaths_and_text_list = load_filepaths_and_text(dataset_path,
                                                     melpaths_and_text)
    audiopaths_and_text_list = load_filepaths_and_text(dataset_path,
                                                       audiopaths_and_text)

    data_loader = TextMelLoader(dataset_path, audiopaths_and_text, args)

    for i, melpath_and_text in enumerate(melpaths_and_text_list):
        if i % 100 == 0:
            print("done", i, "/", len(melpaths_and_text_list))
        mel = data_loader.get_mel(audiopaths_and_text_list[i][0])
        torch.save(mel, melpath_and_text[0])
# Variant that accepts an extra use_intermed argument, which the body does not
# use, and keeps optional debugging prints for inspecting the filelists.
def audio2mel(dataset_path, audiopaths_and_text, melpaths_and_text, args,
              use_intermed=None):
    melpaths_and_text_list = \
        load_filepaths_and_text(dataset_path, melpaths_and_text)
    audiopaths_and_text_list = \
        load_filepaths_and_text(dataset_path, audiopaths_and_text)

    # n = 10
    # print(f"The first {n} melpaths and text are {melpaths_and_text_list[:n]}")
    # print(f"The first {n} audiopaths and text are {audiopaths_and_text_list[:n]}")

    data_loader = TextMelLoader(dataset_path, audiopaths_and_text, args)

    for i in range(len(melpaths_and_text_list)):
        if i % 100 == 0:
            print("done", i, "/", len(melpaths_and_text_list))
        mel = data_loader.get_mel(audiopaths_and_text_list[i][0])
        torch.save(mel, melpaths_and_text_list[i][0])
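# Illustrative invocation (not part of the original source). audio2mel pairs
# the two filelists by line index, so the wav and mel filelists must list
# corresponding entries in the same order. The filelist names below and the
# argparse-style `args` namespace are assumptions for this sketch.
def _example_preprocess(args):
    audio2mel(args.dataset_path,
              audiopaths_and_text='filelists/wav_files.txt',
              melpaths_and_text='filelists/mel_files.txt',
              args=args)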