def vocode(self):
    speaker_name, spec, breaks, _ = self.current_generated
    assert spec is not None

    # Synthesize the waveform
    if not vocoder.is_loaded():
        self.init_vocoder()

    def vocoder_progress(i, seq_len, b_size, gen_rate):
        real_time_factor = (gen_rate / Synthesizer.sample_rate) * 1000
        line = "Waveform generation: %d/%d (batch size: %d, rate: %.1fkHz - %.2fx real time)" \
               % (i * b_size, seq_len * b_size, b_size, gen_rate, real_time_factor)
        self.ui.log(line, "overwrite")
        self.ui.set_loading(i, seq_len)

    if self.ui.current_vocoder_fpath is not None:
        self.ui.log("")
        wav = vocoder.infer_waveform(spec, progress_callback=vocoder_progress)
    else:
        self.ui.log("Waveform generation with Griffin-Lim... ")
        wav = Synthesizer.griffin_lim(spec)
    self.ui.set_loading(0)
    self.ui.log(" Done!", "append")

    # Add breaks
    b_ends = np.cumsum(np.array(breaks) * Synthesizer.hparams.hop_size)
    b_starts = np.concatenate(([0], b_ends[:-1]))
    wavs = [wav[start:end] for start, end in zip(b_starts, b_ends)]
    breaks = [np.zeros(int(0.15 * Synthesizer.sample_rate))] * len(breaks)
    wav = np.concatenate([i for w, b in zip(wavs, breaks) for i in (w, b)])

    # Play it
    wav = wav / np.abs(wav).max() * 0.97
    self.ui.play(wav, Synthesizer.sample_rate)

    fref = '-'.join([self.ui.current_dataset_name, self.ui.current_speaker_name,
                     self.ui.current_utterance_name])
    ftime = '{}'.format(int(time.time()))
    ftext = self.ui.text_prompt.toPlainText()
    fms = int(len(wav) * 1000 / Synthesizer.sample_rate)
    fname = filename_formatter('{}_{}_{}ms_{}.wav'.format(fref, ftime, fms, ftext))
    audio.save_wav(wav, self._out_wav_dir.joinpath(fname), Synthesizer.sample_rate)  # save

    # Compute the embedding
    # TODO: this is problematic with different sampling rates, gotta fix it
    if not encoder.is_loaded():
        self.init_encoder()
    encoder_wav = encoder.preprocess_wav(wav)
    embed, partial_embeds, _ = encoder.embed_utterance(encoder_wav, return_partials=True)

    # Add the utterance
    name = speaker_name + "_gen_%05d" % int(time.time())
    utterance = Utterance(name, speaker_name, wav, spec, embed, partial_embeds, True)
    np.save(self._out_embed_dir.joinpath(name + '.npy'), embed, allow_pickle=False)  # save
    self.utterances.add(utterance)

    # Plot it
    self.ui.draw_embed(embed, name, "generated")
    self.ui.draw_umap_projections(self.utterances)
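# The break handling above slices the vocoded wav at spectrogram frame
# boundaries and splices 0.15 s of silence between segments. A minimal
# standalone illustration of that interleaving (hop_size and sample_rate are
# illustrative; the real values come from Synthesizer.hparams):
import numpy as np

hop_size, sample_rate = 200, 16000
breaks = [50, 80, 60]  # segment lengths in spectrogram frames
wav = np.random.rand((50 + 80 + 60) * hop_size)

b_ends = np.cumsum(np.array(breaks) * hop_size)  # [10000, 26000, 38000]
b_starts = np.concatenate(([0], b_ends[:-1]))    # [0, 10000, 26000]
segments = [wav[s:e] for s, e in zip(b_starts, b_ends)]
silences = [np.zeros(int(0.15 * sample_rate))] * len(breaks)
out = np.concatenate([x for pair in zip(segments, silences) for x in pair])
print(len(out))  # 38000 + 3 * 2400 = 45200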
def record(self):
    wav = self.ui.record_one(encoder.sampling_rate, 5)
    if wav is None:
        return
    self.ui.play(wav, encoder.sampling_rate)

    speaker_name = "user01"
    name = speaker_name + "_rec_%d" % int(time.time())
    audio.save_wav(wav, self._out_record_dir.joinpath(name + '.wav'), encoder.sampling_rate)  # save
    self.add_real_utterance(wav, name, speaker_name)
def record(self):
    wav = self.ui.record_one(encoder.sampling_rate, 5)
    if wav is None:
        return
    self.ui.play(wav, encoder.sampling_rate)

    speaker_name = "user01"
    name = speaker_name + "_rec_{}".format(time_formatter())
    fpath = self._out_record_dir.joinpath(name + '.wav')
    audio.save_wav(wav, fpath, encoder.sampling_rate)  # save
    wav = Synthesizer.load_preprocess_wav(fpath)  # keep the data format consistent
    self.add_real_utterance(wav, name, speaker_name)
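# time_formatter is referenced above but not defined in this excerpt. A
# minimal sketch, assuming it returns a filesystem-safe timestamp (the repo's
# actual format may differ):
import time

def time_formatter():
    return time.strftime('%Y%m%d_%H%M%S')  # e.g. 20200101_120000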
def run_mel_strip():
    import numpy as np
    from pathlib import Path
    from tools.spec_processor import find_silences
    from synthesizer.utils.audio import inv_mel_spectrogram, save_wav
    from synthesizer.hparams import hparams
    from matplotlib import pyplot as plt

    inpath = Path(
        r'E:\lab\zhrtvc\zhrtvc\toolbox\saved_files\mels\wavs-P00173I-001_20170001P00173I0068.wav_1567509749_我家朵朵是世界上最漂亮的朵朵。。知道自己是什么样的人。要做什么。无需活在别人非议或期待里。你勤奋.npy')
    data = np.load(inpath)
    data = data.T  # (n_mels, frames) -> (frames, n_mels)
    print(data.shape)

    end_idx = find_silences(data, min_silence_sec=0.5, hop_silence_sec=0.2)
    print(end_idx, len(data))

    out_dir = Path(r'data/syns')
    for i, pair in enumerate(zip(end_idx[:-1], end_idx[1:]), 1):
        a, b = pair
        # Invert the speech between the end of one silence and the start of the next
        wav = inv_mel_spectrogram(data[a[-1]: b[0]].T, hparams)
        save_wav(wav, out_dir.joinpath(f'sil-{i:02d}.wav'), hparams.sample_rate)

    plt.imshow(data.T)
    plt.colorbar()
    plt.show()
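# find_silences comes from the repo's tools.spec_processor, whose source is
# not shown here. A hypothetical re-implementation that matches the call
# above, assuming frame-level energy thresholding on a (frames, n_mels)
# log-mel array; threshold, hop_size and sample_rate are guesses:
import numpy as np

def find_silences(mel, min_silence_sec=0.5, hop_silence_sec=0.2,
                  hop_size=200, sample_rate=16000, threshold=-3.5):
    # hop_silence_sec is accepted for signature compatibility but is not
    # modeled in this simplified sketch.
    min_frames = int(min_silence_sec * sample_rate / hop_size)
    silent = mel.mean(axis=1) < threshold
    regions, start = [], None
    for i, s in enumerate(silent):
        if s and start is None:
            start = i
        elif not s and start is not None:
            if i - start >= min_frames:
                regions.append((start, i))  # (first, last+1) silent frame
            start = None
    if start is not None and len(silent) - start >= min_frames:
        regions.append((start, len(silent)))
    return regions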
        # Inside the demo loop's try block:
        generated_wav = audio.inv_melspectrogram(spec, hparams=audio.melgan_hparams)
        # generated_wav = synthesizer.griffin_lim(spec, hparams=synthesizer.hparams)
        # generated_wav = np.pad(generated_wav, (0, synthesizer.sample_rate), mode="constant")

        # Play the audio (non-blocking)
        # if not args.no_sound:
        #     sd.stop()
        #     sd.play(generated_wav, synthesizer.sample_rate)
        #     sd.wait()

        # Save it on the disk
        cur_time = time.strftime('%Y%m%d_%H%M%S')
        fpath = args.out_dir.joinpath("demo_out_{}.wav".format(cur_time))
        # librosa.output.write_wav(fpath, generated_wav.astype(np.float32), synthesizer.sample_rate)
        audio.save_wav(generated_wav, fpath, synthesizer.sample_rate)  # save

        ref_path = args.out_dir.joinpath("demo_ref_{}.mp3".format(cur_time))
        shutil.copyfile(in_fpath, ref_path)

        txt_path = args.out_dir.joinpath("info_dict.txt")
        with open(txt_path, 'at', encoding='utf8') as fout:
            dt = dict(text=text, audio_path=str(fpath), reference_path=str(in_fpath), time=cur_time)
            out = json.dumps(dt, ensure_ascii=False)
            fout.write('{}\n'.format(out))

        num_generated += 1
        print("\nSaved output as %s\n\n" % fpath)
    except Exception as e:
        print("Caught exception: %s" % repr(e))
        print("Restarting\n")
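# Each generation appends one JSON object per line, so info_dict.txt can be
# read back as JSONL; the keys below are exactly those written above:
import json

with open("info_dict.txt", encoding="utf8") as fin:
    records = [json.loads(line) for line in fin if line.strip()]
for r in records:
    print(r["time"], r["audio_path"], r["text"])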
def vocode(self):
    speaker_name, spec, breaks, _ = self.current_generated
    assert spec is not None

    # Synthesize the waveform
    if not vocoder.is_loaded():
        self.init_vocoder()

    def vocoder_progress(i, seq_len, b_size, gen_rate):
        real_time_factor = (gen_rate / Synthesizer.sample_rate) * 1000
        line = "Waveform generation: %d/%d (batch size: %d, rate: %.1fkHz - %.2fx real time)" \
               % (i * b_size, seq_len * b_size, b_size, gen_rate, real_time_factor)
        self.ui.log(line, "overwrite")
        self.ui.set_loading(i, seq_len)

    wav = None
    vocname = ""
    if self.ui.current_vocoder_fpath is not None:
        model_fpath = self.ui.current_vocoder_fpath
        vocname = Path(model_fpath).parent.stem
        if vocname == "melgan":
            self.ui.log("Waveform generation with MelGAN... ")
            wav = vocoder_melgan.infer_waveform_melgan(spec, model_fpath)
        elif vocname == "wavernn":
            self.ui.log("Waveform generation with WaveRNN... ")
            wav = vocoder.infer_waveform(spec, progress_callback=vocoder_progress)
    if wav is None:
        # Fall back to Griffin-Lim when no vocoder is selected or recognized
        vocname = "griffinlim"
        self.ui.log("Waveform generation with Griffin-Lim... ")
        wav = Synthesizer.griffin_lim(spec)
    self.ui.set_loading(0)
    self.ui.log(" Done!", "append")

    # Play it
    wav = wav / np.abs(wav).max() * 0.97
    self.ui.play(wav, Synthesizer.sample_rate)

    fref = self.ui.selected_utterance.name
    ftime = '{}'.format(time_formatter())
    ftext = self.ui.text_prompt.toPlainText()
    fms = int(len(wav) * 1000 / Synthesizer.sample_rate)
    fvoc = vocname
    fname = filename_formatter('{}_{}_{}_{}ms_{}.wav'.format(fref, ftime, fvoc, fms, ftext))
    audio.save_wav(wav, self._out_wav_dir.joinpath(fname), Synthesizer.sample_rate)  # save

    # Compute the embedding
    # TODO: this is problematic with different sampling rates, gotta fix it
    if not encoder.is_loaded():
        self.init_encoder()
    encoder_wav = encoder.preprocess_wav(wav)
    embed, partial_embeds, _ = encoder.embed_utterance(encoder_wav, return_partials=True)

    # Add the utterance
    name = speaker_name + "_gen_{}".format(time_formatter())
    utterance = Utterance(name, speaker_name, wav, spec, embed, partial_embeds, True)
    np.save(self._out_embed_dir.joinpath(name + '.npy'), embed, allow_pickle=False)  # save
    self.utterances.add(utterance)

    # Plot it
    self.ui.draw_embed(embed, name, "generated")
    self.ui.draw_umap_projections(self.utterances)
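# filename_formatter is also assumed from elsewhere in the repo. A minimal
# sketch, assuming it sanitizes the prompt text so it can be embedded in a
# file name (the real helper may cap the length differently):
import re

def filename_formatter(name, max_len=200):
    stem, dot, ext = name.rpartition('.')
    stem = re.sub(r'[\\/:*?"<>|\s]+', '_', stem)  # strip illegal/space chars
    return stem[:max_len] + dot + ext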
def synthesize(self, texts, basenames, out_dir, log_dir, mel_filenames, embed_filenames):
    hparams = self._hparams
    cleaner_names = [x.strip() for x in hparams.cleaners.split(",")]

    assert 0 == len(texts) % self._hparams.tacotron_num_gpus
    seqs = [np.asarray(text_to_sequence(text)) for text in texts]
    input_lengths = [len(seq) for seq in seqs]
    size_per_device = len(seqs) // self._hparams.tacotron_num_gpus

    # Pad inputs according to each GPU max length
    input_seqs = None
    split_infos = []
    for i in range(self._hparams.tacotron_num_gpus):
        device_input = seqs[size_per_device * i:size_per_device * (i + 1)]
        device_input, max_seq_len = self._prepare_inputs(device_input)
        input_seqs = np.concatenate((input_seqs, device_input), axis=1) \
            if input_seqs is not None else device_input
        split_infos.append([max_seq_len, 0, 0, 0])

    feed_dict = {
        self.inputs: input_seqs,
        self.input_lengths: np.asarray(input_lengths, dtype=np.int32),
    }

    if self.gta:
        np_targets = [np.load(mel_filename) for mel_filename in mel_filenames]
        target_lengths = [len(np_target) for np_target in np_targets]

        # Pad targets according to each GPU max length
        target_seqs = None
        for i in range(self._hparams.tacotron_num_gpus):
            device_target = np_targets[size_per_device * i:size_per_device * (i + 1)]
            device_target, max_target_len = self._prepare_targets(
                device_target, self._hparams.outputs_per_step)
            target_seqs = np.concatenate((target_seqs, device_target), axis=1) \
                if target_seqs is not None else device_target
            # Not really used, but set in case of future development
            split_infos[i][1] = max_target_len

        feed_dict[self.targets] = target_seqs
        assert len(np_targets) == len(texts)

    feed_dict[self.split_infos] = np.asarray(split_infos, dtype=np.int32)
    feed_dict[self.speaker_embeddings] = [np.load(f) for f in embed_filenames]

    if self.gta or not hparams.predict_linear:
        mels, alignments, stop_tokens = self.session.run(
            [self.mel_outputs, self.alignments, self.stop_token_prediction],
            feed_dict=feed_dict)

        # Linearize outputs (1D arrays)
        mels = [mel for gpu_mels in mels for mel in gpu_mels]
        alignments = [align for gpu_aligns in alignments for align in gpu_aligns]
        stop_tokens = [token for gpu_token in stop_tokens for token in gpu_token]

        if not self.gta:
            # Natural batch synthesis
            # Get mel lengths for the entire batch from the stop_tokens predictions
            target_lengths = self._get_output_lengths(stop_tokens)

        # Take off the batch-wise padding
        mels = [mel[:target_length, :] for mel, target_length in zip(mels, target_lengths)]
        assert len(mels) == len(texts)
    else:
        linears, mels, alignments, stop_tokens = self.session.run(
            [self.linear_outputs, self.mel_outputs, self.alignments,
             self.stop_token_prediction],
            feed_dict=feed_dict)

        # Linearize outputs (1D arrays)
        linears = [linear for gpu_linear in linears for linear in gpu_linear]
        mels = [mel for gpu_mels in mels for mel in gpu_mels]
        alignments = [align for gpu_aligns in alignments for align in gpu_aligns]
        stop_tokens = [token for gpu_token in stop_tokens for token in gpu_token]

        # Natural batch synthesis
        # Get mel/linear lengths for the entire batch from the stop_tokens predictions
        # target_lengths = self._get_output_lengths(stop_tokens)
        target_lengths = [9999]

        # Take off the batch-wise padding
        mels = [mel[:target_length, :] for mel, target_length in zip(mels, target_lengths)]
        linears = [linear[:target_length, :]
                   for linear, target_length in zip(linears, target_lengths)]
        assert len(mels) == len(linears) == len(texts)

    if basenames is None:
        raise NotImplementedError()

    saved_mels_paths = []
    for i, mel in enumerate(mels):
        # Write the spectrogram to disk
        # Note: output mel-spectrogram files and target ones have the same names,
        # just different folders
        mel_filename = os.path.join(out_dir, "mel-{}.npy".format(basenames[i]))
        np.save(mel_filename, mel, allow_pickle=False)
        saved_mels_paths.append(mel_filename)

        if log_dir is not None:
            # Save wav (mel -> wav)
            wav = audio.inv_mel_spectrogram(mel.T, hparams)
            audio.save_wav(wav,
                           os.path.join(log_dir, "wavs/wav-{}-mel.wav".format(basenames[i])),
                           sr=hparams.sample_rate)

            # Save alignments
            plot.plot_alignment(alignments[i],
                                os.path.join(log_dir, "plots/alignment-{}.png".format(basenames[i])),
                                title="{}".format(texts[i]), split_title=True,
                                max_len=target_lengths[i])

            # Save mel spectrogram plot
            plot.plot_spectrogram(mel,
                                  os.path.join(log_dir, "plots/mel-{}.png".format(basenames[i])),
                                  title="{}".format(texts[i]), split_title=True)

            if hparams.predict_linear:
                # Save wav (linear -> wav)
                wav = audio.inv_linear_spectrogram(linears[i].T, hparams)
                audio.save_wav(wav,
                               os.path.join(log_dir, "wavs/wav-{}-linear.wav".format(basenames[i])),
                               sr=hparams.sample_rate)

                # Save linear spectrogram plot
                plot.plot_spectrogram(linears[i],
                                      os.path.join(log_dir, "plots/linear-{}.png".format(basenames[i])),
                                      title="{}".format(texts[i]), split_title=True,
                                      auto_aspect=True)

    return saved_mels_paths
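# A hypothetical driver for the method above (names, paths and the single-GPU
# batch size are illustrative; len(texts) must be divisible by
# hparams.tacotron_num_gpus):
texts = ["text one", "text two"]
basenames = ["sent-000", "sent-001"]
embeds = ["embeds/sent-000.npy", "embeds/sent-001.npy"]  # speaker embeddings
mel_paths = synth.synthesize(texts, basenames,
                             out_dir="output/mels", log_dir="output/logs",
                             mel_filenames=None,  # only needed in GTA mode
                             embed_filenames=embeds)
print(mel_paths)  # ["output/mels/mel-sent-000.npy", ...]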
def train(log_dir, args, hparams):
    save_dir = os.path.join(log_dir, "checkpoints")
    plot_dir = os.path.join(log_dir, "plots")
    wav_dir = os.path.join(log_dir, "wavs")
    mel_dir = os.path.join(log_dir, "spectrograms")
    eval_dir = os.path.join(log_dir, "evals")
    eval_plot_dir = os.path.join(eval_dir, "plots")
    eval_wav_dir = os.path.join(eval_dir, "wavs")
    tensorboard_dir = os.path.join(log_dir, "events")
    meta_folder = os.path.join(log_dir, "metas")
    os.makedirs(save_dir, exist_ok=True)
    os.makedirs(plot_dir, exist_ok=True)
    os.makedirs(wav_dir, exist_ok=True)
    os.makedirs(mel_dir, exist_ok=True)
    os.makedirs(eval_dir, exist_ok=True)
    os.makedirs(eval_plot_dir, exist_ok=True)
    os.makedirs(eval_wav_dir, exist_ok=True)
    os.makedirs(tensorboard_dir, exist_ok=True)
    os.makedirs(meta_folder, exist_ok=True)

    checkpoint_fpath = os.path.join(save_dir, "model.ckpt")
    metadat_fpath = os.path.join(args.synthesizer_root, "train.txt")

    log("Checkpoint path: {}".format(checkpoint_fpath))
    log("Loading training data from: {}".format(metadat_fpath))
    log("Using model: Tacotron")
    log(hparams_debug_string())

    # Start by setting a seed for repeatability
    tf.set_random_seed(hparams.tacotron_random_seed)

    # Set up data feeder
    coord = tf.train.Coordinator()
    with tf.variable_scope("datafeeder") as scope:
        feeder = Feeder(coord, metadat_fpath, hparams)

    # Set up model
    global_step = tf.Variable(0, name="global_step", trainable=False)
    model, stats = model_train_mode(args, feeder, hparams, global_step)
    eval_model = model_test_mode(args, feeder, hparams, global_step)

    # Embeddings metadata
    char_embedding_meta = os.path.join(meta_folder, "CharacterEmbeddings.tsv")
    if not os.path.isfile(char_embedding_meta):
        with open(char_embedding_meta, "w", encoding="utf-8") as f:
            for symbol in symbols:
                if symbol == " ":
                    symbol = "\\s"  # For visual purposes, swap space with \s
                f.write("{}\n".format(symbol))
    char_embedding_meta = char_embedding_meta.replace(log_dir, "..")

    path = os.path.join(meta_folder, "symbols.json")
    obj = symbols
    json_dump(obj, path)

    path = os.path.join(meta_folder, "args.json")
    obj = args2dict(args)
    json_dump(obj, path)

    path = os.path.join(meta_folder, "hparams.json")
    obj = hparams.values()
    json_dump(obj, path)

    # Book keeping
    step = 0
    time_window = ValueWindow(100)
    loss_window = ValueWindow(100)
    saver = tf.train.Saver(max_to_keep=5)

    log("Tacotron training set to a maximum of {} steps".format(args.tacotron_train_steps))

    # Memory allocation on the GPU as needed
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True

    # Train
    with tf.Session(config=config) as sess:
        try:
            summary_writer = tf.summary.FileWriter(tensorboard_dir, sess.graph)

            sess.run(tf.global_variables_initializer())

            # Saved model restoring
            if args.restore:
                # Restore the saved model if the user requested it (default = True)
                try:
                    checkpoint_state = tf.train.get_checkpoint_state(save_dir)
                    if checkpoint_state and checkpoint_state.model_checkpoint_path:
                        log("Loading checkpoint {}".format(checkpoint_state.model_checkpoint_path),
                            slack=True)
                        saver.restore(sess, checkpoint_state.model_checkpoint_path)
                    else:
                        log("No model to load at {}".format(save_dir), slack=True)
                        saver.save(sess, checkpoint_fpath, global_step=global_step)
                except tf.errors.OutOfRangeError as e:
                    log("Cannot restore checkpoint: {}".format(e), slack=True)
            else:
                log("Starting new training!", slack=True)
                saver.save(sess, checkpoint_fpath, global_step=global_step)

            # Initializing feeder
            feeder.start_threads(sess)

            init_step, init_loss, init_opt = sess.run([global_step, model.loss, model.optimize])
            init_step = int(str(init_step)) - 1

            # Training loop
            while not coord.should_stop() and step < args.tacotron_train_steps:
                start_time = time.time()
                step, loss, opt = sess.run([global_step, model.loss, model.optimize])
                time_window.append(time.time() - start_time)
                loss_window.append(loss)
                message = "Step {:7d} [{:.3f} sec/step, loss={:.5f}, avg_loss={:.5f}]".format(
                    step, time_window.average, loss, loss_window.average)
                print(message)

                if loss > 100 or np.isnan(loss):
                    log("Loss exploded to {:.5f} at step {}".format(loss, step))
                    raise Exception("Loss exploded")

                if step % args.summary_interval == 0 or step == init_step + 100:
                    log("\nWriting summary at step {}".format(step))
                    summary_writer.add_summary(sess.run(stats), step)

                log(message, end="\r", slack=(step % args.checkpoint_interval == 0))

                if step % args.eval_interval == 0 or step == init_step + 100:
                    # Run eval and save eval stats
                    log("\nRunning evaluation at step {}".format(step))

                    eval_losses = []
                    before_losses = []
                    after_losses = []
                    stop_token_losses = []
                    linear_losses = []
                    linear_loss = None

                    if hparams.predict_linear:
                        for i in tqdm(range(feeder.test_steps)):
                            eloss, before_loss, after_loss, stop_token_loss, linear_loss, mel_p, \
                                mel_t, t_len, align, lin_p, lin_t = sess.run(
                                    [
                                        eval_model.tower_loss[0],
                                        eval_model.tower_before_loss[0],
                                        eval_model.tower_after_loss[0],
                                        eval_model.tower_stop_token_loss[0],
                                        eval_model.tower_linear_loss[0],
                                        eval_model.tower_mel_outputs[0][0],
                                        eval_model.tower_mel_targets[0][0],
                                        eval_model.tower_targets_lengths[0][0],
                                        eval_model.tower_alignments[0][0],
                                        eval_model.tower_linear_outputs[0][0],
                                        eval_model.tower_linear_targets[0][0],
                                    ])
                            eval_losses.append(eloss)
                            before_losses.append(before_loss)
                            after_losses.append(after_loss)
                            stop_token_losses.append(stop_token_loss)
                            linear_losses.append(linear_loss)
                        linear_loss = sum(linear_losses) / len(linear_losses)

                        wav = audio.inv_linear_spectrogram(lin_p.T, hparams)
                        audio.save_wav(
                            wav,
                            os.path.join(eval_wav_dir,
                                         "step-{}-eval-wave-from-linear.wav".format(step)),
                            sr=hparams.sample_rate)
                    else:
                        for i in tqdm(range(feeder.test_steps)):
                            eloss, before_loss, after_loss, stop_token_loss, mel_p, mel_t, t_len, \
                                align = sess.run(
                                    [
                                        eval_model.tower_loss[0],
                                        eval_model.tower_before_loss[0],
                                        eval_model.tower_after_loss[0],
                                        eval_model.tower_stop_token_loss[0],
                                        eval_model.tower_mel_outputs[0][0],
                                        eval_model.tower_mel_targets[0][0],
                                        eval_model.tower_targets_lengths[0][0],
                                        eval_model.tower_alignments[0][0],
                                    ])
                            eval_losses.append(eloss)
                            before_losses.append(before_loss)
                            after_losses.append(after_loss)
                            stop_token_losses.append(stop_token_loss)

                    eval_loss = sum(eval_losses) / len(eval_losses)
                    before_loss = sum(before_losses) / len(before_losses)
                    after_loss = sum(after_losses) / len(after_losses)
                    stop_token_loss = sum(stop_token_losses) / len(stop_token_losses)

                    log("Saving eval log to {}..".format(eval_dir))
                    # Save some logs to monitor model improvement on the same unseen sequence
                    wav = audio.inv_mel_spectrogram(mel_p.T, hparams)
                    audio.save_wav(
                        wav,
                        os.path.join(eval_wav_dir, "step-{}-eval-wave-from-mel.wav".format(step)),
                        sr=hparams.sample_rate)

                    plot.plot_alignment(
                        align,
                        os.path.join(eval_plot_dir, "step-{}-eval-align.png".format(step)),
                        title="{}, {}, step={}, loss={:.5f}".format(
                            "Tacotron", time_string(), step, eval_loss),
                        max_len=t_len // hparams.outputs_per_step)
                    plot.plot_spectrogram(
                        mel_p,
                        os.path.join(eval_plot_dir, "step-{}-eval-mel-spectrogram.png".format(step)),
                        title="{}, {}, step={}, loss={:.5f}".format(
                            "Tacotron", time_string(), step, eval_loss),
                        target_spectrogram=mel_t,
                        max_len=t_len)

                    if hparams.predict_linear:
                        plot.plot_spectrogram(
                            lin_p,
                            os.path.join(eval_plot_dir,
                                         "step-{}-eval-linear-spectrogram.png".format(step)),
                            title="{}, {}, step={}, loss={:.5f}".format(
                                "Tacotron", time_string(), step, eval_loss),
                            target_spectrogram=lin_t,
                            max_len=t_len,
                            auto_aspect=True)

                    log("Eval loss for global step {}: {:.3f}".format(step, eval_loss))
                    log("Writing eval summary!")
                    add_eval_stats(summary_writer, step, linear_loss, before_loss, after_loss,
                                   stop_token_loss, eval_loss)

                if step % args.checkpoint_interval == 0 or step == args.tacotron_train_steps \
                        or step == init_step + 100:
                    # Save model and current global step
                    saver.save(sess, checkpoint_fpath, global_step=global_step)

                    log("\nSaving alignment, Mel-Spectrograms and griffin-lim inverted waveform..")
                    input_seq, mel_prediction, alignment, target, target_length = sess.run(
                        [
                            model.tower_inputs[0][0],
                            model.tower_mel_outputs[0][0],
                            model.tower_alignments[0][0],
                            model.tower_mel_targets[0][0],
                            model.tower_targets_lengths[0][0],
                        ])

                    # Save predicted mel spectrogram to disk (debug)
                    mel_filename = "mel-prediction-step-{}.npy".format(step)
                    np.save(os.path.join(mel_dir, mel_filename), mel_prediction.T,
                            allow_pickle=False)

                    # Save griffin-lim inverted wav for debug (mel -> wav)
                    wav = audio.inv_mel_spectrogram(mel_prediction.T, hparams)
                    audio.save_wav(
                        wav,
                        os.path.join(wav_dir, "step-{}-wave-from-mel.wav".format(step)),
                        sr=hparams.sample_rate)

                    # Save alignment plot to disk (control purposes)
                    plot.plot_alignment(
                        alignment,
                        os.path.join(plot_dir, "step-{}-align.png".format(step)),
                        title="{}, {}, step={}, loss={:.5f}".format(
                            "Tacotron", time_string(), step, loss),
                        max_len=target_length // hparams.outputs_per_step)

                    # Save real and predicted mel-spectrogram plot to disk (control purposes)
                    plot.plot_spectrogram(
                        mel_prediction,
                        os.path.join(plot_dir, "step-{}-mel-spectrogram.png".format(step)),
                        title="{}, {}, step={}, loss={:.5f}".format(
                            "Tacotron", time_string(), step, loss),
                        target_spectrogram=target,
                        max_len=target_length)
                    log("Input at step {}: {}".format(step, sequence_to_text(input_seq)))

                if step % args.embedding_interval == 0 or step == args.tacotron_train_steps \
                        or step == init_step + 100:
                    # Get current checkpoint state
                    checkpoint_state = tf.train.get_checkpoint_state(save_dir)

                    # Update Projector
                    log("\nSaving Model Character Embeddings visualization..")
                    add_embedding_stats(summary_writer, [model.embedding_table.name],
                                        [char_embedding_meta],
                                        checkpoint_state.model_checkpoint_path)
                    log("Tacotron Character embeddings have been updated on tensorboard!")

            log("Tacotron training complete after {} global steps!".format(
                args.tacotron_train_steps), slack=True)
            return save_dir
        except Exception as e:
            log("Exiting due to exception: {}".format(e), slack=True)
            traceback.print_exc()
            coord.request_stop(e)
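# A hypothetical entry point for train(); the argument names follow the
# attributes referenced above, but the defaults and module paths are
# assumptions, not the repo's actual CLI:
import argparse
from synthesizer.hparams import hparams  # assumed import path

parser = argparse.ArgumentParser()
parser.add_argument("--synthesizer_root", default="data/SV2TTS/synthesizer")
parser.add_argument("--restore", action="store_true", default=True)
parser.add_argument("--tacotron_train_steps", type=int, default=500000)
parser.add_argument("--summary_interval", type=int, default=2500)
parser.add_argument("--eval_interval", type=int, default=5000)
parser.add_argument("--checkpoint_interval", type=int, default=2000)
parser.add_argument("--embedding_interval", type=int, default=10000)
args = parser.parse_args()

save_dir = train("synthesizer/saved_models/logs", args, hparams)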