def output_types(self): return { "z": NeuralType(('B', 'D', 'T'), NormalDistributionSamplesType()), "y_m": NeuralType(('B', 'D', 'T'), NormalDistributionMeanType()), "y_logs": NeuralType(('B', 'D', 'T'), NormalDistributionLogVarianceType()), "logdet": NeuralType(('B'), LogDeterminantType()), "log_durs_predicted": NeuralType(('B', 'T'), TokenLogDurationType()), "log_durs_extracted": NeuralType(('B', 'T'), TokenLogDurationType()), "spect_lengths": NeuralType(('B'), LengthsType()), "attn": NeuralType(('B', 'T', 'T'), SequenceToSequenceAlignmentType()), }
def _prepare_for_export(self, **kwargs):
    super()._prepare_for_export(**kwargs)

    # Define input_types and output_types as required by export()
    self._input_types = {
        "text": NeuralType(('B', 'T_text'), TokenIndex()),
        "pitch": NeuralType(('B', 'T_text'), RegressionValuesType()),
        "pace": NeuralType(('B', 'T_text'), optional=True),
        "volume": NeuralType(('B', 'T_text')),
        "speaker": NeuralType(('B',), Index()),
    }
    self._output_types = {
        "spect": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType()),
        "num_frames": NeuralType(('B',), TokenDurationType()),
        "durs_predicted": NeuralType(('B', 'T_text'), TokenDurationType()),
        "log_durs_predicted": NeuralType(('B', 'T_text'), TokenLogDurationType()),
        "pitch_predicted": NeuralType(('B', 'T_text'), RegressionValuesType()),
        "volume_aligned": NeuralType(('B', 'T_spec'), RegressionValuesType()),
    }
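
# Hedged usage sketch for the export path these types support (this hook matches
# FastPitch's export signature, so FastPitchModel is assumed here). NeMo's
# Exportable interface infers the target format from the file extension;
# checkpoint and file names below are illustrative, not shipped defaults.
def _export_example():
    model = FastPitchModel.restore_from("fastpitch.nemo")  # illustrative checkpoint path
    model.eval()
    model.export("fastpitch.onnx")  # calls _prepare_for_export() internally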
@property
def input_types(self):
    return {
        "log_duration_pred": NeuralType(('B', 'T'), TokenLogDurationType()),
        "duration_target": NeuralType(('B', 'T'), TokenDurationType()),
        "mask": NeuralType(('B', 'T', 'D'), MaskType()),
    }
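
# A sketch of the masked duration loss these types describe, assuming MSE between
# predicted log-durations and log(1 + target) -- the +1 offset matches the
# `exp(...) - 1` inversion used throughout this section. Names are illustrative.
import torch
import torch.nn.functional as F

def masked_duration_loss_sketch(log_duration_pred, duration_target, mask):
    token_mask = mask[..., 0].float()  # reduce the (B, T, D) mask to per-token (B, T)
    loss = F.mse_loss(log_duration_pred, torch.log(duration_target.float() + 1), reduction='none')
    return (loss * token_mask).sum() / token_mask.sum()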
@property
def output_types(self):
    return {
        "x_m": NeuralType(('B', 'D', 'T'), NormalDistributionMeanType()),
        "x_logs": NeuralType(('B', 'D', 'T'), NormalDistributionLogVarianceType()),
        "logw": NeuralType(('B', 'T'), TokenLogDurationType()),
        "x_mask": NeuralType(('B', 'D', 'T'), MaskType()),
    }
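
# `x_mask` (and the get_mask_from_lengths calls later in this section) can be built
# from a lengths tensor with one comparison; a minimal sketch:
import torch

def mask_from_lengths_sketch(lengths, max_len=None):
    max_len = max_len if max_len is not None else int(lengths.max())
    ids = torch.arange(max_len, device=lengths.device)
    return ids.unsqueeze(0) < lengths.unsqueeze(1)  # (B, T) boolean mask

# mask_from_lengths_sketch(torch.tensor([3, 5])) ->
# tensor([[ True,  True,  True, False, False],
#         [ True,  True,  True,  True,  True]])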
@property
def input_types(self):
    return {
        "log_durs_predicted": NeuralType(('B', 'T'), TokenLogDurationType()),
        "durs_tgt": NeuralType(('B', 'T'), TokenDurationType()),
        "len": NeuralType(('B',), LengthsType()),
    }
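
# The inverse of the log(1 + d) duration parameterization appears repeatedly below
# (`exp(...) - 1`, clamped, then rounded); spelled out on a toy tensor:
import torch

log_durs = torch.tensor([[0.0, 1.2, 2.0]])
durs = torch.clamp(torch.exp(log_durs) - 1, min=0).round().long()
# -> tensor([[0, 2, 6]])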
@property
def input_types(self):
    return {
        "log_durs_predicted": NeuralType(('B', 'T'), TokenLogDurationType()),
        "pitch_predicted": NeuralType(('B', 'T'), RegressionValuesType()),
        "durs_tgt": NeuralType(('B', 'T'), TokenDurationType()),
        "dur_lens": NeuralType(('B',), LengthsType()),
        "pitch_tgt": NeuralType(('B', 'T'), RegressionValuesType()),
    }
@property
def output_types(self):
    return {
        "spect": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
        "spect_lens": NeuralType(('B',), LengthsType()),
        "spect_mask": NeuralType(('B', 'D', 'T'), MaskType()),
        "durs_predicted": NeuralType(('B', 'T'), TokenDurationType()),
        "log_durs_predicted": NeuralType(('B', 'T'), TokenLogDurationType()),
        "pitch_predicted": NeuralType(('B', 'T'), RegressionValuesType()),
    }
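
# `durs_predicted` feeds regulate_len, which repeats each encoder frame by its
# duration. A per-sample sketch with repeat_interleave; the real implementation
# batches, pads, and applies `pace` -- here pace > 1 is assumed to shorten
# durations, i.e. faster speech.
import torch

def regulate_len_sketch(durations, enc_out, pace=1.0):
    # durations: (B, T) frame counts; enc_out: (B, T, C) encoder states.
    durations = torch.round(durations.float() / pace).long()
    dec_lens = durations.sum(dim=1)  # (B,) decoded lengths
    expanded = [torch.repeat_interleave(enc_out[i], durations[i], dim=0) for i in range(enc_out.size(0))]
    return torch.nn.utils.rnn.pad_sequence(expanded, batch_first=True), dec_lens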
@property
def output_types(self):
    return {
        "spect": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType()),
        "num_frames": NeuralType(('B',), TokenDurationType()),
        "durs_predicted": NeuralType(('B', 'T_text'), TokenDurationType()),
        "log_durs_predicted": NeuralType(('B', 'T_text'), TokenLogDurationType()),
        "pitch_predicted": NeuralType(('B', 'T_text'), RegressionValuesType()),
        "attn_soft": NeuralType(('B', 'S', 'T_spec', 'T_text'), ProbsType()),
        "attn_logprob": NeuralType(('B', 'S', 'T_spec', 'T_text'), LogprobsType()),
        "attn_hard": NeuralType(('B', 'S', 'T_spec', 'T_text'), ProbsType()),
        "attn_hard_dur": NeuralType(('B', 'T_text'), TokenDurationType()),
        "pitch": NeuralType(('B', 'T_audio'), RegressionValuesType()),
    }
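
# The aligner tensors above are related by simple reductions; a shape sketch
# consistent with run_aligner further down (S is the single alignment head):
import torch

B, S, T_spec, T_text = 2, 1, 7, 4
attn_hard = torch.zeros(B, S, T_spec, T_text)  # one-hot over T_text per spectrogram frame
attn_hard_dur = attn_hard.sum(2)[:, 0, :]      # (B, T_text) frames per text token
num_frames = attn_hard_dur.sum(dim=1)          # (B,) total decoded frames per utterance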
class FastSpeech2HifiGanE2EModel(TextToWaveform):
    """An end-to-end speech synthesis model based on FastSpeech2 and HiFiGan that converts strings
    to audio without using the intermediate mel spectrogram representation."""

    def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None):
        if isinstance(cfg, dict):
            cfg = OmegaConf.create(cfg)
        super().__init__(cfg=cfg, trainer=trainer)

        self.audio_to_melspec_precessor = instantiate(cfg.preprocessor)
        self.encoder = instantiate(cfg.encoder)
        self.variance_adapter = instantiate(cfg.variance_adaptor)

        self.generator = instantiate(cfg.generator)
        self.multiperioddisc = MultiPeriodDiscriminator()
        self.multiscaledisc = MultiScaleDiscriminator()

        self.melspec_fn = instantiate(cfg.preprocessor, highfreq=None, use_grads=True)
        self.mel_val_loss = L1MelLoss()
        self.durationloss = DurationLoss()
        self.feat_matching_loss = FeatureMatchingLoss()
        self.disc_loss = DiscriminatorLoss()
        self.gen_loss = GeneratorLoss()
        self.mseloss = torch.nn.MSELoss()

        self.energy = cfg.add_energy_predictor
        self.pitch = cfg.add_pitch_predictor
        self.mel_loss_coeff = cfg.mel_loss_coeff
        self.pitch_loss_coeff = cfg.pitch_loss_coeff
        self.energy_loss_coeff = cfg.energy_loss_coeff
        self.splice_length = cfg.splice_length

        self.use_energy_pred = False
        self.use_pitch_pred = False
        self.log_train_images = False
        self.logged_real_samples = False
        self._tb_logger = None
        self.sample_rate = cfg.sample_rate
        self.hop_size = cfg.hop_size

        # Parser and mappings are used for inference only.
        self.parser = parsers.make_parser(name='en')
        if 'mappings_filepath' in cfg:
            mappings_filepath = cfg.get('mappings_filepath')
        else:
            raise ValueError(
                "You must specify a mappings.json file in the config file under model.mappings_filepath."
            )
        mappings_filepath = self.register_artifact('mappings_filepath', mappings_filepath)
        with open(mappings_filepath, 'r') as f:
            mappings = json.load(f)
            self.word2phones = mappings['word2phones']
            self.phone2idx = mappings['phone2idx']

    @property
    def tb_logger(self):
        if self._tb_logger is None:
            if self.logger is None or self.logger.experiment is None:
                return None
            tb_logger = self.logger.experiment
            if isinstance(self.logger, LoggerCollection):
                for logger in self.logger:
                    if isinstance(logger, TensorBoardLogger):
                        tb_logger = logger.experiment
                        break
            self._tb_logger = tb_logger
        return self._tb_logger

    def configure_optimizers(self):
        gen_params = chain(
            self.encoder.parameters(),
            self.generator.parameters(),
            self.variance_adapter.parameters(),
        )
        disc_params = chain(self.multiscaledisc.parameters(), self.multiperioddisc.parameters())
        opt1 = torch.optim.AdamW(disc_params, lr=self._cfg.lr)
        opt2 = torch.optim.AdamW(gen_params, lr=self._cfg.lr)

        num_procs = self._trainer.num_gpus * self._trainer.num_nodes
        num_samples = len(self._train_dl.dataset)
        batch_size = self._train_dl.batch_size
        iter_per_epoch = np.ceil(num_samples / (num_procs * batch_size))
        max_steps = iter_per_epoch * self._trainer.max_epochs
        logging.info(f"MAX STEPS: {max_steps}")

        sch1 = NoamAnnealing(opt1, d_model=256, warmup_steps=3000, max_steps=max_steps, min_lr=1e-5)
        sch1_dict = {'scheduler': sch1, 'interval': 'step'}
        sch2 = NoamAnnealing(opt2, d_model=256, warmup_steps=3000, max_steps=max_steps, min_lr=1e-5)
        sch2_dict = {'scheduler': sch2, 'interval': 'step'}
        return [opt1, opt2], [sch1_dict, sch2_dict]

    @typecheck(
        input_types={
            "text": NeuralType(('B', 'T'), TokenIndex()),
            "text_length": NeuralType(('B',), LengthsType()),
            "splice": NeuralType(optional=True),
            "spec_len": NeuralType(('B',), LengthsType(), optional=True),
            "durations": NeuralType(('B', 'T'), TokenDurationType(), optional=True),
            "pitch": NeuralType(('B', 'T'), RegressionValuesType(), optional=True),
            "energies": NeuralType(('B', 'T'), RegressionValuesType(), optional=True),
        },
        output_types={
            "audio": NeuralType(('B', 'S', 'T'), AudioSignal()),
            "splices": NeuralType(),
            "log_dur_preds": NeuralType(('B', 'T'), TokenLogDurationType()),
            "pitch_preds": NeuralType(('B', 'T'), RegressionValuesType()),
            "energy_preds": NeuralType(('B', 'T'), RegressionValuesType()),
            "encoded_text_mask": NeuralType(('B', 'T', 'D'), MaskType()),
        },
    )
    def forward(self, *, text, text_length, splice=True, durations=None, pitch=None, energies=None, spec_len=None):
        encoded_text, encoded_text_mask = self.encoder(text=text, text_length=text_length)
        context, log_dur_preds, pitch_preds, energy_preds, spec_len = self.variance_adapter(
            x=encoded_text,
            x_len=text_length,
            dur_target=durations,
            pitch_target=pitch,
            energy_target=energies,
            spec_len=spec_len,
        )
        gen_in = context
        splices = None
        if splice:
            # Splice generated spec
            output = []
            splices = []
            for i, sample in enumerate(context):
                start = np.random.randint(
                    low=0, high=min(int(sample.size(0)), int(spec_len[i])) - self.splice_length
                )
                output.append(sample[start : start + self.splice_length, :])
                splices.append(start)
            gen_in = torch.stack(output)

        output = self.generator(x=gen_in.transpose(1, 2))
        return output, splices, log_dur_preds, pitch_preds, energy_preds, encoded_text_mask

    def training_step(self, batch, batch_idx, optimizer_idx):
        f, fl, t, tl, durations, pitch, energies = batch
        spec, spec_len = self.audio_to_melspec_precessor(f, fl)

        # train discriminator
        if optimizer_idx == 0:
            with torch.no_grad():
                audio_pred, splices, _, _, _, _ = self(
                    spec_len=spec_len,
                    text=t,
                    text_length=tl,
                    durations=durations,
                    pitch=pitch if not self.use_pitch_pred else None,
                    energies=energies if not self.use_energy_pred else None,
                )
                real_audio = []
                for i, splice in enumerate(splices):
                    real_audio.append(f[i, splice * self.hop_size : (splice + self.splice_length) * self.hop_size])
                real_audio = torch.stack(real_audio).unsqueeze(1)

            real_score_mp, gen_score_mp, _, _ = self.multiperioddisc(real_audio, audio_pred)
            real_score_ms, gen_score_ms, _, _ = self.multiscaledisc(real_audio, audio_pred)

            loss_mp, loss_mp_real, _ = self.disc_loss(real_score_mp, gen_score_mp)
            loss_ms, loss_ms_real, _ = self.disc_loss(real_score_ms, gen_score_ms)
            loss_mp /= len(loss_mp_real)
            loss_ms /= len(loss_ms_real)
            loss_disc = loss_mp + loss_ms

            self.log("loss_discriminator", loss_disc, prog_bar=True)
            self.log("loss_discriminator_ms", loss_ms)
            self.log("loss_discriminator_mp", loss_mp)
            return loss_disc

        # train generator
        elif optimizer_idx == 1:
            audio_pred, splices, log_dur_preds, pitch_preds, energy_preds, encoded_text_mask = self(
                spec_len=spec_len,
                text=t,
                text_length=tl,
                durations=durations,
                pitch=pitch if not self.use_pitch_pred else None,
                energies=energies if not self.use_energy_pred else None,
            )
            real_audio = []
            for i, splice in enumerate(splices):
                real_audio.append(f[i, splice * self.hop_size : (splice + self.splice_length) * self.hop_size])
            real_audio = torch.stack(real_audio).unsqueeze(1)

            # Do HiFiGAN generator loss
            audio_length = torch.tensor(
                [self.splice_length * self.hop_size for _ in range(real_audio.shape[0])]
            ).to(real_audio.device)
            real_spliced_spec, _ = self.melspec_fn(real_audio.squeeze(), seq_len=audio_length)
            pred_spliced_spec, _ = self.melspec_fn(audio_pred.squeeze(), seq_len=audio_length)
            loss_mel = torch.nn.functional.l1_loss(real_spliced_spec, pred_spliced_spec)
            loss_mel *= self.mel_loss_coeff
            _, gen_score_mp, real_feat_mp, gen_feat_mp = self.multiperioddisc(real_audio, audio_pred)
            _, gen_score_ms, real_feat_ms, gen_feat_ms = self.multiscaledisc(real_audio, audio_pred)
            loss_gen_mp, list_loss_gen_mp = self.gen_loss(gen_score_mp)
            loss_gen_ms, list_loss_gen_ms = self.gen_loss(gen_score_ms)
            loss_gen_mp /= len(list_loss_gen_mp)
            loss_gen_ms /= len(list_loss_gen_ms)
            total_loss = loss_gen_mp + loss_gen_ms + loss_mel
            loss_feat_mp = self.feat_matching_loss(real_feat_mp, gen_feat_mp)
            loss_feat_ms = self.feat_matching_loss(real_feat_ms, gen_feat_ms)
            total_loss += loss_feat_mp + loss_feat_ms

            self.log(name="loss_gen_disc_feat", value=loss_feat_mp + loss_feat_ms)
            self.log(name="loss_gen_disc_feat_ms", value=loss_feat_ms)
            self.log(name="loss_gen_disc_feat_mp", value=loss_feat_mp)
            self.log(name="loss_gen_mel", value=loss_mel)
            self.log(name="loss_gen_disc", value=loss_gen_mp + loss_gen_ms)
            self.log(name="loss_gen_disc_mp", value=loss_gen_mp)
            self.log(name="loss_gen_disc_ms", value=loss_gen_ms)

            dur_loss = self.durationloss(
                log_duration_pred=log_dur_preds, duration_target=durations.float(), mask=encoded_text_mask
            )
            self.log(name="loss_gen_duration", value=dur_loss)
            total_loss += dur_loss
            if self.pitch:
                pitch_loss = self.mseloss(pitch_preds, pitch.float()) * self.pitch_loss_coeff
                total_loss += pitch_loss
                self.log(name="loss_gen_pitch", value=pitch_loss)
            if self.energy:
                energy_loss = self.mseloss(energy_preds, energies) * self.energy_loss_coeff
                total_loss += energy_loss
                self.log(name="loss_gen_energy", value=energy_loss)

            # Log images to tensorboard
            if self.log_train_images:
                self.log_train_images = False
                if self.logger is not None and self.logger.experiment is not None:
                    self.tb_logger.add_image(
                        "train_mel_target",
                        plot_spectrogram_to_numpy(real_spliced_spec[0].data.cpu().numpy()),
                        self.global_step,
                        dataformats="HWC",
                    )
                    spec_predict = pred_spliced_spec[0].data.cpu().numpy()
                    self.tb_logger.add_image(
                        "train_mel_predicted",
                        plot_spectrogram_to_numpy(spec_predict),
                        self.global_step,
                        dataformats="HWC",
                    )
            self.log(name="loss_gen", prog_bar=True, value=total_loss)
            return total_loss

    def validation_step(self, batch, batch_idx):
        f, fl, t, tl, _, _, _ = batch
        spec, spec_len = self.audio_to_melspec_precessor(f, fl)
        audio_pred, _, _, _, _, _ = self(spec_len=spec_len, text=t, text_length=tl, splice=False)
        audio_pred.squeeze_()
        pred_spec, _ = self.melspec_fn(audio_pred, seq_len=spec_len)
        loss = self.mel_val_loss(
            spec_pred=pred_spec, spec_target=spec, spec_target_len=spec_len, pad_value=-11.52
        )

        return {
            "val_loss": loss,
            "audio_target": f.squeeze() if batch_idx == 0 else None,
            "audio_pred": audio_pred if batch_idx == 0 else None,
        }

    def on_train_epoch_start(self):
        # Switch to using energy predictions after 50% of training
        if not self.use_energy_pred and self.current_epoch >= np.ceil(0.5 * self._trainer.max_epochs):
            logging.info(f"Using energy predictions after epoch: {self.current_epoch}")
            self.use_energy_pred = True

        # Switch to using pitch predictions after 62.5% of training
        if not self.use_pitch_pred and self.current_epoch >= np.ceil(0.625 * self._trainer.max_epochs):
            logging.info(f"Using pitch predictions after epoch: {self.current_epoch}")
            self.use_pitch_pred = True

    def validation_epoch_end(self, outputs):
        if self.tb_logger is not None:
            _, audio_target, audio_predict = outputs[0].values()
            if not self.logged_real_samples:
                self.tb_logger.add_audio("val_target", audio_target[0].data.cpu(), self.global_step, self.sample_rate)
                self.logged_real_samples = True
            audio_predict = audio_predict[0].data.cpu()
            self.tb_logger.add_audio("val_pred", audio_predict, self.global_step, self.sample_rate)
        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()  # This reduces across batches, not workers!
        self.log('val_loss', avg_loss, sync_dist=True)
        self.log_train_images = True

    def __setup_dataloader_from_config(self, cfg, shuffle_should_be: bool = True, name: str = "train"):
        if "dataset" not in cfg or not isinstance(cfg.dataset, DictConfig):
            raise ValueError(f"No dataset for {name}")
        if "dataloader_params" not in cfg or not isinstance(cfg.dataloader_params, DictConfig):
            raise ValueError(f"No dataloader_params for {name}")
        if shuffle_should_be:
            if 'shuffle' not in cfg.dataloader_params:
                logging.warning(
                    f"Shuffle should be set to True for {self}'s {name} dataloader but was not found in its "
                    "config. Manually setting to True"
                )
                with open_dict(cfg.dataloader_params):
                    cfg.dataloader_params.shuffle = True
            elif not cfg.dataloader_params.shuffle:
                logging.error(f"The {name} dataloader for {self} has shuffle set to False!!!")
        elif not shuffle_should_be and cfg.dataloader_params.shuffle:
            logging.error(f"The {name} dataloader for {self} has shuffle set to True!!!")

        dataset = instantiate(cfg.dataset)
        return torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params)

    def setup_training_data(self, cfg):
        self._train_dl = self.__setup_dataloader_from_config(cfg)

    def setup_validation_data(self, cfg):
        self._validation_dl = self.__setup_dataloader_from_config(cfg, shuffle_should_be=False, name="validation")

    def parse(self, str_input: str, additional_word2phones=None) -> torch.Tensor:
        """
        Parses text input and converts it to phoneme indices.

        str_input (str): The input text to be converted.
        additional_word2phones (dict): Optional dictionary mapping words to phonemes for updating the model's
            word2phones. This will not overwrite the existing dictionary, just update it with OOV or new mappings.
            Defaults to None, which will keep the existing mapping.
        """
        # Update model's word2phones if applicable
        if additional_word2phones is not None:
            self.word2phones.update(additional_word2phones)

        # Convert text -> normalized text -> list of phones per word -> indices
        if str_input[-1] not in [".", "!", "?"]:
            str_input = str_input + "."
        norm_text = re.findall(r"""[\w']+|[.,!?;"]""", self.parser._normalize(str_input))

        try:
            phones = [self.word2phones[t] for t in norm_text]
        except KeyError as error:
            logging.error(
                f"The following word in the input is not in the model's dictionary and could not be converted"
                f" to phonemes: ({error}).\n"
                f"You can pass in an `additional_word2phones` dictionary with a conversion for"
                f" this word, e.g. {{'{error}': ['phone1', 'phone2', ...]}} to update the model's mapping."
            )
            raise

        tokens = []
        for phone_list in phones:
            inds = [self.phone2idx[p] for p in phone_list]
            tokens += inds

        x = torch.tensor(tokens).unsqueeze_(0).long().to(self.device)
        return x

    def convert_text_to_waveform(self, *, tokens):
        """
        Accepts tokens returned from self.parse() and returns a list of tensors.

        Note: The tensors in the list can have different lengths.
        """
        self.eval()
        token_len = torch.tensor([len(i) for i in tokens]).to(self.device)
        audio, _, log_dur_pred, _, _, _ = self(text=tokens, text_length=token_len, splice=False)
        audio = audio.squeeze(1)
        durations = torch.sum(torch.exp(log_dur_pred) - 1, 1).to(torch.int)
        audio_list = []
        for i, sample in enumerate(audio):
            audio_list.append(sample[: durations[i] * self.hop_size])

        return audio_list

    @classmethod
    def list_available_models(cls) -> 'List[PretrainedModelInfo]':
        """
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.

        Returns:
            List of available pre-trained models.
        """
        list_of_models = []
        model = PretrainedModelInfo(
            pretrained_model_name="tts_en_e2e_fastspeech2hifigan",
            location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_e2e_fastspeech2hifigan/versions/1.0.0/files/tts_en_e2e_fastspeech2hifigan.nemo",
            description="This model is trained on LJSpeech sampled at 22050Hz and can be used to generate female English voices with an American accent.",
            class_=cls,
        )
        list_of_models.append(model)

        return list_of_models
class MixerTTSModel(SpectrogramGenerator, Exportable):
    """Mixer-TTS pipeline."""

    def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None):
        super().__init__(cfg=cfg, trainer=trainer)

        cfg = self._cfg

        if "text_normalizer" in cfg.train_ds.dataset:
            self.normalizer = instantiate(cfg.train_ds.dataset.text_normalizer)
            self.text_normalizer_call = self.normalizer.normalize
            self.text_normalizer_call_args = {}
            if cfg.train_ds.dataset.get("text_normalizer_call_args", None) is not None:
                self.text_normalizer_call_args = cfg.train_ds.dataset.text_normalizer_call_args

        self.tokenizer = instantiate(cfg.train_ds.dataset.text_tokenizer)
        num_tokens = len(self.tokenizer.tokens)
        self.tokenizer_pad = self.tokenizer.pad
        self.tokenizer_unk = self.tokenizer.oov

        self.pitch_loss_scale = cfg.pitch_loss_scale
        self.durs_loss_scale = cfg.durs_loss_scale
        self.mel_loss_scale = cfg.mel_loss_scale

        self.aligner = instantiate(cfg.alignment_module)
        self.forward_sum_loss = ForwardSumLoss()
        self.bin_loss = BinLoss()
        self.add_bin_loss = False
        self.bin_loss_scale = 0.0
        self.bin_loss_start_ratio = cfg.bin_loss_start_ratio
        self.bin_loss_warmup_epochs = cfg.bin_loss_warmup_epochs

        self.cond_on_lm_embeddings = cfg.get("cond_on_lm_embeddings", False)

        if self.cond_on_lm_embeddings:
            self.lm_padding_value = (
                self._train_dl.dataset.lm_padding_value
                if self._train_dl is not None
                else self._get_lm_padding_value(cfg.train_ds.dataset.lm_model)
            )
            self.lm_embeddings = self._get_lm_embeddings(cfg.train_ds.dataset.lm_model)
            self.lm_embeddings.weight.requires_grad = False

            self.self_attention_module = instantiate(
                cfg.self_attention_module, n_lm_tokens_channels=self.lm_embeddings.weight.shape[1]
            )

        self.encoder = instantiate(cfg.encoder, num_tokens=num_tokens, padding_idx=self.tokenizer_pad)
        self.symbol_emb = self.encoder.to_embed

        self.duration_predictor = instantiate(cfg.duration_predictor)

        self.pitch_mean, self.pitch_std = float(cfg.pitch_mean), float(cfg.pitch_std)
        self.pitch_predictor = instantiate(cfg.pitch_predictor)
        self.pitch_emb = instantiate(cfg.pitch_emb)

        self.preprocessor = instantiate(cfg.preprocessor)

        self.decoder = instantiate(cfg.decoder)
        self.proj = nn.Linear(self.decoder.d_model, cfg.n_mel_channels)

    def _get_lm_model_tokenizer(self, lm_model="albert"):
        if getattr(self, "_lm_model_tokenizer", None) is not None:
            return self._lm_model_tokenizer

        if self._train_dl is not None and self._train_dl.dataset is not None:
            # Prefer the dataset's tokenizer when available.
            self._lm_model_tokenizer = self._train_dl.dataset.lm_model_tokenizer
            return self._lm_model_tokenizer

        if lm_model == "albert":
            self._lm_model_tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
        else:
            raise NotImplementedError(
                f"{lm_model} lm model is not supported. Only albert is supported at this moment."
            )

        return self._lm_model_tokenizer

    def _get_lm_embeddings(self, lm_model="albert"):
        if lm_model == "albert":
            return transformers.AlbertModel.from_pretrained('albert-base-v2').embeddings.word_embeddings
        else:
            raise NotImplementedError(
                f"{lm_model} lm model is not supported. Only albert is supported at this moment."
            )

    def _get_lm_padding_value(self, lm_model="albert"):
        if lm_model == "albert":
            return transformers.AlbertTokenizer.from_pretrained('albert-base-v2')._convert_token_to_id('<pad>')
        else:
            raise NotImplementedError(
                f"{lm_model} lm model is not supported. Only albert is supported at this moment."
            )

    def _metrics(
        self,
        true_durs,
        true_text_len,
        pred_durs,
        true_pitch,
        pred_pitch,
        true_spect=None,
        pred_spect=None,
        true_spect_len=None,
        attn_logprob=None,
        attn_soft=None,
        attn_hard=None,
        attn_hard_dur=None,
    ):
        text_mask = get_mask_from_lengths(true_text_len)
        mel_mask = get_mask_from_lengths(true_spect_len)
        loss = 0.0

        # dur loss and metrics
        durs_loss = F.mse_loss(pred_durs, (true_durs + 1).float().log(), reduction='none')
        durs_loss = durs_loss * text_mask.float()
        durs_loss = durs_loss.sum() / text_mask.sum()

        durs_pred = pred_durs.exp() - 1
        durs_pred = torch.clamp_min(durs_pred, min=0)
        durs_pred = durs_pred.round().long()

        acc = ((true_durs == durs_pred) * text_mask).sum().float() / text_mask.sum() * 100
        acc_dist_1 = (((true_durs - durs_pred).abs() <= 1) * text_mask).sum().float() / text_mask.sum() * 100
        acc_dist_3 = (((true_durs - durs_pred).abs() <= 3) * text_mask).sum().float() / text_mask.sum() * 100

        pred_spect = pred_spect.transpose(1, 2)

        # mel loss
        mel_loss = F.mse_loss(pred_spect, true_spect, reduction='none').mean(dim=-2)
        mel_loss = mel_loss * mel_mask.float()
        mel_loss = mel_loss.sum() / mel_mask.sum()

        loss = loss + self.durs_loss_scale * durs_loss + self.mel_loss_scale * mel_loss

        # aligner loss
        bin_loss, ctc_loss = None, None
        ctc_loss = self.forward_sum_loss(attn_logprob=attn_logprob, in_lens=true_text_len, out_lens=true_spect_len)
        loss = loss + ctc_loss
        if self.add_bin_loss:
            bin_loss = self.bin_loss(hard_attention=attn_hard, soft_attention=attn_soft)
            loss = loss + self.bin_loss_scale * bin_loss
        true_avg_pitch = average_pitch(true_pitch.unsqueeze(1), attn_hard_dur).squeeze(1)

        # pitch loss
        pitch_loss = F.mse_loss(pred_pitch, true_avg_pitch, reduction='none')  # noqa
        pitch_loss = (pitch_loss * text_mask).sum() / text_mask.sum()

        loss = loss + self.pitch_loss_scale * pitch_loss

        return loss, durs_loss, acc, acc_dist_1, acc_dist_3, pitch_loss, mel_loss, ctc_loss, bin_loss

    @torch.jit.unused
    def run_aligner(self, text, text_len, text_mask, spect, spect_len, attn_prior):
        text_emb = self.symbol_emb(text)
        attn_soft, attn_logprob = self.aligner(
            spect, text_emb.permute(0, 2, 1), mask=text_mask == 0, attn_prior=attn_prior,
        )
        attn_hard = binarize_attention_parallel(attn_soft, text_len, spect_len)
        attn_hard_dur = attn_hard.sum(2)[:, 0, :]
        assert torch.all(torch.eq(attn_hard_dur.sum(dim=1), spect_len))
        return attn_soft, attn_logprob, attn_hard, attn_hard_dur

    @typecheck(
        input_types={
            "text": NeuralType(('B', 'T_text'), TokenIndex()),
            "text_len": NeuralType(('B',), LengthsType()),
            "pitch": NeuralType(('B', 'T_audio'), RegressionValuesType(), optional=True),
            "spect": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType(), optional=True),
            "spect_len": NeuralType(('B',), LengthsType(), optional=True),
            "attn_prior": NeuralType(('B', 'T_spec', 'T_text'), ProbsType(), optional=True),
            "lm_tokens": NeuralType(('B', 'T_lm_tokens'), TokenIndex(), optional=True),
        },
        output_types={
            "pred_spect": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType()),
            "durs_predicted": NeuralType(('B', 'T_text'), TokenDurationType()),
            "log_durs_predicted": NeuralType(('B', 'T_text'), TokenLogDurationType()),
            "pitch_predicted": NeuralType(('B', 'T_text'), RegressionValuesType()),
            "attn_soft": NeuralType(('B', 'S', 'T_spec', 'T_text'), ProbsType()),
            "attn_logprob": NeuralType(('B', 'S', 'T_spec', 'T_text'), LogprobsType()),
            "attn_hard": NeuralType(('B', 'S', 'T_spec', 'T_text'), ProbsType()),
            "attn_hard_dur": NeuralType(('B', 'T_text'), TokenDurationType()),
        },
    )
    def forward(self, text, text_len, pitch=None, spect=None, spect_len=None, attn_prior=None, lm_tokens=None):
        if self.training:
            assert pitch is not None

        text_mask = get_mask_from_lengths(text_len).unsqueeze(2)

        enc_out, enc_mask = self.encoder(text, text_mask)

        # aligner
        attn_soft, attn_logprob, attn_hard, attn_hard_dur = None, None, None, None
        if spect is not None:
            attn_soft, attn_logprob, attn_hard, attn_hard_dur = self.run_aligner(
                text, text_len, text_mask, spect, spect_len, attn_prior
            )

        if self.cond_on_lm_embeddings:
            lm_emb = self.lm_embeddings(lm_tokens)
            lm_features = self.self_attention_module(
                enc_out, lm_emb, lm_emb, q_mask=enc_mask.squeeze(2), kv_mask=lm_tokens != self.lm_padding_value
            )

        # duration predictor
        log_durs_predicted = self.duration_predictor(enc_out, enc_mask)
        durs_predicted = torch.clamp(log_durs_predicted.exp() - 1, 0)

        # pitch predictor
        pitch_predicted = self.pitch_predictor(enc_out, enc_mask)

        # avg pitch, add pitch_emb
        if not self.training:
            if pitch is not None:
                pitch = average_pitch(pitch.unsqueeze(1), attn_hard_dur).squeeze(1)
                pitch_emb = self.pitch_emb(pitch.unsqueeze(1))
            else:
                pitch_emb = self.pitch_emb(pitch_predicted.unsqueeze(1))
        else:
            pitch = average_pitch(pitch.unsqueeze(1), attn_hard_dur).squeeze(1)
            pitch_emb = self.pitch_emb(pitch.unsqueeze(1))

        enc_out = enc_out + pitch_emb.transpose(1, 2)

        if self.cond_on_lm_embeddings:
            enc_out = enc_out + lm_features

        # regulate length
        len_regulated_enc_out, dec_lens = regulate_len(attn_hard_dur, enc_out)

        dec_out, dec_lens = self.decoder(len_regulated_enc_out, get_mask_from_lengths(dec_lens).unsqueeze(2))
        pred_spect = self.proj(dec_out)

        return (
            pred_spect,
            durs_predicted,
            log_durs_predicted,
            pitch_predicted,
            attn_soft,
            attn_logprob,
            attn_hard,
            attn_hard_dur,
        )

    def infer(
        self,
        text,
        text_len=None,
        text_mask=None,
        spect=None,
        spect_len=None,
        attn_prior=None,
        use_gt_durs=False,
        lm_tokens=None,
        pitch=None,
    ):
        if text_mask is None:
            text_mask = get_mask_from_lengths(text_len).unsqueeze(2)

        enc_out, enc_mask = self.encoder(text, text_mask)

        # aligner
        attn_hard_dur = None
        if use_gt_durs:
            attn_soft, attn_logprob, attn_hard, attn_hard_dur = self.run_aligner(
                text, text_len, text_mask, spect, spect_len, attn_prior
            )

        if self.cond_on_lm_embeddings:
            lm_emb = self.lm_embeddings(lm_tokens)
            lm_features = self.self_attention_module(
                enc_out, lm_emb, lm_emb, q_mask=enc_mask.squeeze(2), kv_mask=lm_tokens != self.lm_padding_value
            )

        # duration predictor
        log_durs_predicted = self.duration_predictor(enc_out, enc_mask)
        durs_predicted = torch.clamp(log_durs_predicted.exp() - 1, 0)

        # avg pitch, pitch predictor
        if use_gt_durs and pitch is not None:
            pitch = average_pitch(pitch.unsqueeze(1), attn_hard_dur).squeeze(1)
            pitch_emb = self.pitch_emb(pitch.unsqueeze(1))
        else:
            pitch_predicted = self.pitch_predictor(enc_out, enc_mask)
            pitch_emb = self.pitch_emb(pitch_predicted.unsqueeze(1))

        # add pitch emb
        enc_out = enc_out + pitch_emb.transpose(1, 2)

        if self.cond_on_lm_embeddings:
            enc_out = enc_out + lm_features

        if use_gt_durs:
            if attn_hard_dur is not None:
                len_regulated_enc_out, dec_lens = regulate_len(attn_hard_dur, enc_out)
            else:
                raise NotImplementedError
        else:
            len_regulated_enc_out, dec_lens = regulate_len(durs_predicted, enc_out)

        dec_out, _ = self.decoder(len_regulated_enc_out, get_mask_from_lengths(dec_lens).unsqueeze(2))
        pred_spect = self.proj(dec_out)

        return pred_spect

    def on_train_epoch_start(self):
        bin_loss_start_epoch = np.ceil(self.bin_loss_start_ratio * self._trainer.max_epochs)

        # Add bin loss when current_epoch >= bin_loss_start_epoch
        if not self.add_bin_loss and self.current_epoch >= bin_loss_start_epoch:
            logging.info(f"Using hard attentions after epoch: {self.current_epoch}")
            self.add_bin_loss = True

        if self.add_bin_loss:
            self.bin_loss_scale = min(
                (self.current_epoch - bin_loss_start_epoch) / self.bin_loss_warmup_epochs, 1.0
            )

    def training_step(self, batch, batch_idx):
        attn_prior, lm_tokens = None, None
        if self.cond_on_lm_embeddings:
            audio, audio_len, text, text_len, attn_prior, pitch, _, lm_tokens = batch
        else:
            audio, audio_len, text, text_len, attn_prior, pitch, _ = batch

        spect, spect_len = self.preprocessor(input_signal=audio, length=audio_len)

        # pitch normalization
        zero_pitch_idx = pitch == 0
        pitch = (pitch - self.pitch_mean) / self.pitch_std
        pitch[zero_pitch_idx] = 0.0

        (
            pred_spect,
            _,
            pred_log_durs,
            pred_pitch,
            attn_soft,
            attn_logprob,
            attn_hard,
            attn_hard_dur,
        ) = self(
            text=text,
            text_len=text_len,
            pitch=pitch,
            spect=spect,
            spect_len=spect_len,
            attn_prior=attn_prior,
            lm_tokens=lm_tokens,
        )

        (loss, durs_loss, acc, acc_dist_1, acc_dist_3, pitch_loss, mel_loss, ctc_loss, bin_loss) = self._metrics(
            pred_durs=pred_log_durs,
            pred_pitch=pred_pitch,
            true_durs=attn_hard_dur,
            true_text_len=text_len,
            true_pitch=pitch,
            true_spect=spect,
            pred_spect=pred_spect,
            true_spect_len=spect_len,
            attn_logprob=attn_logprob,
            attn_soft=attn_soft,
            attn_hard=attn_hard,
            attn_hard_dur=attn_hard_dur,
        )

        train_log = {
            'train_loss': loss,
            'train_durs_loss': durs_loss,
            'train_pitch_loss': torch.tensor(1.0).to(durs_loss.device) if pitch_loss is None else pitch_loss,
            'train_mel_loss': mel_loss,
            'train_durs_acc': acc,
            'train_durs_acc_dist_3': acc_dist_3,
            'train_ctc_loss': torch.tensor(1.0).to(durs_loss.device) if ctc_loss is None else ctc_loss,
            'train_bin_loss': torch.tensor(1.0).to(durs_loss.device) if bin_loss is None else bin_loss,
        }
        return {'loss': loss, 'progress_bar': train_log, 'log': train_log}

    def validation_step(self, batch, batch_idx):
        attn_prior, lm_tokens = None, None
        if self.cond_on_lm_embeddings:
            audio, audio_len, text, text_len, attn_prior, pitch, _, lm_tokens = batch
        else:
            audio, audio_len, text, text_len, attn_prior, pitch, _ = batch

        spect, spect_len = self.preprocessor(input_signal=audio, length=audio_len)

        # pitch normalization
        zero_pitch_idx = pitch == 0
        pitch = (pitch - self.pitch_mean) / self.pitch_std
        pitch[zero_pitch_idx] = 0.0

        (
            pred_spect,
            _,
            pred_log_durs,
            pred_pitch,
            attn_soft,
            attn_logprob,
            attn_hard,
            attn_hard_dur,
        ) = self(
            text=text,
            text_len=text_len,
            pitch=pitch,
            spect=spect,
            spect_len=spect_len,
            attn_prior=attn_prior,
            lm_tokens=lm_tokens,
        )

        (loss, durs_loss, acc, acc_dist_1, acc_dist_3, pitch_loss, mel_loss, ctc_loss, bin_loss) = self._metrics(
            pred_durs=pred_log_durs,
            pred_pitch=pred_pitch,
            true_durs=attn_hard_dur,
            true_text_len=text_len,
            true_pitch=pitch,
            true_spect=spect,
            pred_spect=pred_spect,
            true_spect_len=spect_len,
            attn_logprob=attn_logprob,
            attn_soft=attn_soft,
            attn_hard=attn_hard,
            attn_hard_dur=attn_hard_dur,
        )

        # without ground truth internal features except for durations
        pred_spect, _, pred_log_durs, pred_pitch, attn_soft, attn_logprob, attn_hard, attn_hard_dur = self(
            text=text,
            text_len=text_len,
            pitch=None,
            spect=spect,
            spect_len=spect_len,
            attn_prior=attn_prior,
            lm_tokens=lm_tokens,
        )

        *_, with_pred_features_mel_loss, _, _ = self._metrics(
            pred_durs=pred_log_durs,
            pred_pitch=pred_pitch,
            true_durs=attn_hard_dur,
            true_text_len=text_len,
            true_pitch=pitch,
            true_spect=spect,
            pred_spect=pred_spect,
            true_spect_len=spect_len,
            attn_logprob=attn_logprob,
            attn_soft=attn_soft,
            attn_hard=attn_hard,
            attn_hard_dur=attn_hard_dur,
        )

        val_log = {
            'val_loss': loss,
            'val_durs_loss': durs_loss,
            'val_pitch_loss': torch.tensor(1.0).to(durs_loss.device) if pitch_loss is None else pitch_loss,
            'val_mel_loss': mel_loss,
            'val_with_pred_features_mel_loss': with_pred_features_mel_loss,
            'val_durs_acc': acc,
            'val_durs_acc_dist_3': acc_dist_3,
            'val_ctc_loss': torch.tensor(1.0).to(durs_loss.device) if ctc_loss is None else ctc_loss,
            'val_bin_loss': torch.tensor(1.0).to(durs_loss.device) if bin_loss is None else bin_loss,
        }
        self.log_dict(val_log, prog_bar=False, on_epoch=True, logger=True, sync_dist=True)

        if batch_idx == 0 and self.current_epoch % 5 == 0 and isinstance(self.logger, WandbLogger):
            specs = []
            pitches = []
            for i in range(min(3, spect.shape[0])):
                specs += [
                    wandb.Image(
                        plot_spectrogram_to_numpy(spect[i, :, : spect_len[i]].data.cpu().numpy()),
                        caption=f"gt mel {i}",
                    ),
                    wandb.Image(
                        plot_spectrogram_to_numpy(
                            pred_spect.transpose(1, 2)[i, :, : spect_len[i]].data.cpu().numpy()
                        ),
                        caption=f"pred mel {i}",
                    ),
                ]

                pitches += [
                    wandb.Image(
                        plot_pitch_to_numpy(
                            average_pitch(pitch.unsqueeze(1), attn_hard_dur)
                            .squeeze(1)[i, : text_len[i]]
                            .data.cpu()
                            .numpy(),
                            ylim_range=[-2.5, 2.5],
                        ),
                        caption=f"gt pitch {i}",
                    ),
                ]

                pitches += [
                    wandb.Image(
                        plot_pitch_to_numpy(
                            pred_pitch[i, : text_len[i]].data.cpu().numpy(), ylim_range=[-2.5, 2.5]
                        ),
                        caption=f"pred pitch {i}",
                    ),
                ]

            self.logger.experiment.log({"specs": specs, "pitches": pitches})

    @typecheck(
        input_types={
            "tokens": NeuralType(('B', 'T_text'), TokenIndex(), optional=True),
            "tokens_len": NeuralType(('B',), LengthsType(), optional=True),
            "lm_tokens": NeuralType(('B', 'T_lm_tokens'), TokenIndex(), optional=True),
            "raw_texts": [NeuralType(optional=True)],
            "lm_model": NeuralType(optional=True),
        },
        output_types={
            "spect": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType()),
        },
    )
    def generate_spectrogram(
        self,
        tokens: Optional[torch.Tensor] = None,
        tokens_len: Optional[torch.Tensor] = None,
        lm_tokens: Optional[torch.Tensor] = None,
        raw_texts: Optional[List[str]] = None,
        lm_model: str = "albert",
    ):
        if tokens is not None:
            if tokens_len is None:
                # it is assumed that padding is consecutive and only at the end
                tokens_len = (tokens != self.tokenizer.pad).sum(dim=-1)
        else:
            if raw_texts is None:
                raise ValueError("raw_texts must be specified if tokens is None")

            t_seqs = [self.tokenizer(t) for t in raw_texts]
            tokens = torch.nn.utils.rnn.pad_sequence(
                sequences=[torch.tensor(t, dtype=torch.long, device=self.device) for t in t_seqs],
                batch_first=True,
                padding_value=self.tokenizer.pad,
            )
            tokens_len = torch.tensor([len(t) for t in t_seqs], dtype=torch.long, device=tokens.device)

        if self.cond_on_lm_embeddings and lm_tokens is None:
            if raw_texts is None:
                raise ValueError("raw_texts must be specified if lm_tokens is None")

            lm_model_tokenizer = self._get_lm_model_tokenizer(lm_model)
            lm_padding_value = lm_model_tokenizer._convert_token_to_id('<pad>')
            lm_space_value = lm_model_tokenizer._convert_token_to_id('▁')

            assert isinstance(self.tokenizer, EnglishCharsTokenizer) or isinstance(
                self.tokenizer, EnglishPhonemesTokenizer
            )

            preprocess_texts_as_tts_input = [self.tokenizer.text_preprocessing_func(t) for t in raw_texts]
            lm_tokens_as_ids_list = [
                lm_model_tokenizer.encode(t, add_special_tokens=False) for t in preprocess_texts_as_tts_input
            ]

            if self.tokenizer.pad_with_space:
                lm_tokens_as_ids_list = [[lm_space_value] + t + [lm_space_value] for t in lm_tokens_as_ids_list]

            lm_tokens = torch.full(
                (len(lm_tokens_as_ids_list), max([len(t) for t in lm_tokens_as_ids_list])),
                fill_value=lm_padding_value,
                device=tokens.device,
            )
            for i, lm_tokens_i in enumerate(lm_tokens_as_ids_list):
                lm_tokens[i, : len(lm_tokens_i)] = torch.tensor(lm_tokens_i, device=tokens.device)

        pred_spect = self.infer(tokens, tokens_len, lm_tokens=lm_tokens).transpose(1, 2)
        return pred_spect

    def parse(self, text: str, normalize=True) -> torch.Tensor:
        if normalize and getattr(self, "text_normalizer_call", None) is not None:
            text = self.text_normalizer_call(text, **self.text_normalizer_call_args)
        return torch.tensor(self.tokenizer.encode(text)).long().unsqueeze(0).to(self.device)

    @staticmethod
    def _loader(cfg):
        try:
            _ = cfg.dataset.manifest_filepath
        except omegaconf.errors.MissingMandatoryValue:
            logging.warning("manifest_filepath was skipped. No dataset for this model.")
            return None

        dataset = instantiate(cfg.dataset)
        return torch.utils.data.DataLoader(  # noqa
            dataset=dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params,
        )

    def setup_training_data(self, cfg):
        self._train_dl = self._loader(cfg)

    def setup_validation_data(self, cfg):
        self._validation_dl = self._loader(cfg)

    def setup_test_data(self, cfg):
        """Omitted."""
        pass

    @classmethod
    def list_available_models(cls):
        """Empty."""
        pass

    @property
    def input_types(self):
        return {
            "text": NeuralType(('B', 'T_text'), TokenIndex()),
            "lm_tokens": NeuralType(('B', 'T_lm_tokens'), TokenIndex(), optional=True),
        }

    @property
    def output_types(self):
        return {
            "spect": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType()),
        }

    def forward_for_export(self, text, lm_tokens=None):
        text_mask = (text != self.tokenizer_pad).unsqueeze(2)
        spect = self.infer(text=text, text_mask=text_mask, lm_tokens=lm_tokens).transpose(1, 2)
        return spect.to(torch.float)
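
# Hedged inference sketch for MixerTTSModel. Since it stops at the spectrogram,
# pairing it with a separately trained vocoder (NeMo's HifiGanModel is assumed
# here; checkpoint names are illustrative) produces audio:
def _mixer_tts_example():
    model = MixerTTSModel.from_pretrained("tts_en_lj_mixertts")
    spect = model.generate_spectrogram(raw_texts=["Hello world!"])  # (B, D, T_spec)

    vocoder = HifiGanModel.from_pretrained("tts_hifigan")
    audio = vocoder.convert_spectrogram_to_audio(spec=spect)
    return audio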
class FastPitchHifiGanE2EModel(TextToWaveform):
    """An end-to-end speech synthesis model based on FastPitch and HiFiGan that converts strings
    to audio without using the intermediate mel spectrogram representation.
    """

    def __init__(self, cfg: DictConfig, trainer: Trainer = None):
        if isinstance(cfg, dict):
            cfg = OmegaConf.create(cfg)

        self._parser = parsers.make_parser(
            labels=cfg.labels,
            name='en',
            unk_id=-1,
            blank_id=-1,
            do_normalize=True,
            abbreviation_version="fastpitch",
            make_table=False,
        )

        super().__init__(cfg=cfg, trainer=trainer)

        schema = OmegaConf.structured(FastPitchHifiGanE2EConfig)
        # ModelPT ensures that cfg is a DictConfig, but do this second check in case ModelPT changes
        if isinstance(cfg, dict):
            cfg = OmegaConf.create(cfg)
        elif not isinstance(cfg, DictConfig):
            raise ValueError(f"cfg was type: {type(cfg)}. Expected either a dict or a DictConfig")
        # Ensure passed cfg is compliant with schema
        OmegaConf.merge(cfg, schema)

        self.preprocessor = instantiate(cfg.preprocessor)
        self.melspec_fn = instantiate(cfg.preprocessor, highfreq=None, use_grads=True)

        self.encoder = instantiate(cfg.input_fft)
        self.duration_predictor = instantiate(cfg.duration_predictor)
        self.pitch_predictor = instantiate(cfg.pitch_predictor)

        self.generator = instantiate(cfg.generator)
        self.multiperioddisc = MultiPeriodDiscriminator()
        self.multiscaledisc = MultiScaleDiscriminator()

        self.mel_val_loss = L1MelLoss()
        self.feat_matching_loss = FeatureMatchingLoss()
        self.disc_loss = DiscriminatorLoss()
        self.gen_loss = GeneratorLoss()

        self.max_token_duration = cfg.max_token_duration

        self.pitch_emb = torch.nn.Conv1d(
            1,
            cfg.symbols_embedding_dim,
            kernel_size=cfg.pitch_embedding_kernel_size,
            padding=int((cfg.pitch_embedding_kernel_size - 1) / 2),
        )

        # Store values precomputed from training data for convenience
        self.register_buffer('pitch_mean', torch.zeros(1))
        self.register_buffer('pitch_std', torch.zeros(1))

        self.pitchloss = PitchLoss()
        self.durationloss = DurationLoss()

        self.mel_loss_coeff = cfg.mel_loss_coeff

        self.log_train_images = False
        self.logged_real_samples = False
        self._tb_logger = None
        self.hann_window = None
        self.splice_length = cfg.splice_length
        self.sample_rate = cfg.sample_rate
        self.hop_size = cfg.hop_size

    @property
    def tb_logger(self):
        if self._tb_logger is None:
            if self.logger is None or self.logger.experiment is None:
                return None
            tb_logger = self.logger.experiment
            if isinstance(self.logger, LoggerCollection):
                for logger in self.logger:
                    if isinstance(logger, TensorBoardLogger):
                        tb_logger = logger.experiment
                        break
            self._tb_logger = tb_logger
        return self._tb_logger

    @property
    def parser(self):
        if self._parser is not None:
            return self._parser
        self._parser = parsers.make_parser(
            labels=self._cfg.labels,
            name='en',
            unk_id=-1,
            blank_id=-1,
            do_normalize=True,
            abbreviation_version="fastpitch",
            make_table=False,
        )
        return self._parser

    def parse(self, str_input: str) -> torch.Tensor:
        if str_input[-1] not in [".", "!", "?"]:
            str_input = str_input + "."
        tokens = self.parser(str_input)
        x = torch.tensor(tokens).unsqueeze_(0).long().to(self.device)
        return x

    def configure_optimizers(self):
        gen_params = chain(
            self.pitch_emb.parameters(),
            self.encoder.parameters(),
            self.duration_predictor.parameters(),
            self.pitch_predictor.parameters(),
            self.generator.parameters(),
        )
        disc_params = chain(self.multiscaledisc.parameters(), self.multiperioddisc.parameters())
        opt1 = torch.optim.AdamW(disc_params, lr=self._cfg.lr)
        opt2 = torch.optim.AdamW(gen_params, lr=self._cfg.lr)

        num_procs = self._trainer.num_gpus * self._trainer.num_nodes
        num_samples = len(self._train_dl.dataset)
        batch_size = self._train_dl.batch_size
        iter_per_epoch = np.ceil(num_samples / (num_procs * batch_size))
        max_steps = iter_per_epoch * self._trainer.max_epochs
        logging.info(f"MAX STEPS: {max_steps}")

        sch1 = NoamAnnealing(opt1, d_model=1, warmup_steps=1000, max_steps=max_steps, last_epoch=-1)
        sch1_dict = {'scheduler': sch1, 'interval': 'step'}
        sch2 = NoamAnnealing(opt2, d_model=1, warmup_steps=1000, max_steps=max_steps, last_epoch=-1)
        sch2_dict = {'scheduler': sch2, 'interval': 'step'}
        return [opt1, opt2], [sch1_dict, sch2_dict]

    @typecheck(
        input_types={
            "text": NeuralType(('B', 'T'), TokenIndex()),
            "durs": NeuralType(('B', 'T'), TokenDurationType(), optional=True),
            "pitch": NeuralType(('B', 'T'), RegressionValuesType(), optional=True),
            "pace": NeuralType(optional=True),
            "splice": NeuralType(optional=True),
        },
        output_types={
            "audio": NeuralType(('B', 'S', 'T'), AudioSignal()),
            "splices": NeuralType(),
            "log_dur_preds": NeuralType(('B', 'T'), TokenLogDurationType()),
            "pitch_preds": NeuralType(('B', 'T'), RegressionValuesType()),
        },
    )
    def forward(self, *, text, durs=None, pitch=None, pace=1.0, splice=True):
        if self.training:
            assert durs is not None
            assert pitch is not None

        # Input FFT
        enc_out, enc_mask = self.encoder(input=text, conditioning=0)

        # Embedded for predictors
        pred_enc_out, pred_enc_mask = enc_out, enc_mask

        # Predict durations
        log_durs_predicted = self.duration_predictor(pred_enc_out, pred_enc_mask)
        durs_predicted = torch.clamp(torch.exp(log_durs_predicted) - 1, 0, self.max_token_duration)

        # Predict pitch
        pitch_predicted = self.pitch_predictor(enc_out, enc_mask)
        if pitch is None:
            pitch_emb = self.pitch_emb(pitch_predicted.unsqueeze(1))
        else:
            pitch_emb = self.pitch_emb(pitch.unsqueeze(1))
        enc_out = enc_out + pitch_emb.transpose(1, 2)

        if durs is None:
            len_regulated, dec_lens = regulate_len(durs_predicted, enc_out, pace)
        else:
            len_regulated, dec_lens = regulate_len(durs, enc_out, pace)

        gen_in = len_regulated
        splices = []
        if splice:
            output = []
            for i, sample in enumerate(len_regulated):
                start = np.random.randint(
                    low=0, high=min(int(sample.size(0)), int(dec_lens[i])) - self.splice_length
                )
                # Splice generated spec
                output.append(sample[start : start + self.splice_length, :])
                splices.append(start)
            gen_in = torch.stack(output)

        output = self.generator(x=gen_in.transpose(1, 2))

        return output, torch.tensor(splices), log_durs_predicted, pitch_predicted

    def training_step(self, batch, batch_idx, optimizer_idx):
        audio, _, text, text_lens, durs, pitch, _ = batch

        # train discriminator
        if optimizer_idx == 0:
            with torch.no_grad():
                audio_pred, splices, _, _ = self(text=text, durs=durs, pitch=pitch)
                real_audio = []
                for i, splice in enumerate(splices):
                    real_audio.append(audio[i, splice * self.hop_size : (splice + self.splice_length) * self.hop_size])
                real_audio = torch.stack(real_audio).unsqueeze(1)

            real_score_mp, gen_score_mp, _, _ = self.multiperioddisc(real_audio, audio_pred)
            real_score_ms, gen_score_ms, _, _ = self.multiscaledisc(real_audio, audio_pred)

            loss_mp, loss_mp_real, _ = self.disc_loss(
                disc_real_outputs=real_score_mp, disc_generated_outputs=gen_score_mp
            )
            loss_ms, loss_ms_real, _ = self.disc_loss(
                disc_real_outputs=real_score_ms, disc_generated_outputs=gen_score_ms
            )
            loss_mp /= len(loss_mp_real)
            loss_ms /= len(loss_ms_real)
            loss_disc = loss_mp + loss_ms

            self.log("loss_discriminator", loss_disc, prog_bar=True)
            self.log("loss_discriminator_ms", loss_ms)
            self.log("loss_discriminator_mp", loss_mp)
            return loss_disc

        # train generator
        elif optimizer_idx == 1:
            audio_pred, splices, log_dur_preds, pitch_preds = self(text=text, durs=durs, pitch=pitch)
            real_audio = []
            for i, splice in enumerate(splices):
                real_audio.append(audio[i, splice * self.hop_size : (splice + self.splice_length) * self.hop_size])
            real_audio = torch.stack(real_audio).unsqueeze(1)

            dur_loss = self.durationloss(log_durs_predicted=log_dur_preds, durs_tgt=durs, len=text_lens)
            pitch_loss = self.pitchloss(pitch_predicted=pitch_preds, pitch_tgt=pitch)

            # Do HiFiGAN generator loss
            audio_length = torch.tensor(
                [self.splice_length * self.hop_size for _ in range(real_audio.shape[0])]
            ).to(real_audio.device)
            real_spliced_spec, _ = self.melspec_fn(real_audio.squeeze(), audio_length)
            pred_spliced_spec, _ = self.melspec_fn(audio_pred.squeeze(), audio_length)
            loss_mel = torch.nn.functional.l1_loss(real_spliced_spec, pred_spliced_spec)
            loss_mel *= self.mel_loss_coeff
            _, gen_score_mp, _, _ = self.multiperioddisc(real_audio, audio_pred)
            _, gen_score_ms, _, _ = self.multiscaledisc(y=real_audio, y_hat=audio_pred)
            loss_gen_mp, list_loss_gen_mp = self.gen_loss(disc_outputs=gen_score_mp)
            loss_gen_ms, list_loss_gen_ms = self.gen_loss(disc_outputs=gen_score_ms)
            loss_gen_mp /= len(list_loss_gen_mp)
            loss_gen_ms /= len(list_loss_gen_ms)
            total_loss = loss_gen_mp + loss_gen_ms + loss_mel
            total_loss += dur_loss
            total_loss += pitch_loss

            self.log(name="loss_gen_mel", value=loss_mel)
            self.log(name="loss_gen_disc", value=loss_gen_mp + loss_gen_ms)
            self.log(name="loss_gen_disc_mp", value=loss_gen_mp)
            self.log(name="loss_gen_disc_ms", value=loss_gen_ms)
            self.log(name="loss_gen_duration", value=dur_loss)
            self.log(name="loss_gen_pitch", value=pitch_loss)

            # Log images to tensorboard
            if self.log_train_images:
                self.log_train_images = False
                if self.logger is not None and self.logger.experiment is not None:
                    self.tb_logger.add_image(
                        "train_mel_target",
                        plot_spectrogram_to_numpy(real_spliced_spec[0].data.cpu().numpy()),
                        self.global_step,
                        dataformats="HWC",
                    )
                    spec_predict = pred_spliced_spec[0].data.cpu().numpy()
                    self.tb_logger.add_image(
                        "train_mel_predicted",
                        plot_spectrogram_to_numpy(spec_predict),
                        self.global_step,
                        dataformats="HWC",
                    )
            self.log(name="loss_gen", prog_bar=True, value=total_loss)
            return total_loss

    def validation_step(self, batch, batch_idx):
        audio, audio_lens, text, _, _, _, _ = batch
        mels, mel_lens = self.preprocessor(audio, audio_lens)

        audio_pred, _, log_durs_predicted, _ = self(text=text, durs=None, pitch=None, splice=False)
        audio_length = torch.sum(torch.clamp(torch.exp(log_durs_predicted) - 1, 0), dim=1)
        audio_pred.squeeze_()
        pred_spec, _ = self.melspec_fn(audio_pred, audio_length)
        loss = self.mel_val_loss(
            spec_pred=pred_spec, spec_target=mels, spec_target_len=mel_lens, pad_value=-11.52, transpose=False
        )

        return {
            "val_loss": loss,
            "audio_target": audio if batch_idx == 0 else None,
            "audio_pred": audio_pred.squeeze() if batch_idx == 0 else None,
        }

    def validation_epoch_end(self, outputs):
        if self.tb_logger is not None:
            _, audio_target, audio_predict = outputs[0].values()
            if not self.logged_real_samples:
                self.tb_logger.add_audio("val_target", audio_target[0].data.cpu(), self.global_step, self.sample_rate)
                self.logged_real_samples = True
            audio_predict = audio_predict[0].data.cpu()
            self.tb_logger.add_audio("val_pred", audio_predict, self.global_step, self.sample_rate)
        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()  # This reduces across batches, not workers!
        self.log('val_loss', avg_loss, sync_dist=True)
        self.log_train_images = True

    def _loader(self, cfg):
        dataset = FastPitchDataset(
            manifest_filepath=cfg['manifest_filepath'],
            parser=self.parser,
            sample_rate=cfg['sample_rate'],
            int_values=cfg.get('int_values', False),
            max_duration=cfg.get('max_duration', None),
            min_duration=cfg.get('min_duration', None),
            max_utts=cfg.get('max_utts', 0),
            trim=cfg.get('trim_silence', True),
        )
        return torch.utils.data.DataLoader(
            dataset=dataset,
            batch_size=cfg['batch_size'],
            collate_fn=dataset.collate_fn,
            drop_last=cfg.get('drop_last', True),
            shuffle=cfg['shuffle'],
            num_workers=cfg.get('num_workers', 16),
        )

    def setup_training_data(self, cfg):
        self._train_dl = self._loader(cfg)

    def setup_validation_data(self, cfg):
        self._validation_dl = self._loader(cfg)

    def setup_test_data(self, cfg):
        """Omitted."""
        pass

    @classmethod
    def list_available_models(cls) -> 'List[PretrainedModelInfo]':
        """
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.

        Returns:
            List of available pre-trained models.
        """
        list_of_models = []
        model = PretrainedModelInfo(
            pretrained_model_name="tts_en_e2e_fastpitchhifigan",
            location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_e2e_fastpitchhifigan/versions/1.0.0/files/tts_en_e2e_fastpitchhifigan.nemo",
            description="This model is trained on LJSpeech sampled at 22050Hz and can be used to generate female English voices with an American accent.",
            class_=cls,
        )
        list_of_models.append(model)

        return list_of_models

    def convert_text_to_waveform(self, *, tokens):
        """
        Accepts tokens returned from self.parse() and returns a list of tensors.

        Note: The tensors in the list can have different lengths.
        """
        self.eval()
        audio, _, log_dur_pred, _ = self(text=tokens, splice=False)
        audio = audio.squeeze(1)
        durations = torch.sum(torch.clamp(torch.exp(log_dur_pred) - 1, 0, self.max_token_duration), 1).to(torch.int)
        audio_list = []
        for i, sample in enumerate(audio):
            audio_list.append(sample[: durations[i] * self.hop_size])

        return audio_list
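
# Both E2E models above cut matching windows from the generated frames and the
# ground-truth waveform; the frame-to-sample arithmetic, spelled out (values are
# illustrative, not shipped defaults):
import torch

hop_size, splice_length = 256, 64  # illustrative config values
start_frame = 10                   # a 'splice' index returned by forward()

n_samples = splice_length * hop_size  # samples emitted for splice_length frames
audio = torch.randn(200 * hop_size)
real_window = audio[start_frame * hop_size : (start_frame + splice_length) * hop_size]
assert real_window.numel() == n_samples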