def test_tacotron2_train_some_layers(var_train_expr, config_path):
    config = Tacotron2Config(n_speakers=5, reduction_factor=1)
    model = TFTacotron2(config, training=True)
    model._build()
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)

    with open(config_path) as f:
        config = yaml.load(f, Loader=yaml.Loader)

    config.update({"outdir": "./"})
    config.update({"var_train_expr": var_train_expr})

    STRATEGY = return_strategy()

    trainer = Tacotron2Trainer(
        config=config,
        strategy=STRATEGY,
        steps=0,
        epochs=0,
        is_mixed_precision=False,
    )
    trainer.compile(model, optimizer)

    len_trainable_vars = len(trainer._trainable_variables)
    all_trainable_vars = len(model.trainable_variables)

    if var_train_expr is None:
        # No filter expression: the trainer must train every variable.
        tf.debugging.assert_equal(len_trainable_vars, all_trainable_vars)
    else:
        # With a filter expression, only a strict subset should remain trainable.
        tf.debugging.assert_less(len_trainable_vars, all_trainable_vars)
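# For reference, a minimal sketch of the behavior the test above exercises.
# This is an assumption about the trainer, not its actual code: the trainer
# presumably filters model.trainable_variables by matching variable names
# against the "|"-separated patterns in var_train_expr.
import re

def filter_trainable_variables(model, var_train_expr):
    # No expression given: train everything.
    if var_train_expr is None:
        return model.trainable_variables
    # e.g. "embeddings|encoder" keeps only embedding and encoder variables.
    patterns = var_train_expr.split("|")
    return [
        v for v in model.trainable_variables
        if any(re.search(p, v.name) for p in patterns)
    ]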
def _converter_model(self):
    # Build Tacotron-2 in TFLite-convertible inference mode.
    with open(config.tacotron2_baker) as f:
        conf = yaml.load(f, Loader=yaml.Loader)
    conf = Tacotron2Config(**conf["tacotron2_params"])
    self.tacotron2 = TFTacotron2(
        config=conf,
        training=False,
        name="tacotron2",
        enable_tflite_convertible=True,
    )
    self.tacotron2.setup_window(win_front=5, win_back=5)
    self.tacotron2.setup_maximum_iterations(1000)  # be careful
    self.tacotron2._build()
    self.tacotron2.load_weights(config.tacotron2_pretrained_path)

    # Convert the TFLite-friendly inference function to a flatbuffer.
    tacotron2_concrete_function = (
        self.tacotron2.inference_tflite.get_concrete_function()
    )
    converter = tf.lite.TFLiteConverter.from_concrete_functions(
        [tacotron2_concrete_function]
    )
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.target_spec.supported_ops = [
        tf.lite.OpsSet.TFLITE_BUILTINS,
        tf.lite.OpsSet.SELECT_TF_OPS,
    ]
    tflite_model = converter.convert()

    with open("tacotron2.tflite", "wb") as f:
        f.write(tflite_model)
    print("Model size is %f MBs." % (len(tflite_model) / 1024 / 1024.0))

    # Load the converted model back through the TFLite interpreter.
    self.interpreter = tf.lite.Interpreter(model_path="tacotron2.tflite")
    self.interpreter.allocate_tensors()
    self.input_details = self.interpreter.get_input_details()
    self.output_details = self.interpreter.get_output_details()

    # The vocoder and the text processor stay as regular TF models.
    mb_melgan_config = AutoConfig.from_pretrained(config.multiband_melgan_baker)
    self.mb_melgan = TFAutoModel.from_pretrained(
        config=mb_melgan_config,
        pretrained_path=config.multiband_melgan_pretrained_path,
        name="mb_melgan",
    )
    self.processor = AutoProcessor.from_pretrained(
        pretrained_path=config.baker_mapper_pretrained_path
    )
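# A minimal usage sketch for the interpreter set up above. The input layout
# (input_ids, input_lengths, speaker_ids) and the output ordering are
# assumptions about the converted graph; verify them against
# input_details/output_details at runtime.
import numpy as np

def _infer_tflite(self, input_ids):
    # Dynamic-length models must be resized before allocating tensors.
    self.interpreter.resize_tensor_input(
        self.input_details[0]["index"], [1, len(input_ids)]
    )
    self.interpreter.allocate_tensors()
    self.interpreter.set_tensor(
        self.input_details[0]["index"], np.array([input_ids], dtype=np.int32)
    )
    self.interpreter.set_tensor(
        self.input_details[1]["index"], np.array([len(input_ids)], dtype=np.int32)
    )
    self.interpreter.set_tensor(
        self.input_details[2]["index"], np.array([0], dtype=np.int32)  # speaker 0
    )
    self.interpreter.invoke()
    # Decoder output and post-net mel, in the order reported by the interpreter.
    return (
        self.interpreter.get_tensor(self.output_details[0]["index"]),
        self.interpreter.get_tensor(self.output_details[1]["index"]),
    )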
def test_tacotron2_trainable(n_speakers, n_chars, max_input_length, max_mel_length, batch_size):
    config = Tacotron2Config(n_speakers=n_speakers, reduction_factor=1)
    model = TFTacotron2(config, training=True)
    # model._build()

    # Fake inputs.
    input_ids = tf.random.uniform(
        [batch_size, max_input_length], maxval=n_chars, dtype=tf.int32
    )
    speaker_ids = tf.convert_to_tensor([0] * batch_size, tf.int32)
    mel_outputs = tf.random.uniform(shape=[batch_size, max_mel_length, 80])
    mel_lengths = np.random.randint(
        max_mel_length, high=max_mel_length + 1, size=[batch_size]
    )
    mel_lengths[-1] = max_mel_length
    mel_lengths = tf.convert_to_tensor(mel_lengths, dtype=tf.int32)
    stop_tokens = np.zeros((batch_size, max_mel_length), np.float32)
    stop_tokens = tf.convert_to_tensor(stop_tokens)

    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
    binary_crossentropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

    @tf.function(experimental_relax_shapes=True)
    def one_step_training(input_ids, speaker_ids, mel_outputs, mel_lengths):
        with tf.GradientTape() as tape:
            mel_preds, post_mel_preds, stop_preds, alignment_history = model(
                input_ids,
                tf.constant([max_input_length, max_input_length]),  # assumes batch_size == 2.
                speaker_ids,
                mel_outputs,
                mel_lengths,
                training=True,
            )
            loss_before = tf.keras.losses.MeanSquaredError()(mel_outputs, mel_preds)
            loss_after = tf.keras.losses.MeanSquaredError()(mel_outputs, post_mel_preds)

            # Build stop-token targets: 1.0 from the last valid frame onward.
            stop_gts = tf.expand_dims(
                tf.range(tf.reduce_max(mel_lengths), dtype=tf.int32), 0
            )  # [1, max_len]
            stop_gts = tf.tile(stop_gts, [tf.shape(mel_lengths)[0], 1])  # [B, max_len]
            stop_gts = tf.cast(
                tf.math.greater_equal(stop_gts, tf.expand_dims(mel_lengths, 1) - 1),
                tf.float32,
            )

            # Calculate stop-token loss.
            stop_token_loss = binary_crossentropy(stop_gts, stop_preds)
            loss = stop_token_loss + loss_before + loss_after

        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        return loss, alignment_history

    for i in range(2):
        if i == 1:
            start = time.time()
        loss, alignment_history = one_step_training(
            input_ids, speaker_ids, mel_outputs, mel_lengths
        )
        print(f" > loss: {loss}")
    total_runtime = time.time() - start
    print(f" > Total run-time: {total_runtime}")
    # Only the second call is timed (the first pays the tf.function tracing
    # cost), so the average is over a single step.
    print(f" > Avg run-time: {total_runtime}")
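# The test above expects its arguments from pytest. A hedged sketch of the
# kind of parametrization it might run under; the values are illustrative,
# not taken from the actual test suite. batch_size=2 matches the hard-coded
# two-element input_lengths tensor in the test body.
import pytest

@pytest.mark.parametrize(
    "n_speakers, n_chars, max_input_length, max_mel_length, batch_size",
    [(2, 15, 25, 50, 2), (3, 15, 25, 50, 2)],
)
def test_tacotron2_trainable_parametrized(
    n_speakers, n_chars, max_input_length, max_mel_length, batch_size
):
    test_tacotron2_trainable(
        n_speakers, n_chars, max_input_length, max_mel_length, batch_size
    )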
def _load_tacotron(self, path=OUT_TACOTRON_TFLITE_DIR):
    # Initialize the Tacotron-2 model.
    config = os.path.join(path, "config.yml")
    with open(config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config = Tacotron2Config(**config["tacotron2_params"])
    tacotron2 = TFTacotron2(
        config=config,
        training=False,
        name="tacotron2v1",
        enable_tflite_convertible=True,
    )
    # Newly added:
    tacotron2.setup_window(win_front=6, win_back=6)
    tacotron2.setup_maximum_iterations(3000)

    tacotron2._build()
    weights = os.path.join(path, "model-120000.h5")
    tacotron2.load_weights(weights)
    # summary() prints the table itself and returns None, so don't wrap it in print().
    tacotron2.summary()
    return tacotron2
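# Hedged usage sketch (inside the owning class): since the model above is
# built with enable_tflite_convertible=True, it can feed the same conversion
# pipeline as _converter_model:
tacotron2 = self._load_tacotron()
concrete_fn = tacotron2.inference_tflite.get_concrete_function()
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_fn])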
def main(): """Running decode tacotron-2 mel-spectrogram.""" parser = argparse.ArgumentParser( description= "Decode mel-spectrogram from folder ids with trained Tacotron-2 " "(See detail in tensorflow_tts/example/tacotron2/decode_tacotron2.py)." ) parser.add_argument( "--rootdir", default=None, type=str, required=True, help="directory including ids/durations files.", ) parser.add_argument("--outdir", type=str, required=True, help="directory to save generated speech.") parser.add_argument("--checkpoint", type=str, required=True, help="checkpoint file to be loaded.") parser.add_argument("--use-norm", default=1, type=int, help="usr norm-mels for train or raw.") parser.add_argument("--batch-size", default=8, type=int, help="batch size.") parser.add_argument("--win-front", default=3, type=int, help="win-front.") parser.add_argument("--win-back", default=3, type=int, help="win-front.") parser.add_argument( "--config", default=None, type=str, required=True, help="yaml format configuration file. if not explicitly provided, " "it will be searched in the checkpoint directory. (default=None)", ) parser.add_argument( "--verbose", type=int, default=1, help="logging level. higher is more logging. (default=1)", ) args = parser.parse_args() # set logger if args.verbose > 1: logging.basicConfig( level=logging.DEBUG, format= "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", ) elif args.verbose > 0: logging.basicConfig( level=logging.INFO, format= "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", ) else: logging.basicConfig( level=logging.WARN, format= "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", ) logging.warning("Skip DEBUG/INFO messages") # check directory existence if not os.path.exists(args.outdir): os.makedirs(args.outdir) # load config with open(args.config) as f: config = yaml.load(f, Loader=yaml.Loader) config.update(vars(args)) if config["format"] == "npy": char_query = "*-ids.npy" mel_query = "*-raw-feats.npy" if args.use_norm is False else "*-norm-feats.npy" char_load_fn = np.load mel_load_fn = np.load else: raise ValueError("Only npy is supported.") # define data-loader dataset = CharactorMelDataset( dataset=config["tacotron2_params"]["dataset"], root_dir=args.rootdir, charactor_query=char_query, mel_query=mel_query, charactor_load_fn=char_load_fn, mel_load_fn=mel_load_fn, reduction_factor=config["tacotron2_params"]["reduction_factor"]) dataset = dataset.create(allow_cache=True, batch_size=args.batch_size) # define model and load checkpoint tacotron2 = TFTacotron2( config=Tacotron2Config(**config["tacotron2_params"]), name="tacotron2", ) tacotron2._build() # build model to be able load_weights. tacotron2.load_weights(args.checkpoint) # setup window tacotron2.setup_window(win_front=args.win_front, win_back=args.win_back) for data in tqdm(dataset, desc="[Decoding]"): utt_ids = data["utt_ids"] utt_ids = utt_ids.numpy() # tacotron2 inference. ( mel_outputs, post_mel_outputs, stop_outputs, alignment_historys, ) = tacotron2.inference( input_ids=data["input_ids"], input_lengths=data["input_lengths"], speaker_ids=data["speaker_ids"], ) # convert to numpy post_mel_outputs = post_mel_outputs.numpy() for i, post_mel_output in enumerate(post_mel_outputs): stop_token = tf.math.round(tf.nn.sigmoid(stop_outputs[i])) # [T] real_length = tf.math.reduce_sum( tf.cast(tf.math.equal(stop_token, 0.0), tf.int32), -1) post_mel_output = post_mel_output[:real_length, :] saved_name = utt_ids[i].decode("utf-8") # save D to folder. 
np.save( os.path.join(args.outdir, f"{saved_name}-norm-feats.npy"), post_mel_output.astype(np.float32), allow_pickle=False, )
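# Standard script entry point (assumed; the snippet above only shows main()):
if __name__ == "__main__":
    main()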
def main(): """Running extract tacotron-2 durations.""" parser = argparse.ArgumentParser( description="Extract durations from charactor with trained Tacotron-2 " "(See detail in tensorflow_tts/example/tacotron-2/extract_duration.py)." ) parser.add_argument( "--rootdir", default=None, type=str, required=True, help="directory including ids/durations files.", ) parser.add_argument("--outdir", type=str, required=True, help="directory to save generated speech.") parser.add_argument("--checkpoint", type=str, required=True, help="checkpoint file to be loaded.") parser.add_argument("--use-norm", default=1, type=int, help="usr norm-mels for train or raw.") parser.add_argument("--batch-size", default=8, type=int, help="batch size.") parser.add_argument("--win-front", default=2, type=int, help="win-front.") parser.add_argument("--win-back", default=2, type=int, help="win-front.") parser.add_argument("--save-alignment", default=0, type=int, help="save-alignment.") parser.add_argument( "--config", default=None, type=str, required=True, help="yaml format configuration file. if not explicitly provided, " "it will be searched in the checkpoint directory. (default=None)", ) parser.add_argument( "--verbose", type=int, default=1, help="logging level. higher is more logging. (default=1)", ) args = parser.parse_args() # set logger if args.verbose > 1: logging.basicConfig( level=logging.DEBUG, format= "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", ) elif args.verbose > 0: logging.basicConfig( level=logging.INFO, format= "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", ) else: logging.basicConfig( level=logging.WARN, format= "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", ) logging.warning("Skip DEBUG/INFO messages") # check directory existence if not os.path.exists(args.outdir): os.makedirs(args.outdir) # load config with open(args.config) as f: config = yaml.load(f, Loader=yaml.Loader) config.update(vars(args)) if config["format"] == "npy": char_query = "*-ids.npy" mel_query = "*-raw-feats.npy" if args.use_norm is False else "*-norm-feats.npy" char_load_fn = np.load mel_load_fn = np.load else: raise ValueError("Only npy is supported.") # define data-loader dataset = CharactorMelDataset( root_dir=args.rootdir, charactor_query=char_query, mel_query=mel_query, charactor_load_fn=char_load_fn, mel_load_fn=mel_load_fn, return_utt_id=True, return_guided_attention=False, ) dataset = dataset.create(allow_cache=True, batch_size=args.batch_size) # define model and load checkpoint tacotron2 = TFTacotron2( config=Tacotron2Config(**config["tacotron2_params"]), training=True, # enable teacher forcing mode. name="tacotron2", ) tacotron2._build() # build model to be able load_weights. tacotron2.load_weights(args.checkpoint) for data in tqdm(dataset, desc="[Extract Duration]"): utt_id, charactor, char_length, mel, mel_length = data utt_id = utt_id.numpy() # tacotron2 inference. mel_outputs, post_mel_outputs, stop_outputs, alignment_historys = tacotron2( charactor, char_length, speaker_ids=tf.zeros(shape=[tf.shape(charactor)[0]]), mel_outputs=mel, mel_lengths=mel_length, use_window_mask=True, win_front=args.win_front, win_back=args.win_back, training=True, ) # convert to numpy alignment_historys = alignment_historys.numpy() for i, alignment in enumerate(alignment_historys): real_char_length = (char_length[i].numpy() - 1 ) # minus 1 because char have eos tokens. 
real_mel_length = mel_length[i].numpy() alignment = alignment[:real_char_length, :real_mel_length] d = get_duration_from_alignment(alignment) # [max_char_len] saved_name = utt_id[i].decode("utf-8") # check a length compatible assert ( len(d) == real_char_length ), f"different between len_char and len_durations, {len(d)} and {real_char_length}" assert ( np.sum(d) == real_mel_length ), f"different between sum_durations and len_mel, {np.sum(d)} and {real_mel_length}" # save D to folder. np.save( os.path.join(args.outdir, f"{saved_name}-durations.npy"), d.astype(np.int32), allow_pickle=False, ) # save alignment to debug. if args.save_alignment == 1: figname = os.path.join(args.outdir, f"{saved_name}_alignment.png") fig = plt.figure(figsize=(8, 6)) ax = fig.add_subplot(111) ax.set_title(f"Alignment of {saved_name}") im = ax.imshow(alignment, aspect="auto", origin="lower", interpolation="none") fig.colorbar(im, ax=ax) xlabel = "Decoder timestep" plt.xlabel(xlabel) plt.ylabel("Encoder timestep") plt.tight_layout() plt.savefig(figname) plt.close()
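# get_duration_from_alignment is used above but not shown. A minimal sketch of
# one common implementation (an assumption about the actual helper): assign
# each decoder frame to its argmax encoder step, then count frames per
# character, so len(result) == char_len and sum(result) == mel_len, matching
# the asserts above.
import numpy as np

def get_duration_from_alignment(alignment):
    # alignment: [char_len, mel_len] attention weights.
    best_char = np.argmax(alignment, axis=0)  # winning character per mel frame.
    durations = np.zeros(alignment.shape[0], dtype=np.int32)
    for char_idx in best_char:
        durations[char_idx] += 1
    return durations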
print(tf.__version__)

# Initialize MelGAN model.
with open(config_lp.multiband_melgan_baker) as f:
    melgan_config = yaml.load(f, Loader=yaml.Loader)
melgan_config = MelGANGeneratorConfig(
    **melgan_config["multiband_melgan_generator_params"]
)
melgan = TFMelGANGenerator(config=melgan_config, name="mb_melgan")
melgan._build()
melgan.load_weights(config_lp.multiband_melgan_pretrained_path)

# Initialize Tacotron-2 model.
with open(config_lp.tacotron2_baker) as f:
    config = yaml.load(f, Loader=yaml.Loader)
config = Tacotron2Config(**config["tacotron2_params"])
tacotron2 = TFTacotron2(
    config=config,
    training=False,
    name="tacotron2v2",
    enable_tflite_convertible=True,
)
# Newly added:
tacotron2.setup_window(win_front=6, win_back=6)
tacotron2.setup_maximum_iterations(3000)

tacotron2._build()
tacotron2.load_weights(config_lp.tacotron2_pretrained_path)
tacotron2.summary()

# Concrete function for TFLite conversion.
tacotron2_concrete_function = tacotron2.inference_tflite.get_concrete_function()
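# The conversion then presumably proceeds exactly as in _converter_model above:
converter = tf.lite.TFLiteConverter.from_concrete_functions(
    [tacotron2_concrete_function]
)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.TFLITE_BUILTINS,
    tf.lite.OpsSet.SELECT_TF_OPS,
]
tflite_model = converter.convert()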
def main(): """Running extract tacotron-2 durations.""" parser = argparse.ArgumentParser( description="Extract durations from charactor with trained Tacotron-2 " "(See detail in tensorflow_tts/example/tacotron-2/extract_duration.py).") parser.add_argument("--rootdir",default=None,type=str, required=True,help="directory including ids/durations files.",) parser.add_argument("--outdir", type=str, required=True, help="directory to save generated speech.") parser.add_argument("--checkpoint", type=str, required=True, help="checkpoint file to be loaded." ) parser.add_argument("--use-norm", default=1, type=int, help="usr norm-mels for train or raw.") parser.add_argument("--batch-size", default=8, type=int, help="batch size.") parser.add_argument("--win-front", default=3, type=int, help="win-front.") parser.add_argument("--win-back", default=3, type=int, help="win-front.") parser.add_argument("--use-window-mask", default=1, type=int, help="toggle window masking." ) parser.add_argument("--save-alignment", default=1, type=int, help="save-alignment.") parser.add_argument("--dataset_mapping", default="dump/baker_mapper.json", type=str, ) parser.add_argument("--config", default=None, type=str, required=True, help="yaml format configuration file. if not explicitly provided, it will be searched in the checkpoint directory. (default=None)", ) parser.add_argument( "--verbose", type=int,default=1, help="logging level. higher is more logging. (default=1)", ) args = parser.parse_args() print(args) # set logger if args.verbose > 1: logging.basicConfig(level=logging.DEBUG,format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",) elif args.verbose > 0: logging.basicConfig(level=logging.INFO,format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",) else: logging.basicConfig(level=logging.WARN,format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",) logging.warning("Skip DEBUG/INFO messages") # check directory existence if not os.path.exists(args.outdir): os.makedirs(args.outdir) if not os.path.exists(args.outdir+'_align_fig'): os.makedirs(args.outdir+'_align_fig') # load config with open(args.config) as f: config = yaml.load(f, Loader=yaml.Loader) config.update(vars(args)) if config["format"] == "npy": char_query = "*-ids.npy" mel_query = "*-raw-feats.npy" if args.use_norm is False else "*-norm-feats.npy" char_load_fn = np.load mel_load_fn = np.load else: raise ValueError("Only npy is supported.") with open(args.dataset_mapping) as f: dataset_mapping = json.load(f) speakers_map = dataset_mapping["speakers_map"] # Check n_speakers matches number of speakers in speakers_map n_speakers = config["tacotron2_params"]["n_speakers"] # define data-loader dataset = CharactorMelDataset( dataset=config["tacotron2_params"]["dataset"], root_dir=args.rootdir, charactor_query=char_query, mel_query=mel_query, charactor_load_fn=char_load_fn, mel_load_fn=mel_load_fn, reduction_factor=config["tacotron2_params"]["reduction_factor"], use_fixed_shapes=True, speakers_map=speakers_map, ) dataset = dataset.create(allow_cache=True, batch_size=args.batch_size, drop_remainder=False) # define model and load checkpoint tacotron2 = TFTacotron2(config=Tacotron2Config(**config["tacotron2_params"]), name="tacotron2",) tacotron2._build() # build model to be able load_weights. tacotron2.load_weights(args.checkpoint) # apply tf.function for tacotron2. 
    tacotron2 = tf.function(tacotron2, experimental_relax_shapes=True)

    for data in tqdm(dataset, desc="[Extract Duration]"):
        utt_ids = data["utt_ids"]
        input_lengths = data["input_lengths"]
        mel_lengths = data["mel_lengths"]
        utt_ids = utt_ids.numpy()
        real_mel_lengths = data["real_mel_lengths"]
        del data["real_mel_lengths"]

        # Tacotron-2 teacher-forced forward pass.
        mel_outputs, post_mel_outputs, stop_outputs, alignment_historys = tacotron2(
            **data,
            use_window_mask=args.use_window_mask,
            win_front=args.win_front,
            win_back=args.win_back,
            training=True,
        )

        # Convert to numpy.
        alignment_historys = alignment_historys.numpy()

        for i, alignment in enumerate(alignment_historys):
            real_char_length = input_lengths[i].numpy()
            real_mel_length = real_mel_lengths[i].numpy()
            # The decoder runs at 1/reduction_factor of the mel frame rate.
            alignment_mel_length = int(
                np.ceil(real_mel_length / config["tacotron2_params"]["reduction_factor"])
            )
            alignment = alignment[:real_char_length, :alignment_mel_length]
            d = get_duration_from_alignment(alignment)  # [max_char_len]
            d = d * config["tacotron2_params"]["reduction_factor"]
            assert (
                np.sum(d) >= real_mel_length
            ), f"{d}, {np.sum(d)}, {alignment_mel_length}, {real_mel_length}"
            if np.sum(d) > real_mel_length:
                # Remove the rounding surplus from the edge durations so the
                # durations sum exactly to the real mel length.
                rest = np.sum(d) - real_mel_length
                if d[-1] > rest:
                    d[-1] -= rest
                elif d[0] > rest:
                    d[0] -= rest
                else:
                    d[-1] -= rest // 2
                    d[0] -= rest - rest // 2
                assert d[-1] >= 0 and d[0] >= 0, f"{d}, {np.sum(d)}, {real_mel_length}"

            saved_name = utt_ids[i].decode("utf-8")

            # Check length compatibility.
            assert (
                len(d) == real_char_length
            ), f"mismatch between len_char and len_durations: {len(d)} and {real_char_length}"
            assert (
                np.sum(d) == real_mel_length
            ), f"mismatch between sum_durations and len_mel: {np.sum(d)} and {real_mel_length}"

            # Save durations to folder.
            np.save(
                os.path.join(args.outdir, f"{saved_name}-durations.npy"),
                d.astype(np.int32),
                allow_pickle=False,
            )

            # Save alignment for debugging.
            if args.save_alignment == 1:
                figname = os.path.join(
                    args.outdir + "_align_fig/", f"{saved_name}_alignment.png"
                )
                fig = plt.figure(figsize=(8, 6))
                ax = fig.add_subplot(111)
                ax.set_title(f"Alignment of {saved_name}")
                im = ax.imshow(
                    alignment, aspect="auto", origin="lower", interpolation="none"
                )
                fig.colorbar(im, ax=ax)
                plt.xlabel("Decoder timestep")
                plt.ylabel("Encoder timestep")
                plt.tight_layout()
                plt.savefig(figname)
                plt.close()
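# Standard script entry point (assumed, as in the decode script above):
if __name__ == "__main__":
    main()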