Example #1
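The snippets below come from TensorFlowTTS-style code and are shown without their import headers. A minimal common preamble, assuming the standard tensorflow_tts module layout, would look like:

import os
import time

import numpy as np
import tensorflow as tf
import yaml

from tensorflow_tts.configs import Tacotron2Config
from tensorflow_tts.models import TFTacotron2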
def test_tacotron2_train_some_layers(var_train_expr, config_path):
    config = Tacotron2Config(n_speakers=5, reduction_factor=1)
    model = TFTacotron2(config, training=True)
    model._build()
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)

    with open(config_path) as f:
        config = yaml.load(f, Loader=yaml.Loader)

    config.update({"outdir": "./"})
    config.update({"var_train_expr": var_train_expr})

    STRATEGY = return_strategy()

    trainer = Tacotron2Trainer(
        config=config,
        strategy=STRATEGY,
        steps=0,
        epochs=0,
        is_mixed_precision=False,
    )
    trainer.compile(model, optimizer)

    len_trainable_vars = len(trainer._trainable_variables)
    all_trainable_vars = len(model.trainable_variables)

    if var_train_expr is None:
        tf.debugging.assert_equal(len_trainable_vars, all_trainable_vars)
    else:
        tf.debugging.assert_less(len_trainable_vars, all_trainable_vars)
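In the test suite, `var_train_expr` and `config_path` would typically be injected by pytest. A hedged sketch of such a parametrization (the yaml path below is a placeholder, not the project's actual fixture):

import pytest

@pytest.mark.parametrize(
    "var_train_expr, config_path",
    [
        (None, "./examples/tacotron2/conf/tacotron2.v1.yaml"),                  # train everything
        ("embeddings|encoder", "./examples/tacotron2/conf/tacotron2.v1.yaml"),  # train a subset
    ],
)
def test_tacotron2_train_some_layers(var_train_expr, config_path):
    ...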
Example #2
    def _converter_model(self):
        with open(config.tacotron2_baker) as f:
            conf = yaml.load(f, Loader=yaml.Loader)
        conf = Tacotron2Config(**conf["tacotron2_params"])
        self.tacotron2 = TFTacotron2(config=conf, training=False, name="tacotron2", enable_tflite_convertible=True)
        self.tacotron2.setup_window(win_front=5, win_back=5)
        self.tacotron2.setup_maximum_iterations(1000) # be careful
        self.tacotron2._build()
        self.tacotron2.load_weights(config.tacotron2_pretrained_path)
        tacotron2_concrete_function = self.tacotron2.inference_tflite.get_concrete_function()
        converter = tf.lite.TFLiteConverter.from_concrete_functions([tacotron2_concrete_function])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS,
                                               tf.lite.OpsSet.SELECT_TF_OPS]
        tflite_model = converter.convert()
        with open('tacotron2.tflite', 'wb') as f:
            f.write(tflite_model)
        
        print('Model size is %f MBs.' % (len(tflite_model) / 1024 / 1024.0))

        #tacotron2_config = AutoConfig.from_pretrained( config.tacotron2_baker )
        #self.tacotron2 = TFAutoModel.from_pretrained( config=tacotron2_config, pretrained_path='tacotron2.tflite', training=False,  name="tacotron2" )
        #self.tacotron2.setup_window(win_front=5, win_back=5)
        self.interpreter = tf.lite.Interpreter(model_path='tacotron2.tflite')
        self.interpreter.allocate_tensors()
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()

        mb_melgan_config = AutoConfig.from_pretrained(config.multiband_melgan_baker)
        self.mb_melgan = TFAutoModel.from_pretrained(config=mb_melgan_config, pretrained_path=config.multiband_melgan_pretrained_path, name="mb_melgan")

        self.processor = AutoProcessor.from_pretrained(pretrained_path=config.baker_mapper_pretrained_path)
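A minimal sketch of driving the interpreter prepared above. The token ids are hypothetical, and the three-input ordering (input_ids, input_lengths, speaker_ids) is an assumption that should be verified against input_details:

import numpy as np

def synthesize_tflite(interpreter, input_details, output_details, input_ids):
    # input_ids: int32 array of shape [1, T], e.g. produced by the text processor.
    interpreter.resize_tensor_input(input_details[0]["index"], input_ids.shape)
    interpreter.allocate_tensors()
    interpreter.set_tensor(input_details[0]["index"], input_ids)
    interpreter.set_tensor(input_details[1]["index"],
                           np.array([input_ids.shape[1]], dtype=np.int32))  # input_lengths
    interpreter.set_tensor(input_details[2]["index"],
                           np.array([0], dtype=np.int32))  # speaker id 0
    interpreter.invoke()
    # by convention the second output is the post-net mel spectrogram
    return interpreter.get_tensor(output_details[1]["index"])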
Example #3
def test_tacotron2_trainable(n_speakers, n_chars, max_input_length, max_mel_length, batch_size):
    config = Tacotron2Config(n_speakers=n_speakers, reduction_factor=1)
    model = TFTacotron2(config, training=True)
    # model._build()

    # fake input
    input_ids = tf.random.uniform([batch_size, max_input_length], maxval=n_chars, dtype=tf.int32)
    speaker_ids = tf.convert_to_tensor([0] * batch_size, tf.int32)
    mel_outputs = tf.random.uniform(shape=[batch_size, max_mel_length, 80])
    mel_lengths = np.random.randint(max_mel_length, high=max_mel_length + 1, size=[batch_size])
    mel_lengths[-1] = max_mel_length
    mel_lengths = tf.convert_to_tensor(mel_lengths, dtype=tf.int32)

    stop_tokens = np.zeros((batch_size, max_mel_length), np.float32)
    stop_tokens = tf.convert_to_tensor(stop_tokens)

    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)

    binary_crossentropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

    @tf.function(experimental_relax_shapes=True)
    def one_step_training(input_ids, speaker_ids, mel_outputs, mel_lengths):
        with tf.GradientTape() as tape:
            mel_preds, \
                post_mel_preds, \
                stop_preds, \
                alignment_history = model(input_ids,
                                          tf.constant([max_input_length] * batch_size),
                                          speaker_ids,
                                          mel_outputs,
                                          mel_lengths,
                                          training=True)
            loss_before = tf.keras.losses.MeanSquaredError()(mel_outputs, mel_preds)
            loss_after = tf.keras.losses.MeanSquaredError()(mel_outputs, post_mel_preds)

            # stop-token targets: frames at index mel_length - 1 and later are
            # labelled 1.0 ("stop"), earlier frames 0.0; e.g. mel_lengths=[3, 5]
            # with max_len 5 gives [[0, 0, 1, 1, 1], [0, 0, 0, 0, 1]]
            stop_gts = tf.expand_dims(tf.range(tf.reduce_max(mel_lengths), dtype=tf.int32), 0)  # [1, max_len]
            stop_gts = tf.tile(stop_gts, [tf.shape(mel_lengths)[0], 1])  # [B, max_len]
            stop_gts = tf.cast(tf.math.greater_equal(stop_gts, tf.expand_dims(mel_lengths, 1) - 1), tf.float32)

            # calculate stop_token loss
            stop_token_loss = binary_crossentropy(stop_gts, stop_preds)

            loss = stop_token_loss + loss_before + loss_after

        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        return loss, alignment_history

    # the first step includes tf.function tracing, so start timing afterwards
    for i in range(11):
        if i == 1:
            start = time.time()
        loss, alignment_history = one_step_training(input_ids,
                                                    speaker_ids, mel_outputs, mel_lengths)
        print(f" > loss: {loss}")
    total_runtime = time.time() - start
    print(f" > Total run-time: {total_runtime}")
    print(f" > Avg run-time: {total_runtime / 10}")
Example #4
    def _load_tacotron(self, path=OUT_TACOTRON_TFLITE_DIR):
        # initialize Tacotron2 model.
        config = os.path.join(path, 'config.yml')
        with open(config) as f:
            config = yaml.load(f, Loader=yaml.Loader)
        config = Tacotron2Config(**config["tacotron2_params"])
        tacotron2 = TFTacotron2(config=config,
                                training=False,
                                name="tacotron2v1",
                                enable_tflite_convertible=True)

        # Newly added :
        tacotron2.setup_window(win_front=6, win_back=6)
        tacotron2.setup_maximum_iterations(3000)

        tacotron2._build()
        weights = os.path.join(path, 'model-120000.h5')
        tacotron2.load_weights(weights)
        tacotron2.summary()  # summary() prints directly and returns None
        return tacotron2
Example #5
def main():
    """Running decode tacotron-2 mel-spectrogram."""
    parser = argparse.ArgumentParser(
        description=
        "Decode mel-spectrogram from folder ids with trained Tacotron-2 "
        "(See detail in tensorflow_tts/example/tacotron2/decode_tacotron2.py)."
    )
    parser.add_argument(
        "--rootdir",
        default=None,
        type=str,
        required=True,
        help="directory including ids/durations files.",
    )
    parser.add_argument("--outdir",
                        type=str,
                        required=True,
                        help="directory to save generated speech.")
    parser.add_argument("--checkpoint",
                        type=str,
                        required=True,
                        help="checkpoint file to be loaded.")
    parser.add_argument("--use-norm",
                        default=1,
                        type=int,
                        help="usr norm-mels for train or raw.")
    parser.add_argument("--batch-size",
                        default=8,
                        type=int,
                        help="batch size.")
    parser.add_argument("--win-front", default=3, type=int, help="win-front.")
    parser.add_argument("--win-back", default=3, type=int, help="win-front.")
    parser.add_argument(
        "--config",
        default=None,
        type=str,
        required=True,
        help="yaml format configuration file. if not explicitly provided, "
        "it will be searched in the checkpoint directory. (default=None)",
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    args = parser.parse_args()

    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            format=
            "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format=
            "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format=
            "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # load config
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))

    if config["format"] == "npy":
        char_query = "*-ids.npy"
        mel_query = "*-raw-feats.npy" if args.use_norm is False else "*-norm-feats.npy"
        char_load_fn = np.load
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy is supported.")

    # define data-loader
    dataset = CharactorMelDataset(
        dataset=config["tacotron2_params"]["dataset"],
        root_dir=args.rootdir,
        charactor_query=char_query,
        mel_query=mel_query,
        charactor_load_fn=char_load_fn,
        mel_load_fn=mel_load_fn,
        reduction_factor=config["tacotron2_params"]["reduction_factor"])
    dataset = dataset.create(allow_cache=True, batch_size=args.batch_size)

    # define model and load checkpoint
    tacotron2 = TFTacotron2(
        config=Tacotron2Config(**config["tacotron2_params"]),
        name="tacotron2",
    )
    tacotron2._build()  # build model to be able load_weights.
    tacotron2.load_weights(args.checkpoint)

    # setup window
    tacotron2.setup_window(win_front=args.win_front, win_back=args.win_back)

    for data in tqdm(dataset, desc="[Decoding]"):
        utt_ids = data["utt_ids"]
        utt_ids = utt_ids.numpy()

        # tacotron2 inference.
        (
            mel_outputs,
            post_mel_outputs,
            stop_outputs,
            alignment_historys,
        ) = tacotron2.inference(
            input_ids=data["input_ids"],
            input_lengths=data["input_lengths"],
            speaker_ids=data["speaker_ids"],
        )

        # convert to numpy
        post_mel_outputs = post_mel_outputs.numpy()

        for i, post_mel_output in enumerate(post_mel_outputs):
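            # the stop-token head emits one logit per frame; frames whose
            # rounded sigmoid is still 0.0 have not stopped yet, so counting
            # them recovers the unpadded mel length before trimming.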
            stop_token = tf.math.round(tf.nn.sigmoid(stop_outputs[i]))  # [T]
            real_length = tf.math.reduce_sum(
                tf.cast(tf.math.equal(stop_token, 0.0), tf.int32), -1)
            post_mel_output = post_mel_output[:real_length, :]

            saved_name = utt_ids[i].decode("utf-8")

            # save generated mel to folder.
            np.save(
                os.path.join(args.outdir, f"{saved_name}-norm-feats.npy"),
                post_mel_output.astype(np.float32),
                allow_pickle=False,
            )
Example #6
def main():
    """Run training process."""
    parser = argparse.ArgumentParser(
        description="Train FastSpeech (See detail in tensorflow_tts/bin/train-fastspeech.py)"
    )
    parser.add_argument(
        "--train-dir",
        default=None,
        type=str,
        help="directory including training data. ",
    )
    parser.add_argument(
        "--dev-dir",
        default=None,
        type=str,
        help="directory including development data. ",
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="usr norm-mels for train or raw."
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save checkpoints."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="yaml format configuration file."
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        "--mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for generator or not.",
    )
    parser.add_argument(
        "--pretrained",
        default="",
        type=str,
        nargs="?",
        help='pretrained weights .h5 file to load weights from. Auto-skips non-matching layers',
    )
    args = parser.parse_args()

    # return strategy
    STRATEGY = return_strategy()

    # set mixed precision config
    if args.mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})

    args.mixed_precision = bool(args.mixed_precision)
    args.use_norm = bool(args.use_norm)

    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # check arguments
    if args.train_dir is None:
        raise ValueError("Please specify --train-dir")
    if args.dev_dir is None:
        raise ValueError("Please specify --valid-dir")

    # load and save config
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__

    # get dataset
    if config["remove_short_samples"]:
        mel_length_threshold = config["mel_length_threshold"]
    else:
        mel_length_threshold = 0

    if config["format"] == "npy":
        charactor_query = "*-ids.npy"
        mel_query = "*-raw-feats.npy" if args.use_norm is False else "*-norm-feats.npy"
        charactor_load_fn = np.load
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy are supported.")

    train_dataset = CharactorMelDataset(
        dataset=config["tacotron2_params"]["dataset"],
        root_dir=args.train_dir,
        charactor_query=charactor_query,
        mel_query=mel_query,
        charactor_load_fn=charactor_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
        reduction_factor=config["tacotron2_params"]["reduction_factor"],
        use_fixed_shapes=config["use_fixed_shapes"],
    )

    # update max_mel_length and max_char_length to config
    config.update({"max_mel_length": int(train_dataset.max_mel_length)})
    config.update({"max_char_length": int(train_dataset.max_char_length)})

    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")

    train_dataset = train_dataset.create(
        is_shuffle=config["is_shuffle"],
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
    )

    valid_dataset = CharactorMelDataset(
        dataset=config["tacotron2_params"]["dataset"],
        root_dir=args.dev_dir,
        charactor_query=charactor_query,
        mel_query=mel_query,
        charactor_load_fn=charactor_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
        reduction_factor=config["tacotron2_params"]["reduction_factor"],
        use_fixed_shapes=False,  # don't need apply fixed shape for evaluation.
    ).create(
        is_shuffle=config["is_shuffle"],
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
    )

    # define trainer
    trainer = Tacotron2Trainer(
        config=config,
        strategy=STRATEGY,
        steps=0,
        epochs=0,
        is_mixed_precision=args.mixed_precision,
    )

    with STRATEGY.scope():
        # define model.
        tacotron_config = Tacotron2Config(**config["tacotron2_params"])
        tacotron2 = TFTacotron2(config=tacotron_config, training=True, name="tacotron2")
        tacotron2._build()
        tacotron2.summary()
        
        if args.pretrained:
            tacotron2.load_weights(args.pretrained, by_name=True, skip_mismatch=True)
            logging.info(f"Successfully loaded pretrained weights from {args.pretrained}.")

        # AdamW for tacotron2
        learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
            initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
            decay_steps=config["optimizer_params"]["decay_steps"],
            end_learning_rate=config["optimizer_params"]["end_learning_rate"],
        )

        learning_rate_fn = WarmUp(
            initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
            decay_schedule_fn=learning_rate_fn,
            warmup_steps=int(
                config["train_max_steps"]
                * config["optimizer_params"]["warmup_proportion"]
            ),
        )
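        # net effect: the learning rate ramps up from zero to
        # initial_learning_rate across the warm-up steps, then follows the
        # polynomial decay down to end_learning_rate at decay_steps.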

        optimizer = AdamWeightDecay(
            learning_rate=learning_rate_fn,
            weight_decay_rate=config["optimizer_params"]["weight_decay"],
            beta_1=0.9,
            beta_2=0.98,
            epsilon=1e-6,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
        )

        _ = optimizer.iterations

    # compile trainer
    trainer.compile(model=tacotron2, optimizer=optimizer)

    # start training
    try:
        trainer.fit(
            train_dataset,
            valid_dataset,
            saved_path=os.path.join(config["outdir"], "checkpoints/"),
            resume=args.resume,
        )
    except KeyboardInterrupt:
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
Example #7
def main():
    """Running extract tacotron-2 durations."""
    parser = argparse.ArgumentParser(
        description="Extract durations from charactor with trained Tacotron-2 "
        "(See detail in tensorflow_tts/example/tacotron-2/extract_duration.py)."
    )
    parser.add_argument(
        "--rootdir",
        default=None,
        type=str,
        required=True,
        help="directory including ids/durations files.",
    )
    parser.add_argument("--outdir",
                        type=str,
                        required=True,
                        help="directory to save generated speech.")
    parser.add_argument("--checkpoint",
                        type=str,
                        required=True,
                        help="checkpoint file to be loaded.")
    parser.add_argument("--use-norm",
                        default=1,
                        type=int,
                        help="usr norm-mels for train or raw.")
    parser.add_argument("--batch-size",
                        default=8,
                        type=int,
                        help="batch size.")
    parser.add_argument("--win-front", default=2, type=int, help="win-front.")
    parser.add_argument("--win-back", default=2, type=int, help="win-front.")
    parser.add_argument("--save-alignment",
                        default=0,
                        type=int,
                        help="save-alignment.")
    parser.add_argument(
        "--config",
        default=None,
        type=str,
        required=True,
        help="yaml format configuration file. if not explicitly provided, "
        "it will be searched in the checkpoint directory. (default=None)",
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    args = parser.parse_args()

    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            format=
            "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format=
            "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format=
            "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # load config
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))

    if config["format"] == "npy":
        char_query = "*-ids.npy"
        mel_query = "*-raw-feats.npy" if args.use_norm is False else "*-norm-feats.npy"
        char_load_fn = np.load
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy is supported.")

    # define data-loader
    dataset = CharactorMelDataset(
        root_dir=args.rootdir,
        charactor_query=char_query,
        mel_query=mel_query,
        charactor_load_fn=char_load_fn,
        mel_load_fn=mel_load_fn,
        return_utt_id=True,
        return_guided_attention=False,
    )
    dataset = dataset.create(allow_cache=True, batch_size=args.batch_size)

    # define model and load checkpoint
    tacotron2 = TFTacotron2(
        config=Tacotron2Config(**config["tacotron2_params"]),
        training=True,  # enable teacher forcing mode.
        name="tacotron2",
    )
    tacotron2._build()  # build model to be able load_weights.
    tacotron2.load_weights(args.checkpoint)

    for data in tqdm(dataset, desc="[Extract Duration]"):
        utt_id, charactor, char_length, mel, mel_length = data
        utt_id = utt_id.numpy()

        # tacotron2 inference.
        mel_outputs, post_mel_outputs, stop_outputs, alignment_historys = tacotron2(
            charactor,
            char_length,
            speaker_ids=tf.zeros(shape=[tf.shape(charactor)[0]], dtype=tf.int32),
            mel_outputs=mel,
            mel_lengths=mel_length,
            use_window_mask=True,
            win_front=args.win_front,
            win_back=args.win_back,
            training=True,
        )

        # convert to numpy
        alignment_historys = alignment_historys.numpy()

        for i, alignment in enumerate(alignment_historys):
            real_char_length = char_length[i].numpy() - 1  # minus 1 because chars include an eos token.
            real_mel_length = mel_length[i].numpy()
            alignment = alignment[:real_char_length, :real_mel_length]
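            # each decoder frame is attributed to the character it attends to
            # most strongly, so a character's duration is the number of frames
            # attributed to it.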
            d = get_duration_from_alignment(alignment)  # [max_char_len]

            saved_name = utt_id[i].decode("utf-8")

            # check length compatibility
            assert (
                len(d) == real_char_length
            ), f"mismatch between len_char and len_durations: {len(d)} and {real_char_length}"

            assert (
                np.sum(d) == real_mel_length
            ), f"mismatch between sum_durations and len_mel: {np.sum(d)} and {real_mel_length}"

            # save D to folder.
            np.save(
                os.path.join(args.outdir, f"{saved_name}-durations.npy"),
                d.astype(np.int32),
                allow_pickle=False,
            )

            # save alignment to debug.
            if args.save_alignment == 1:
                figname = os.path.join(args.outdir,
                                       f"{saved_name}_alignment.png")
                fig = plt.figure(figsize=(8, 6))
                ax = fig.add_subplot(111)
                ax.set_title(f"Alignment of {saved_name}")
                im = ax.imshow(alignment,
                               aspect="auto",
                               origin="lower",
                               interpolation="none")
                fig.colorbar(im, ax=ax)
                xlabel = "Decoder timestep"
                plt.xlabel(xlabel)
                plt.ylabel("Encoder timestep")
                plt.tight_layout()
                plt.savefig(figname)
                plt.close()
Example #8
def main():
    parser = argparse.ArgumentParser(description="Dump Tacotron2")
    parser.add_argument("--outdir",
                        default="./",
                        type=str,
                        help="directory to save pb or tflite file.")
    parser.add_argument("--checkpoint",
                        type=str,
                        required=True,
                        help="checkpoint file to be loaded.")
    parser.add_argument("--vocab_size",
                        type=int,
                        required=True,
                        help="vocab size")
    parser.add_argument("--tflite",
                        type=bool,
                        default=False,
                        help="saved model to tflite")
    args = parser.parse_args()

    # check directory existence(checkpoint)
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    if args.checkpoint is not None and os.path.isdir(args.checkpoint):
        args.checkpoint = tf.train.latest_checkpoint(args.checkpoint)

    save_name = os.path.splitext(os.path.basename(args.checkpoint))[0]
    config = Config(args.outdir, args.vocab_size)

    # define model.
    tacotron2 = TFTacotron2(config=config,
                            training=False,
                            name="tacotron2",
                            enable_tflite_convertible=bool(args.tflite))

    # Newly added :
    tacotron2.setup_window(win_front=6, win_back=6)
    tacotron2.setup_maximum_iterations(3000)

    # build
    if args.tflite == 1:
        print("dump tflite => vocab_size: {}".format(args.vocab_size))
        input_ids = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9]])
        input_lengths = np.array([9])
        speaker_ids = np.array([0])
        mel_outputs = np.random.normal(size=(1, 50,
                                             config.n_mels)).astype(np.float32)
        mel_lengths = np.array([50])
        tacotron2(input_ids,
                  input_lengths,
                  speaker_ids,
                  mel_outputs,
                  mel_lengths,
                  10,
                  training=False)
        tacotron2.load_weights(args.checkpoint)
        tacotron2.summary()
        tacotron2_concrete_function = tacotron2.inference_tflite.get_concrete_function()
        converter = tf.lite.TFLiteConverter.from_concrete_functions(
            [tacotron2_concrete_function])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_ops = [
            tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
        ]
        tflite_model = converter.convert()

        with open(os.path.join(args.outdir, "{}.tflite".format(save_name)),
                  'wb') as f:
            f.write(tflite_model)
    else:
        print("dump => vocab_size: {}".format(args.vocab_size))
        # workaround for a tensorflow-gpu==2.3.0 bug: run inference once so the
        # model is built before load_weights is called.
        tacotron2.inference(
            input_ids=tf.expand_dims(tf.convert_to_tensor([0], dtype=tf.int32), 0),
            input_lengths=tf.convert_to_tensor([1], tf.int32),
            speaker_ids=tf.convert_to_tensor([0], dtype=tf.int32),
        )
        tacotron2.load_weights(args.checkpoint)
        tf.saved_model.save(tacotron2,
                            os.path.join(args.outdir, save_name),
                            signatures=tacotron2.inference)
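A hedged sketch of consuming the SavedModel dumped above. The path is hypothetical, and the signature key assumes the default chosen by tf.saved_model.save when a single function is exported:

import tensorflow as tf

loaded = tf.saved_model.load("./outdir/model-120000")  # hypothetical save_name
infer = loaded.signatures["serving_default"]
outputs = infer(
    input_ids=tf.constant([[1, 2, 3, 4, 5]], tf.int32),  # hypothetical token ids
    input_lengths=tf.constant([5], tf.int32),
    speaker_ids=tf.constant([0], tf.int32),
)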
Example #9
# initialize melgan model
with open(config_lp.multiband_melgan_baker) as f:
    melgan_config = yaml.load(f, Loader=yaml.Loader)
melgan_config = MelGANGeneratorConfig(
    **melgan_config["multiband_melgan_generator_params"])
melgan = TFMelGANGenerator(config=melgan_config, name='mb_melgan')
melgan._build()
melgan.load_weights(config_lp.multiband_melgan_pretrained_path)

# initialize Tacotron2 model.
with open(config_lp.tacotron2_baker) as f:
    config = yaml.load(f, Loader=yaml.Loader)
config = Tacotron2Config(**config["tacotron2_params"])
tacotron2 = TFTacotron2(config=config,
                        training=False,
                        name="tacotron2v2",
                        enable_tflite_convertible=True)

# Newly added :
tacotron2.setup_window(win_front=6, win_back=6)
tacotron2.setup_maximum_iterations(3000)

tacotron2._build()
tacotron2.load_weights(config_lp.tacotron2_pretrained_path)
tacotron2.summary()

# Concrete Function
tacotron2_concrete_function = tacotron2.inference_tflite.get_concrete_function()

converter = tf.lite.TFLiteConverter.from_concrete_functions(
    [tacotron2_concrete_function])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS,
                                       tf.lite.OpsSet.SELECT_TF_OPS]
tflite_model = converter.convert()
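Downstream, mels predicted by tacotron2 are vocoded with the melgan generator built above. A minimal sketch with a random stand-in mel (assumed shape [batch, frames, n_mels]); note that a multi-band generator emits sub-band signals that may still need PQMF synthesis to become a single waveform:

import tensorflow as tf

fake_mel = tf.random.uniform([1, 100, 80], dtype=tf.float32)  # stand-in for a predicted mel
subbands_or_audio = melgan(fake_mel)  # [1, samples, channels] generator output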
Example #10
def main():
    """Run training process."""
    parser = argparse.ArgumentParser(description="Train Tacotron2")
    parser.add_argument("--outdir", type=str, required=True, help="directory to save checkpoints.")
    parser.add_argument("--rootdir", type=str, required=True, help="dataset directory root")
    parser.add_argument("--resume",default="",type=str,nargs="?",help='checkpoint file path to resume training. (default="")')
    parser.add_argument("--verbose",type=int,default=1,help="logging level. higher is more logging. (default=1)")
    parser.add_argument("--batch-size", default=12, type=int, help="batch size.")
    parser.add_argument("--mixed_precision",default=0,type=int,help="using mixed precision for generator or not.")
    args = parser.parse_args()
    
    if args.resume is not None and os.path.isdir(args.resume):
        args.resume = tf.train.latest_checkpoint(args.resume)
    
    # set mixed precision config
    if args.mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})

    args.mixed_precision = bool(args.mixed_precision)
    
    # set logger
    log_format = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
    if args.verbose > 1:
        logging.basicConfig(level=logging.DEBUG, stream=sys.stdout, format=log_format)
    elif args.verbose > 0:
        logging.basicConfig(level=logging.INFO, stream=sys.stdout, format=log_format)
    else:
        logging.basicConfig(level=logging.WARN, stream=sys.stdout, format=log_format)
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence(checkpoint)
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    
    # select processor
    Processor = JSpeechProcessor     # for test
    
    processor = Processor(rootdir=args.rootdir)
    
    config = Config(args.outdir, args.batch_size, processor.vocab_size())
    
    max_mel_length = processor.max_feat_length() // config.n_mels
    max_seq_length = processor.max_seq_length()
    
    # split train and test 
    train_split, valid_split = train_test_split(processor.items,
                                                test_size=config.test_size,
                                                random_state=42,
                                                shuffle=True)
    train_dataset = generate_datasets(train_split, config, max_mel_length, max_seq_length)
    valid_dataset = generate_datasets(valid_split, config, max_mel_length, max_seq_length)
     
    STRATEGY = return_strategy()  # defined at module level in the original source

    # define trainer
    trainer = Tacotron2Trainer(
        config=config,
        strategy=STRATEGY,
        steps=0,
        epochs=0,
        is_mixed_precision=args.mixed_precision
    )
    
    with STRATEGY.scope():
        # define model.
        tacotron2 = TFTacotron2(config=config, training=True, name="tacotron2")
        
        # build the model once with dummy inputs so all weights are created
        input_ids = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9]])
        input_lengths = np.array([9])
        speaker_ids = np.array([0])
        mel_outputs = np.random.normal(size=(1, 50, config.n_mels)).astype(np.float32)
        mel_lengths = np.array([50])
        tacotron2(input_ids, input_lengths, speaker_ids, mel_outputs, mel_lengths, 10, training=True)
        tacotron2.summary()

        # AdamW for tacotron2
        learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
            initial_learning_rate=config.initial_learning_rate,
            decay_steps=config.decay_steps,
            end_learning_rate=config.end_learning_rate,
        )

        learning_rate_fn = WarmUp(
            initial_learning_rate=config.initial_learning_rate,
            decay_schedule_fn=learning_rate_fn,
            warmup_steps=int(config.train_max_steps * config.warmup_proportion),
        )

        optimizer = AdamWeightDecay(
            learning_rate=learning_rate_fn,
            weight_decay_rate=config.weight_decay,
            beta_1=0.9,
            beta_2=0.98,
            epsilon=1e-6,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
        )

        _ = optimizer.iterations

    # compile trainer
    trainer.compile(model=tacotron2, optimizer=optimizer)

    # start training
    try:
        trainer.fit(train_dataset,
                    valid_dataset,
                    saved_path=os.path.join(args.outdir, "checkpoints/"),
                    resume=args.resume)
    except KeyboardInterrupt:
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
Example #11
def main():
    """Running extract tacotron-2 durations."""
    parser = argparse.ArgumentParser(
        description="Extract durations from charactor with trained Tacotron-2 "
        "(See detail in tensorflow_tts/example/tacotron-2/extract_duration.py).")
    parser.add_argument("--rootdir",default=None,type=str, required=True,help="directory including ids/durations files.",)
    parser.add_argument("--outdir", type=str, required=True, help="directory to save generated speech.")
    parser.add_argument("--checkpoint", type=str, required=True, help="checkpoint file to be loaded." )
    parser.add_argument("--use-norm", default=1, type=int, help="usr norm-mels for train or raw.")
    parser.add_argument("--batch-size", default=8, type=int, help="batch size.")
    parser.add_argument("--win-front", default=3, type=int, help="win-front.")
    parser.add_argument("--win-back", default=3, type=int, help="win-front.")
    parser.add_argument("--use-window-mask", default=1, type=int, help="toggle window masking."  )
    parser.add_argument("--save-alignment", default=1, type=int, help="save-alignment.")
    parser.add_argument("--dataset_mapping", default="dump/baker_mapper.json", type=str, )
    parser.add_argument("--config", default=None, type=str,  required=True,   help="yaml format configuration file. if not explicitly provided, it will be searched in the checkpoint directory. (default=None)", )
    parser.add_argument( "--verbose",  type=int,default=1,  help="logging level. higher is more logging. (default=1)",  )
    args = parser.parse_args()
    print(args)

    # set logger
    log_format = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
    if args.verbose > 1:
        logging.basicConfig(level=logging.DEBUG, format=log_format)
    elif args.verbose > 0:
        logging.basicConfig(level=logging.INFO, format=log_format)
    else:
        logging.basicConfig(level=logging.WARN, format=log_format)
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    if not os.path.exists(args.outdir+'_align_fig'):
        os.makedirs(args.outdir+'_align_fig')

    # load config
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))

    if config["format"] == "npy":
        char_query = "*-ids.npy"
        mel_query = "*-raw-feats.npy" if args.use_norm is False else "*-norm-feats.npy"
        char_load_fn = np.load
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy is supported.")

    with open(args.dataset_mapping) as f:
        dataset_mapping = json.load(f)
        speakers_map = dataset_mapping["speakers_map"]

    # n_speakers should match the number of speakers in speakers_map
    n_speakers = config["tacotron2_params"]["n_speakers"]

    # define data-loader
    dataset = CharactorMelDataset(
        dataset=config["tacotron2_params"]["dataset"],
        root_dir=args.rootdir,
        charactor_query=char_query,
        mel_query=mel_query,
        charactor_load_fn=char_load_fn,
        mel_load_fn=mel_load_fn,
        reduction_factor=config["tacotron2_params"]["reduction_factor"],
        use_fixed_shapes=True,
        speakers_map=speakers_map,
    )
    dataset = dataset.create(allow_cache=True, batch_size=args.batch_size, drop_remainder=False)

    # define model and load checkpoint
    tacotron2 = TFTacotron2(config=Tacotron2Config(**config["tacotron2_params"]), name="tacotron2",)
    tacotron2._build()  # build model to be able load_weights.
    tacotron2.load_weights(args.checkpoint)

    # apply tf.function for tacotron2.
    tacotron2 = tf.function(tacotron2, experimental_relax_shapes=True)

    for data in tqdm(dataset, desc="[Extract Duration]"):
        utt_ids = data["utt_ids"]
        input_lengths = data["input_lengths"]
        mel_lengths = data["mel_lengths"]
        utt_ids = utt_ids.numpy()
        real_mel_lengths = data["real_mel_lengths"]
        del data["real_mel_lengths"]

        # tacotron2 inference.
        mel_outputs, post_mel_outputs, stop_outputs, alignment_historys = tacotron2(
            **data,
            use_window_mask=args.use_window_mask,
            win_front=args.win_front,
            win_back=args.win_back,
            training=True,
        )

        # convert to numpy
        alignment_historys = alignment_historys.numpy()

        for i, alignment in enumerate(alignment_historys):
            real_char_length = input_lengths[i].numpy()
            real_mel_length = real_mel_lengths[i].numpy()
            alignment_mel_length = int(np.ceil(
                real_mel_length / config["tacotron2_params"]["reduction_factor"]))
            alignment = alignment[:real_char_length, :alignment_mel_length]
            d = get_duration_from_alignment(alignment)  # [max_char_len]

            d = d * config["tacotron2_params"]["reduction_factor"]
            assert np.sum(d) >= real_mel_length, \
                f"{d}, {np.sum(d)}, {alignment_mel_length}, {real_mel_length}"
            if np.sum(d) > real_mel_length:
                rest = np.sum(d) - real_mel_length
                # print(d, np.sum(d), real_mel_length)
                if d[-1] > rest:
                    d[-1] -= rest
                elif d[0] > rest:
                    d[0] -= rest
                else:
                    d[-1] -= rest // 2
                    d[0] -= rest - rest // 2

                assert d[-1] >= 0 and d[0] >= 0, f"{d}, {np.sum(d)}, {real_mel_length}"

            saved_name = utt_ids[i].decode("utf-8")

            # check length compatibility
            assert len(d) == real_char_length, \
                f"mismatch between len_char and len_durations: {len(d)} and {real_char_length}"

            assert np.sum(d) == real_mel_length, \
                f"mismatch between sum_durations and len_mel: {np.sum(d)} and {real_mel_length}"

            # save D to folder.
            np.save(os.path.join(args.outdir, f"{saved_name}-durations.npy"),
                    d.astype(np.int32),
                    allow_pickle=False)

            # save alignment to debug.
            if args.save_alignment == 1:
                figname = os.path.join(args.outdir+'_align_fig/', f"{saved_name}_alignment.png")
                fig = plt.figure(figsize=(8, 6))
                ax = fig.add_subplot(111)
                ax.set_title(f"Alignment of {saved_name}")
                im = ax.imshow(alignment, aspect="auto", origin="lower", interpolation="none")
                fig.colorbar(im, ax=ax)
                xlabel = "Decoder timestep"
                plt.xlabel(xlabel)
                plt.ylabel("Encoder timestep")
                plt.tight_layout()
                plt.savefig(figname)
                plt.close()
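Both duration-extraction examples call get_duration_from_alignment(). A hedged sketch of the underlying idea (a plain argmax vote per decoder frame, not necessarily the project's exact code):

import numpy as np

def get_duration_from_alignment(alignment):
    # alignment: [char_len, mel_len] attention weights.
    durations = np.zeros(alignment.shape[0], dtype=np.int32)
    for frame in alignment.T:                  # one attention column per decoder frame
        durations[int(np.argmax(frame))] += 1  # vote for the most-attended character
    return durations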