Example #1
    def test_no_espeak_phonemes():
        # prepare the config
        config = VitsConfig(
            batch_size=2,
            eval_batch_size=2,
            num_loader_workers=0,
            num_eval_loader_workers=0,
            text_cleaner="english_cleaners",
            use_phonemes=True,
            use_espeak_phonemes=False,
            phoneme_language="en-us",
            phoneme_cache_path="tests/data/ljspeech/phoneme_cache/",
            run_eval=True,
            test_delay_epochs=-1,
            epochs=1,
            print_step=1,
            print_eval=True,
            datasets=[dataset_config_en, dataset_config_pt],
        )
        config.save_json(config_path)

        # run test
        run_cli(
            f'CUDA_VISIBLE_DEVICES="" python TTS/bin/find_unique_phonemes.py --config_path "{config_path}"'
        )
Example #2
    def test_multispeaker_inference(self):
        num_speakers = 10
        config = VitsConfig(num_speakers=num_speakers,
                            use_speaker_embedding=True)
        model = Vits(config).to(device)

        batch_size = 1
        input_dummy, *_ = self._create_inputs(config, batch_size=batch_size)
        speaker_ids = torch.randint(0, num_speakers,
                                    (batch_size, )).long().to(device)
        outputs = model.inference(input_dummy, {"speaker_ids": speaker_ids})
        self._check_inference_outputs(config,
                                      outputs,
                                      input_dummy,
                                      batch_size=batch_size)

        batch_size = 2
        input_dummy, input_lengths, *_ = self._create_inputs(
            config, batch_size=batch_size)
        speaker_ids = torch.randint(0, num_speakers,
                                    (batch_size, )).long().to(device)
        outputs = model.inference(input_dummy, {
            "x_lengths": input_lengths,
            "speaker_ids": speaker_ids
        })
        self._check_inference_outputs(config,
                                      outputs,
                                      input_dummy,
                                      batch_size=batch_size)
Example #3
    def test_multilingual_forward(self):
        num_speakers = 10
        num_langs = 3
        batch_size = 2

        args = VitsArgs(language_ids_file=LANG_FILE,
                        use_language_embedding=True,
                        spec_segment_size=10)
        config = VitsConfig(num_speakers=num_speakers,
                            use_speaker_embedding=True,
                            model_args=args)

        input_dummy, input_lengths, _, spec, spec_lengths, waveform = self._create_inputs(
            config, batch_size=batch_size)
        speaker_ids = torch.randint(0, num_speakers,
                                    (batch_size, )).long().to(device)
        lang_ids = torch.randint(0, num_langs,
                                 (batch_size, )).long().to(device)

        model = Vits(config).to(device)
        output_dict = model.forward(
            input_dummy,
            input_lengths,
            spec,
            spec_lengths,
            waveform,
            aux_input={
                "speaker_ids": speaker_ids,
                "language_ids": lang_ids
            },
        )
        self._check_forward_outputs(config, output_dict)
Example #4
 def test_d_vector_inference(self):
     args = VitsArgs(
         spec_segment_size=10,
         num_chars=32,
         use_d_vector_file=True,
         d_vector_dim=256,
         d_vector_file=os.path.join(get_tests_data_path(),
                                    "dummy_speakers.json"),
     )
     config = VitsConfig(model_args=args)
     model = Vits.init_from_config(config, verbose=False).to(device)
     model.eval()
     # batch size = 1
     input_dummy = torch.randint(0, 24, (1, 128)).long().to(device)
     d_vectors = torch.randn(1, 256).to(device)
     outputs = model.inference(input_dummy,
                               aux_input={"d_vectors": d_vectors})
     self._check_inference_outputs(config, outputs, input_dummy)
     # batch size = 2
     input_dummy, input_lengths, *_ = self._create_inputs(config)
     d_vectors = torch.randn(2, 256).to(device)
     outputs = model.inference(input_dummy,
                               aux_input={
                                   "x_lengths": input_lengths,
                                   "d_vectors": d_vectors
                               })
     self._check_inference_outputs(config,
                                   outputs,
                                   input_dummy,
                                   batch_size=2)
Example #5
    def test_train_step(self):
        # setup the model
        with torch.autograd.set_detect_anomaly(True):

            config = VitsConfig(
                model_args=VitsArgs(num_chars=32, spec_segment_size=10))
            model = Vits(config).to(device)
            model.train()
            # model to train
            optimizers = model.get_optimizer()
            criterions = model.get_criterion()
            criterions = [criterions[0].to(device), criterions[1].to(device)]
            # reference model to compare model weights
            model_ref = Vits(config).to(device)
            # pass the state to ref model
            model_ref.load_state_dict(copy.deepcopy(model.state_dict()))
            count = 0
            for param, param_ref in zip(model.parameters(),
                                        model_ref.parameters()):
                assert (param - param_ref).sum() == 0, param
                count = count + 1
            for _ in range(5):
                batch = self._create_batch(config, 2)
                for idx in [0, 1]:
                    outputs, loss_dict = model.train_step(
                        batch, criterions, idx)
                    self.assertFalse(not outputs)
                    self.assertFalse(not loss_dict)
                    loss_dict["loss"].backward()
                    optimizers[idx].step()
                    optimizers[idx].zero_grad()

        # check parameter changes
        self._check_parameter_changes(model, model_ref)
Example #6
 def test_inference(self):
     num_speakers = 0
     config = VitsConfig(num_speakers=num_speakers,
                         use_speaker_embedding=True)
     input_dummy = torch.randint(0, 24, (1, 128)).long().to(device)
     model = Vits(config).to(device)
     _ = model.inference(input_dummy)
Example #7
 def test_test_run(self):
     config = VitsConfig(model_args=VitsArgs(num_chars=32))
     model = Vits.init_from_config(config, verbose=False).to(device)
     model.run_data_dep_init = False
     model.eval()
     test_figures, test_audios = model.test_run(None)
     self.assertTrue(test_figures is not None)
     self.assertTrue(test_audios is not None)
Example #8
 def test_forward(self):
     num_speakers = 0
     config = VitsConfig(num_speakers=num_speakers,
                         use_speaker_embedding=True)
     config.model_args.spec_segment_size = 10
     input_dummy, input_lengths, _, spec, spec_lengths, waveform = self._create_inputs(
         config)
     model = Vits(config).to(device)
     output_dict = model.forward(input_dummy, input_lengths, spec,
                                 spec_lengths, waveform)
     self._check_forward_outputs(config, output_dict)
Example #9
 def test_load_checkpoint(self):
     chkp_path = os.path.join(get_tests_output_path(),
                              "dummy_glow_tts_checkpoint.pth")
     config = VitsConfig(VitsArgs(num_chars=32))
     model = Vits.init_from_config(config, verbose=False).to(device)
     chkp = {}
     chkp["model"] = model.state_dict()
     torch.save(chkp, chkp_path)
     model.load_checkpoint(config, chkp_path)
     self.assertTrue(model.training)
     model.load_checkpoint(config, chkp_path, eval=True)
     self.assertFalse(model.training)
Example #10
    def test_multilingual_inference(self):
        num_speakers = 10
        num_langs = 3
        args = VitsArgs(language_ids_file=LANG_FILE,
                        use_language_embedding=True,
                        spec_segment_size=10)
        config = VitsConfig(num_speakers=num_speakers,
                            use_speaker_embedding=True,
                            model_args=args)
        model = Vits(config).to(device)

        input_dummy = torch.randint(0, 24, (1, 128)).long().to(device)
        speaker_ids = torch.randint(0, num_speakers, (1, )).long().to(device)
        lang_ids = torch.randint(0, num_langs, (1, )).long().to(device)
        _ = model.inference(input_dummy, {
            "speaker_ids": speaker_ids,
            "language_ids": lang_ids
        })

        batch_size = 1
        input_dummy, *_ = self._create_inputs(config, batch_size=batch_size)
        speaker_ids = torch.randint(0, num_speakers,
                                    (batch_size, )).long().to(device)
        lang_ids = torch.randint(0, num_langs,
                                 (batch_size, )).long().to(device)
        outputs = model.inference(input_dummy, {
            "speaker_ids": speaker_ids,
            "language_ids": lang_ids
        })
        self._check_inference_outputs(config,
                                      outputs,
                                      input_dummy,
                                      batch_size=batch_size)

        batch_size = 2
        input_dummy, input_lengths, *_ = self._create_inputs(
            config, batch_size=batch_size)
        speaker_ids = torch.randint(0, num_speakers,
                                    (batch_size, )).long().to(device)
        lang_ids = torch.randint(0, num_langs,
                                 (batch_size, )).long().to(device)
        outputs = model.inference(
            input_dummy, {
                "x_lengths": input_lengths,
                "speaker_ids": speaker_ids,
                "language_ids": lang_ids
            })
        self._check_inference_outputs(config,
                                      outputs,
                                      input_dummy,
                                      batch_size=batch_size)
Example #11
    def test_init_from_config(self):
        config = VitsConfig(model_args=VitsArgs(num_chars=32))
        model = Vits.init_from_config(config, verbose=False).to(device)

        config = VitsConfig(model_args=VitsArgs(num_chars=32, num_speakers=2))
        model = Vits.init_from_config(config, verbose=False).to(device)
        self.assertTrue(not hasattr(model, "emb_g"))

        config = VitsConfig(model_args=VitsArgs(
            num_chars=32, num_speakers=2, use_speaker_embedding=True))
        model = Vits.init_from_config(config, verbose=False).to(device)
        self.assertEqual(model.num_speakers, 2)
        self.assertTrue(hasattr(model, "emb_g"))

        config = VitsConfig(model_args=VitsArgs(
            num_chars=32,
            num_speakers=2,
            use_speaker_embedding=True,
            speakers_file=os.path.join(get_tests_data_path(), "ljspeech",
                                       "speakers.json"),
        ))
        model = Vits.init_from_config(config, verbose=False).to(device)
        self.assertEqual(model.num_speakers, 10)
        self.assertTrue(hasattr(model, "emb_g"))

        config = VitsConfig(model_args=VitsArgs(
            num_chars=32,
            use_d_vector_file=True,
            d_vector_dim=256,
            d_vector_file=os.path.join(get_tests_data_path(),
                                       "dummy_speakers.json"),
        ))
        model = Vits.init_from_config(config, verbose=False).to(device)
        self.assertTrue(model.num_speakers == 1)
        self.assertTrue(not hasattr(model, "emb_g"))
        self.assertTrue(model.embedded_speaker_dim == config.d_vector_dim)
Example #12
 def test_multilingual_inference(self):
     num_speakers = 10
     num_langs = 3
     args = VitsArgs(language_ids_file=LANG_FILE,
                     use_language_embedding=True,
                     spec_segment_size=10)
     config = VitsConfig(num_speakers=num_speakers,
                         use_speaker_embedding=True,
                         model_args=args)
     input_dummy = torch.randint(0, 24, (1, 128)).long().to(device)
     speaker_ids = torch.randint(0, num_speakers, (1, )).long().to(device)
     lang_ids = torch.randint(0, num_langs, (1, )).long().to(device)
     model = Vits(config).to(device)
     _ = model.inference(input_dummy, {
         "speaker_ids": speaker_ids,
         "language_ids": lang_ids
     })
Example #13
    def test_secl_forward(self):
        num_speakers = 10
        num_langs = 3
        batch_size = 2

        speaker_encoder_config = load_config(SPEAKER_ENCODER_CONFIG)
        speaker_encoder_config.model_params["use_torch_spec"] = True
        speaker_encoder = setup_encoder_model(speaker_encoder_config).to(
            device)
        speaker_manager = SpeakerManager()
        speaker_manager.encoder = speaker_encoder

        args = VitsArgs(
            language_ids_file=LANG_FILE,
            use_language_embedding=True,
            spec_segment_size=10,
            use_speaker_encoder_as_loss=True,
        )
        config = VitsConfig(num_speakers=num_speakers,
                            use_speaker_embedding=True,
                            model_args=args)
        config.audio.sample_rate = 16000

        input_dummy, input_lengths, _, spec, spec_lengths, waveform = self._create_inputs(
            config, batch_size=batch_size)
        speaker_ids = torch.randint(0, num_speakers,
                                    (batch_size, )).long().to(device)
        lang_ids = torch.randint(0, num_langs,
                                 (batch_size, )).long().to(device)

        model = Vits(config, speaker_manager=speaker_manager).to(device)
        output_dict = model.forward(
            input_dummy,
            input_lengths,
            spec,
            spec_lengths,
            waveform,
            aux_input={
                "speaker_ids": speaker_ids,
                "language_ids": lang_ids
            },
        )
        self._check_forward_outputs(config, output_dict,
                                    speaker_encoder_config)
Example #14
    def test_multispeaker_forward(self):
        num_speakers = 10

        config = VitsConfig(num_speakers=num_speakers,
                            use_speaker_embedding=True)
        config.model_args.spec_segment_size = 10

        input_dummy, input_lengths, _, spec, spec_lengths, waveform = self._create_inputs(
            config)
        speaker_ids = torch.randint(0, num_speakers, (8, )).long().to(device)

        model = Vits(config).to(device)
        output_dict = model.forward(input_dummy,
                                    input_lengths,
                                    spec,
                                    spec_lengths,
                                    waveform,
                                    aux_input={"speaker_ids": speaker_ids})
        self._check_forward_outputs(config, output_dict)
Example #15
    def test_train_eval_log(self):
        batch_size = 2
        config = VitsConfig(
            model_args=VitsArgs(num_chars=32, spec_segment_size=10))
        model = Vits.init_from_config(config, verbose=False).to(device)
        model.run_data_dep_init = False
        model.train()
        batch = self._create_batch(config, batch_size)
        logger = TensorboardLogger(log_dir=os.path.join(
            get_tests_output_path(), "dummy_vits_logs"),
                                   model_name="vits_test_train_log")
        criterion = model.get_criterion()
        criterion = [criterion[0].to(device), criterion[1].to(device)]
        outputs = [None] * 2
        outputs[0], _ = model.train_step(batch, criterion, 0)
        outputs[1], _ = model.train_step(batch, criterion, 1)
        model.train_log(batch, outputs, logger, None, 1)

        model.eval_log(batch, outputs, logger, None, 1)
        logger.finish()
Example #16
    def test_inference(self):
        num_speakers = 0
        config = VitsConfig(num_speakers=num_speakers,
                            use_speaker_embedding=True)
        model = Vits(config).to(device)

        batch_size = 1
        input_dummy, *_ = self._create_inputs(config, batch_size=batch_size)
        outputs = model.inference(input_dummy)
        self._check_inference_outputs(config,
                                      outputs,
                                      input_dummy,
                                      batch_size=batch_size)

        batch_size = 2
        input_dummy, input_lengths, *_ = self._create_inputs(
            config, batch_size=batch_size)
        outputs = model.inference(input_dummy,
                                  aux_input={"x_lengths": input_lengths})
        self._check_inference_outputs(config,
                                      outputs,
                                      input_dummy,
                                      batch_size=batch_size)
Example #17
 def test_d_vector_forward(self):
     batch_size = 2
     args = VitsArgs(
         spec_segment_size=10,
         num_chars=32,
         use_d_vector_file=True,
         d_vector_dim=256,
         d_vector_file=os.path.join(get_tests_data_path(),
                                    "dummy_speakers.json"),
     )
     config = VitsConfig(model_args=args)
     model = Vits.init_from_config(config, verbose=False).to(device)
     model.train()
     input_dummy, input_lengths, _, spec, spec_lengths, waveform = self._create_inputs(
         config, batch_size=batch_size)
     d_vectors = torch.randn(batch_size, 256).to(device)
     output_dict = model.forward(input_dummy,
                                 input_lengths,
                                 spec,
                                 spec_lengths,
                                 waveform,
                                 aux_input={"d_vectors": d_vectors})
     self._check_forward_outputs(config, output_dict)
Example #18
 def test_get_criterion(self):
     config = VitsConfig(VitsArgs(num_chars=32))
     model = Vits.init_from_config(config, verbose=False).to(device)
     criterion = model.get_criterion()
     self.assertTrue(criterion is not None)
Example #19
config = VitsConfig(
    model_args=vitsArgs,
    audio=audio_config,
    run_name="vits_vctk",
    use_speaker_embedding=True,
    batch_size=32,
    eval_batch_size=16,
    batch_group_size=0,
    num_loader_workers=4,
    num_eval_loader_workers=4,
    run_eval=True,
    test_delay_epochs=-1,
    epochs=1000,
    text_cleaner="multilingual_cleaners",
    use_phonemes=False,
    phoneme_language="en-us",
    phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
    compute_input_seq_cache=True,
    print_step=25,
    use_language_weighted_sampler=True,
    print_eval=False,
    mixed_precision=False,
    sort_by_audio_len=True,
    min_seq_len=32 * 256 * 4,
    max_seq_len=160000,
    output_path=output_path,
    datasets=dataset_config,
    characters={
        "pad": "_",
        "eos": "&",
        "bos": "*",
        "characters":
        "!¡'(),-.:;¿?abcdefghijklmnopqrstuvwxyzµßàáâäåæçèéêëìíîïñòóôöùúûüąćęłńœśşźżƒабвгдежзийклмнопрстуфхцчшщъыьэюяёєіїґӧ «°±µ»$%&‘’‚“`”„",
        "punctuations": "!¡'(),-.:;¿? ",
        "phonemes": None,
        "unique": True,
    },
    test_sentences=[
        [
            "It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent.",
            "mary_ann",
            None,
            "en_US",
        ],
        [
            "Il m'a fallu beaucoup de temps pour d\u00e9velopper une voix, et maintenant que je l'ai, je ne vais pas me taire.",
            "ezwa",
            None,
            "fr_FR",
        ],
        [
            "Ich finde, dieses Startup ist wirklich unglaublich.", "eva_k",
            None, "de_DE"
        ],
        [
            "Я думаю, что этот стартап действительно удивительный.", "oblomov",
            None, "ru_RU"
        ],
    ],
)
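A minimal follow-up sketch, not part of the original example: a finished config like this is typically turned into a model the same way the test snippets on this page do it. The device variable is an assumption here, defined as in those snippets.

# hypothetical follow-up: instantiate the model from the completed config
model = Vits.init_from_config(config)
model.to(device)  # device is assumed to be defined, e.g. "cuda" or "cpu"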
Example #20
)

vitsArgs = VitsArgs(use_speaker_embedding=True)

config = VitsConfig(
    model_args=vitsArgs,
    audio=audio_config,
    run_name="vits_vctk",
    batch_size=32,
    eval_batch_size=16,
    batch_group_size=5,
    num_loader_workers=4,
    num_eval_loader_workers=4,
    run_eval=True,
    test_delay_epochs=-1,
    epochs=1000,
    text_cleaner="english_cleaners",
    use_phonemes=True,
    phoneme_language="en",
    phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
    compute_input_seq_cache=True,
    print_step=25,
    print_eval=False,
    mixed_precision=True,
    max_text_len=325,  # change this if you have a larger VRAM than 16GB
    output_path=output_path,
    datasets=[dataset_config],
)

# INITIALIZE THE AUDIO PROCESSOR
# Audio processor is used for feature extraction and audio I/O.
# It mainly serves the dataloader and the training loggers.
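Example #20 is cut off right where the processor would be created; the other recipes on this page (Examples #21 and #24) build it directly from the config's audio settings, for instance:

ap = AudioProcessor(**config.audio.to_dict())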
Example #21
    signal_norm=False,
    do_amp_to_db_linear=False,
)

config = VitsConfig(
    audio=audio_config,
    run_name="vits_ljspeech",
    batch_size=48,
    eval_batch_size=16,
    batch_group_size=5,
    num_loader_workers=4,
    num_eval_loader_workers=4,
    run_eval=True,
    test_delay_epochs=-1,
    epochs=1000,
    text_cleaner="english_cleaners",
    use_phonemes=True,
    phoneme_language="en-us",
    phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
    compute_input_seq_cache=True,
    print_step=25,
    print_eval=True,
    mixed_precision=True,
    max_seq_len=500000,
    output_path=output_path,
    datasets=[dataset_config],
)

# init audio processor
ap = AudioProcessor(**config.audio.to_dict())
Example #22
    meta_file_val="metadata.csv",
    path="tests/data/ljspeech",
    language="pt-br",
)

config = VitsConfig(
    batch_size=2,
    eval_batch_size=2,
    num_loader_workers=0,
    num_eval_loader_workers=0,
    text_cleaner="multilingual_cleaners",
    use_phonemes=False,
    phoneme_cache_path="tests/data/ljspeech/phoneme_cache/",
    run_eval=True,
    test_delay_epochs=-1,
    epochs=1,
    print_step=1,
    print_eval=True,
    test_sentences=[
        ["Be a voice, not an echo.", "ljspeech-0", None, "en"],
        ["Be a voice, not an echo.", "ljspeech-1", None, "pt-br"],
    ],
    datasets=[
        dataset_config_en, dataset_config_en, dataset_config_en,
        dataset_config_pt
    ],
)
# set audio config
config.audio.do_trim_silence = True
config.audio.trim_db = 60

# activate multilingual mode
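The snippet stops right after this comment. Enabling multilingual mode presumably amounts to turning on the language embedding on the model args, as in the VitsArgs usage of Examples #3 and #13; the continuation below is an assumption, not part of the original snippet.

# assumed continuation (hypothetical): enable language embeddings for multilingual training
config.model_args.use_language_embedding = True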
Example #23
from tests import get_device_id, get_tests_output_path, run_cli
from TTS.tts.configs.vits_config import VitsConfig

config_path = os.path.join(get_tests_output_path(), "test_model_config.json")
output_path = os.path.join(get_tests_output_path(), "train_outputs")

config = VitsConfig(
    batch_size=2,
    eval_batch_size=2,
    num_loader_workers=0,
    num_eval_loader_workers=0,
    text_cleaner="english_cleaners",
    use_phonemes=True,
    use_espeak_phonemes=True,
    phoneme_language="en-us",
    phoneme_cache_path="tests/data/ljspeech/phoneme_cache/",
    run_eval=True,
    test_delay_epochs=-1,
    epochs=1,
    print_step=1,
    print_eval=True,
    test_sentences=[
        ["Be a voice, not an echo.", "ljspeech"],
    ],
)
# set audio config
config.audio.do_trim_silence = True
config.audio.trim_db = 60

# activate multispeaker d-vec mode
config.model_args.use_speaker_embedding = True
Example #24
vitsArgs = VitsArgs(use_speaker_embedding=True)

config = VitsConfig(
    model_args=vitsArgs,
    audio=audio_config,
    run_name="vits_vctk",
    batch_size=32,
    eval_batch_size=16,
    batch_group_size=5,
    num_loader_workers=4,
    num_eval_loader_workers=4,
    run_eval=True,
    test_delay_epochs=-1,
    epochs=1000,
    text_cleaner="english_cleaners",
    use_phonemes=True,
    phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
    compute_input_seq_cache=True,
    print_step=25,
    print_eval=False,
    mixed_precision=True,
    sort_by_audio_len=True,
    min_seq_len=32 * 256 * 4,
    max_seq_len=1500000,
    output_path=output_path,
    datasets=[dataset_config],
)

# init audio processor
ap = AudioProcessor(**config.audio.to_dict())
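A minimal sketch of how such a recipe usually continues once the audio processor exists, assuming the standard Coqui TTS training flow; load_tts_samples, Trainer, TrainerArgs and the sample variables are assumptions and do not appear in the truncated snippet above.

# assumed continuation of the recipe (hypothetical sketch, not from the original page)
from trainer import Trainer, TrainerArgs

from TTS.tts.datasets import load_tts_samples
from TTS.tts.models.vits import Vits

# load the train/eval samples described by the dataset config
train_samples, eval_samples = load_tts_samples(dataset_config, eval_split=True)

# build the model from the config and hand everything to the trainer
model = Vits.init_from_config(config)
trainer = Trainer(
    TrainerArgs(),
    config,
    output_path,
    model=model,
    train_samples=train_samples,
    eval_samples=eval_samples,
)
trainer.fit()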