# Disable the learned speaker-embedding layer; speakers are represented by
# externally computed d-vectors instead.
config.use_speaker_embedding = False

# Activate multispeaker d-vector mode. Each setting must be mirrored on both
# the top-level config and the nested model_args, so apply them in one pass.
_d_vector_settings = [
    ("use_d_vector_file", True),
    ("d_vector_file", "tests/data/ljspeech/speakers.json"),
    ("d_vector_dim", 256),
]
for _attr, _val in _d_vector_settings:
    setattr(config.model_args, _attr, _val)
    setattr(config, _attr, _val)

# Enable the stochastic duration predictor (also mirrored on model_args).
config.model_args.use_sdp = True
config.use_sdp = True

# Activate the language- and speaker-weighted batch samplers.
config.use_language_weighted_sampler = True
config.language_weighted_sampler_alpha = 10
config.use_speaker_weighted_sampler = True
config.speaker_weighted_sampler_alpha = 5

config.save_json(config_path)

# Train the model for one epoch via the CLI entry point.
command_train = " ".join(
    [
        f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_tts.py",
        f"--config_path {config_path}",
        f"--coqpit.output_path {output_path}",
        "--coqpit.test_delay_epochs 0",
    ]
)
run_cli(command_train)

# Find the most recently modified run folder produced by the training above.
# NOTE(review): the original statement was truncated mid-call (the `max(`
# was never closed); completed with `key=os.path.getmtime`, matching the
# identical lookup performed later in this file.
continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime)
# Disable the learned speaker-embedding layer; speakers are represented by
# externally computed d-vectors instead.
config.use_speaker_embedding = False

# Activate multispeaker d-vector mode. Each setting must be mirrored on both
# the top-level config and the nested model_args, so apply them in one pass.
_d_vector_settings = [
    ("use_d_vector_file", True),
    ("d_vector_file", "tests/data/ljspeech/speakers.json"),
    ("d_vector_dim", 256),
]
for _attr, _val in _d_vector_settings:
    setattr(config.model_args, _attr, _val)
    setattr(config, _attr, _val)

# Enable the stochastic duration predictor (also mirrored on model_args).
config.model_args.use_sdp = True
config.use_sdp = True

# Deactivate the language-weighted sampler for this run.
config.use_language_weighted_sampler = False

config.save_json(config_path)

# Train the model for one epoch via the CLI entry point.
command_train = " ".join(
    [
        f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_tts.py",
        f"--config_path {config_path}",
        f"--coqpit.output_path {output_path}",
        "--coqpit.test_delay_epochs 0",
    ]
)
run_cli(command_train)

# Locate the newest run directory (by modification time) so training can be
# resumed from it below.
run_folders = glob.glob(os.path.join(output_path, "*/"))
continue_path = max(run_folders, key=os.path.getmtime)

# restore the model and continue training for one more epoch