Example #1
def accent_apply(base_dir: str, prep_name: str, text_name: str):
    prep_dir = get_prepared_dir(base_dir, prep_name, create=False)
    text_dir = get_text_dir(prep_dir, text_name, create=False)
    if not os.path.isdir(text_dir):
        print("Please add text first.")
    else:
        print("Applying accents...")
        updated_sentences = infer_accents_apply(
            sentences=load_text_csv(text_dir),
            accented_symbols=_load_accents_csv(text_dir),
            accent_ids=load_prep_accents_ids(prep_dir),
        )
        print("\n" + updated_sentences.get_formatted(
            symbol_id_dict=load_text_symbol_converter(text_dir),
            accent_id_dict=load_prep_accents_ids(prep_dir)))
        _save_text_csv(text_dir, updated_sentences)
        _check_for_unknown_symbols(base_dir, prep_name, text_name)
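Entry points with this (base_dir, prep_name, text_name) signature are usually wired to a command-line parser. A minimal sketch of such wiring, assuming argparse; the function name init_accent_apply_parser and the convention of returning the handler are illustrative assumptions, not confirmed repo API:

from argparse import ArgumentParser

def init_accent_apply_parser(parser: ArgumentParser):
    # Each CLI flag mirrors one parameter of accent_apply; the caller is
    # expected to invoke the returned handler with the parsed arguments.
    parser.add_argument("--base_dir", type=str, required=True)
    parser.add_argument("--prep_name", type=str, required=True)
    parser.add_argument("--text_name", type=str, required=True)
    return accent_apply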
Example #2
def convert_model(base_dir: str, prep_name: str, model_path: str,
                  custom_hparams: Optional[Dict[str, str]]):
    prep_dir = get_prepared_dir(base_dir, prep_name)

    convert_v1_to_v2_model(old_model_path=model_path,
                           custom_hparams=custom_hparams,
                           speakers=load_prep_speakers_json(prep_dir),
                           accents=load_prep_accents_ids(prep_dir),
                           symbols=load_prep_symbol_converter(prep_dir))
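A hypothetical invocation; all paths and names below are placeholders, and the hparam override follows the Dict[str, str] signature above:

convert_model(
    base_dir="/data/taco",
    prep_name="ljs_ipa",
    model_path="/data/taco/old_runs/checkpoint_50000.pt",
    custom_hparams={"batch_size": "32"},
)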
Example #3
def get_infer_sentences(base_dir: str, prep_name: str,
                        text_name: str) -> InferSentenceList:
    prep_dir = get_prepared_dir(base_dir, prep_name, create=False)
    text_dir = get_text_dir(prep_dir, text_name, create=False)
    if not os.path.isdir(text_dir):
        # Fail fast with a descriptive error if the text has not been added yet.
        raise FileNotFoundError(f"The text '{text_name}' doesn't exist.")
    result = InferSentenceList.from_sentences(
        sentences=load_text_csv(text_dir),
        accents=load_prep_accents_ids(prep_dir),
        symbols=load_text_symbol_converter(text_dir))

    return result
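A sketch of a caller, with hypothetical paths and assuming InferSentenceList behaves like a list (as its from_sentences constructor suggests):

sentences = get_infer_sentences("/data/taco", "ljs_ipa", "demo")
for infer_sentence in sentences:
    # Hand each prepared sentence to the synthesis step of choice.
    ...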
Example #4
def _accent_template(base_dir: str, prep_name: str, text_name: str):
    prep_dir = get_prepared_dir(base_dir, prep_name, create=False)
    text_dir = get_text_dir(prep_dir, text_name, create=False)
    if not os.path.isdir(text_dir):
        print("Please add text first.")
    else:
        print("Updating accent template...")
        accented_symbol_list = infer_accents_template(
            sentences=load_text_csv(text_dir),
            text_symbols=load_text_symbol_converter(text_dir),
            accent_ids=load_prep_accents_ids(prep_dir),
        )
        _save_accents_csv(text_dir, accented_symbol_list)
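Note that _accent_template is not only called directly: normalize_text, add_text, ipa_convert_text, and map_text in the examples below all re-run it after changing the text or its symbol set, so the accents CSV is regenerated against the latest symbols.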
Example #5
def normalize_text(base_dir: str, prep_name: str, text_name: str):
    prep_dir = get_prepared_dir(base_dir, prep_name, create=False)
    text_dir = get_text_dir(prep_dir, text_name, create=False)
    if not os.path.isdir(text_dir):
        print("Please add text first.")
    else:
        print("Normalizing text...")
        symbol_ids, updated_sentences = infer_norm(
            sentences=load_text_csv(text_dir),
            text_symbols=load_text_symbol_converter(text_dir))
        print("\n" + updated_sentences.get_formatted(
            symbol_id_dict=symbol_ids,
            accent_id_dict=load_prep_accents_ids(prep_dir)))
        _save_text_csv(text_dir, updated_sentences)
        save_text_symbol_converter(text_dir, symbol_ids)
        _accent_template(base_dir, prep_name, text_name)
        _check_for_unknown_symbols(base_dir, prep_name, text_name)
Example #6
def add_text(base_dir: str, prep_name: str, text_name: str, filepath: str,
             lang: Language):
    prep_dir = get_prepared_dir(base_dir, prep_name, create=False)
    if not os.path.isdir(prep_dir):
        print("Please prepare data first.")
    else:
        print("Adding text...")
        symbol_ids, data = infer_add(
            text=read_text(filepath),
            lang=lang,
        )
        print(
            "\n" +
            data.get_formatted(symbol_id_dict=symbol_ids,
                               accent_id_dict=load_prep_accents_ids(prep_dir)))
        text_dir = get_text_dir(prep_dir, text_name, create=True)
        _save_text_csv(text_dir, data)
        save_text_symbol_converter(text_dir, symbol_ids)
        _accent_template(base_dir, prep_name, text_name)
        _check_for_unknown_symbols(base_dir, prep_name, text_name)
Example #7
def ipa_convert_text(base_dir: str,
                     prep_name: str,
                     text_name: str,
                     ignore_tones: bool = False,
                     ignore_arcs: bool = True):
    prep_dir = get_prepared_dir(base_dir, prep_name, create=False)
    text_dir = get_text_dir(prep_dir, text_name, create=False)
    if not os.path.isdir(text_dir):
        print("Please add text first.")
    else:
        print("Converting text to IPA...")
        symbol_ids, updated_sentences = infer_convert_ipa(
            sentences=load_text_csv(text_dir),
            text_symbols=load_text_symbol_converter(text_dir),
            ignore_tones=ignore_tones,
            ignore_arcs=ignore_arcs)
        print("\n" + updated_sentences.get_formatted(
            symbol_id_dict=symbol_ids,
            accent_id_dict=load_prep_accents_ids(prep_dir)))
        _save_text_csv(text_dir, updated_sentences)
        save_text_symbol_converter(text_dir, symbol_ids)
        _accent_template(base_dir, prep_name, text_name)
        _check_for_unknown_symbols(base_dir, prep_name, text_name)
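Taken together, the steps above form a text-preparation pipeline: text must be added before it can be normalized, converted, or accented. A minimal end-to-end sketch with hypothetical names and paths; Language.ENG is an assumed member of the imported Language enum:

base_dir, prep_name, text_name = "/data/taco", "ljs_ipa", "demo"

add_text(base_dir, prep_name, text_name,
         filepath="/data/texts/demo.txt",
         lang=Language.ENG)  # assumed enum member
normalize_text(base_dir, prep_name, text_name)
ipa_convert_text(base_dir, prep_name, text_name, ignore_tones=True)
accent_apply(base_dir, prep_name, text_name)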
Example #8
def map_text(base_dir: str,
             prep_name: str,
             text_name: str,
             symbols_map_path: str,
             ignore_arcs: bool = True):
    prep_dir = get_prepared_dir(base_dir, prep_name, create=False)
    text_dir = get_text_dir(prep_dir, text_name, create=False)
    if not os.path.isdir(text_dir):
        print("Please add text first.")
    else:
        symbol_ids, updated_sentences = sents_map(
            sentences=load_text_csv(text_dir),
            text_symbols=load_text_symbol_converter(text_dir),
            symbols_map=SymbolsMap.load(symbols_map_path),
            ignore_arcs=ignore_arcs)

        print("\n" + updated_sentences.get_formatted(
            symbol_id_dict=symbol_ids,
            accent_id_dict=load_prep_accents_ids(prep_dir)))
        _save_text_csv(text_dir, updated_sentences)
        save_text_symbol_converter(text_dir, symbol_ids)
        _accent_template(base_dir, prep_name, text_name)
        _check_for_unknown_symbols(base_dir, prep_name, text_name)
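A hypothetical call; the mapping-file path is a placeholder, and its on-disk format is whatever SymbolsMap.load expects:

map_text(
    base_dir="/data/taco",
    prep_name="ljs_ipa",
    text_name="demo",
    symbols_map_path="/data/taco/maps/demo_to_model.json",
)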
Example #9
def eval_checkpoints_main(base_dir: str, train_name: str, select: int,
                          min_it: int, max_it: int):
    train_dir = get_train_dir(base_dir, train_name, create=False)
    assert os.path.isdir(train_dir)

    prep_name = load_prep_name(train_dir)
    prep_dir = get_prepared_dir(base_dir, prep_name)

    symbols_conv = load_prep_symbol_converter(prep_dir)
    speakers = load_prep_speakers_json(prep_dir)
    accents = load_prep_accents_ids(prep_dir)

    logger = prepare_logger()

    eval_checkpoints(custom_hparams=None,
                     checkpoint_dir=get_checkpoints_dir(train_dir),
                     select=select,
                     min_it=min_it,
                     max_it=max_it,
                     n_symbols=len(symbols_conv),
                     n_speakers=len(speakers),
                     n_accents=len(accents),
                     valset=load_valset(train_dir),
                     logger=logger)
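A hypothetical run; the exact semantics of select (e.g. every n-th checkpoint) are an assumption read off the signature:

eval_checkpoints_main(
    base_dir="/data/taco",
    train_name="ljs_run1",
    select=5,
    min_it=10_000,
    max_it=50_000,
)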
Example #10
def train_main(base_dir: str,
               train_name: str,
               prep_name: str,
               warm_start_train_name: Optional[str] = None,
               warm_start_checkpoint: Optional[int] = None,
               test_size: float = 0.01,
               validation_size: float = 0.05,
               custom_hparams: Optional[Dict[str, str]] = None,
               split_seed: int = 1234,
               weights_train_name: Optional[str] = None,
               weights_checkpoint: Optional[int] = None,
               use_weights_map: Optional[bool] = None,
               map_from_speaker: Optional[str] = None):
    prep_dir = get_prepared_dir(base_dir, prep_name)
    train_dir = get_train_dir(base_dir, train_name, create=True)
    logs_dir = get_train_logs_dir(train_dir)

    taco_logger = Tacotron2Logger(logs_dir)
    logger = prepare_logger(get_train_log_file(logs_dir), reset=True)
    checkpoint_logger = prepare_logger(
        log_file_path=get_train_checkpoints_log_file(logs_dir),
        logger=logging.getLogger("checkpoint-logger"),
        reset=True)

    save_prep_name(train_dir, prep_name)

    trainset, valset = split_dataset(prep_dir=prep_dir,
                                     train_dir=train_dir,
                                     test_size=test_size,
                                     validation_size=validation_size,
                                     split_seed=split_seed)

    weights_model = try_load_checkpoint(base_dir=base_dir,
                                        train_name=weights_train_name,
                                        checkpoint=weights_checkpoint,
                                        logger=logger)

    weights_map = None
    if use_weights_map:
        weights_train_dir = get_train_dir(base_dir, weights_train_name, False)
        weights_prep_name = load_prep_name(weights_train_dir)
        weights_map = load_weights_map(prep_dir, weights_prep_name)

    warm_model = try_load_checkpoint(base_dir=base_dir,
                                     train_name=warm_start_train_name,
                                     checkpoint=warm_start_checkpoint,
                                     logger=logger)

    save_callback = partial(
        save_checkpoint,
        save_checkpoint_dir=get_checkpoints_dir(train_dir),
        logger=logger,
    )

    train(
        custom_hparams=custom_hparams,
        taco_logger=taco_logger,
        symbols=load_prep_symbol_converter(prep_dir),
        speakers=load_prep_speakers_json(prep_dir),
        accents=load_prep_accents_ids(prep_dir),
        trainset=trainset,
        valset=valset,
        save_callback=save_callback,
        weights_map=weights_map,
        weights_checkpoint=weights_model,
        warm_model=warm_model,
        map_from_speaker_name=map_from_speaker,
        logger=logger,
        checkpoint_logger=checkpoint_logger,
    )
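A hypothetical invocation that warm-starts a new run from an earlier one; all names, paths, and the hparam override are placeholders:

train_main(
    base_dir="/data/taco",
    train_name="ljs_run2",
    prep_name="ljs_ipa",
    warm_start_train_name="ljs_run1",
    warm_start_checkpoint=50_000,
    custom_hparams={"batch_size": "26"},
)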