Example #1
0
def create_or_update_weights_map_main(base_dir: str,
                                      prep_name: str,
                                      weights_prep_name: str,
                                      template_map: Optional[str] = None):
    """Create or refresh the symbol weights map from the weights prep
    dataset onto the target prep dataset, then persist map and symbols.

    Args:
        base_dir: root directory of the workspace.
        prep_name: name of the target prepared dataset.
        weights_prep_name: name of the prepared dataset supplying weights.
        template_map: optional path to a SymbolsMap file used as template.
    """
    prep_dir = get_prepared_dir(base_dir, prep_name)
    assert os.path.isdir(prep_dir)
    orig_prep_dir = get_prepared_dir(base_dir, weights_prep_name)
    assert os.path.isdir(orig_prep_dir)

    logger = init_logger()
    add_console_out_to_logger(logger)
    logger.info(f"Creating/updating weights map for {weights_prep_name}...")

    # Load the optional template map only when a path was supplied.
    _template_map = SymbolsMap.load(template_map) if template_map is not None else None

    # Start from the previously saved map when one exists on disk.
    existing_map = (load_weights_map(prep_dir, weights_prep_name)
                    if weights_map_exists(prep_dir, weights_prep_name)
                    else None)

    weights_map, symbols = create_or_update_weights_map(
        orig=load_prep_symbol_converter(orig_prep_dir).get_all_symbols(),
        dest=load_prep_symbol_converter(prep_dir).get_all_symbols(),
        existing_map=existing_map,
        template_map=_template_map,
    )

    save_weights_map(prep_dir, weights_prep_name, weights_map)
    save_weights_symbols(prep_dir, weights_prep_name, symbols)
Example #2
0
def create_or_update_inference_map_main(base_dir: str,
                                        prep_name: str,
                                        template_map: Optional[str] = None):
    """Create or refresh the inference symbols map for a prepared dataset
    and persist both the map and the symbol list.

    Args:
        base_dir: root directory of the workspace.
        prep_name: name of the prepared dataset.
        template_map: optional path to a SymbolsMap file used as template.
    """
    logger = init_logger()
    add_console_out_to_logger(logger)
    logger.info("Creating/updating inference map...")
    prep_dir = get_prepared_dir(base_dir, prep_name)
    assert os.path.isdir(prep_dir)

    all_symbols = get_all_symbols(prep_dir)

    # Load the optional template map only when a path was supplied.
    _template_map = SymbolsMap.load(template_map) if template_map is not None else None

    # Start from the previously saved map when one exists on disk.
    existing_map = load_infer_map(prep_dir) if infer_map_exists(prep_dir) else None

    infer_map, symbols = create_or_update_inference_map(
        orig=load_prep_symbol_converter(prep_dir).get_all_symbols(),
        dest=all_symbols,
        existing_map=existing_map,
        template_map=_template_map,
    )

    save_infer_map(prep_dir, infer_map)
    save_infer_symbols(prep_dir, symbols)
Example #3
0
def convert_model(base_dir: str, prep_name: str, model_path: str,
                  custom_hparams: Optional[Dict[str, str]]):
    """Convert a v1 model checkpoint to the v2 format, using speaker,
    accent and symbol metadata from the prepared dataset.

    Args:
        base_dir: root directory of the workspace.
        prep_name: name of the prepared dataset the model belongs to.
        model_path: path to the old (v1) model checkpoint.
        custom_hparams: optional hyperparameter overrides.
    """
    prep_dir = get_prepared_dir(base_dir, prep_name)

    # Gather the dataset metadata the converter needs.
    speakers = load_prep_speakers_json(prep_dir)
    accents = load_prep_accents_ids(prep_dir)
    symbols = load_prep_symbol_converter(prep_dir)

    convert_v1_to_v2_model(old_model_path=model_path,
                           custom_hparams=custom_hparams,
                           speakers=speakers,
                           accents=accents,
                           symbols=symbols)
Example #4
0
def _check_for_unknown_symbols(base_dir: str, prep_name: str, text_name: str):
    """Check inference sentences for symbols missing from the prepared
    dataset's symbol set and log the appropriate guidance message."""
    infer_sents = get_infer_sentences(base_dir, prep_name, text_name)

    prep_dir = get_prepared_dir(base_dir, prep_name, create=False)
    logger = prepare_logger()
    model_symbols = load_prep_symbol_converter(prep_dir)
    unknown_symbols_exist = infer_sents.replace_unknown_symbols(
        model_symbols=model_symbols, logger=logger)

    # Tell the user whether an inference map is required before synthesis.
    message = (
        "Some symbols are not in the prepared dataset symbolset. You need to create an inference map and then apply it to the symbols."
        if unknown_symbols_exist else
        "All symbols are in the prepared dataset symbolset. You can now synthesize this text."
    )
    logger.info(message)
def eval_checkpoints_main(base_dir: str, train_name: str, select: int,
                          min_it: int, max_it: int):
    """Evaluate a selection of training checkpoints within an iteration
    range on the validation set.

    Args:
        base_dir: root directory of the workspace.
        train_name: name of the training run to evaluate.
        select: checkpoint selection stride/count (passed through).
        min_it: lowest iteration to consider.
        max_it: highest iteration to consider.
    """
    train_dir = get_train_dir(base_dir, train_name, create=False)
    assert os.path.isdir(train_dir)

    # Resolve the prepared dataset this run was trained on.
    prep_name = load_prep_name(train_dir)
    prep_dir = get_prepared_dir(base_dir, prep_name)

    symbols_conv = load_prep_symbol_converter(prep_dir)
    speakers = load_prep_speakers_json(prep_dir)
    accents = load_prep_accents_ids(prep_dir)

    logger = prepare_logger()

    eval_checkpoints(custom_hparams=None,
                     checkpoint_dir=get_checkpoints_dir(train_dir),
                     select=select,
                     min_it=min_it,
                     max_it=max_it,
                     n_symbols=len(symbols_conv),
                     n_speakers=len(speakers),
                     n_accents=len(accents),
                     valset=load_valset(train_dir),
                     logger=logger)
Example #6
0
def train_main(base_dir: str,
               train_name: str,
               prep_name: str,
               warm_start_train_name: Optional[str] = None,
               warm_start_checkpoint: Optional[int] = None,
               test_size: float = 0.01,
               validation_size: float = 0.05,
               custom_hparams: Optional[Dict[str, str]] = None,
               split_seed: int = 1234,
               weights_train_name: Optional[str] = None,
               weights_checkpoint: Optional[int] = None,
               use_weights_map: Optional[bool] = None,
               map_from_speaker: Optional[str] = None):
    """Set up loggers, split the dataset, optionally load warm-start /
    weights checkpoints, and start training.

    Args:
        base_dir: root directory of the workspace.
        train_name: name of this training run (its directory is created).
        prep_name: name of the prepared dataset to train on.
        warm_start_train_name: optional run to warm-start the model from.
        warm_start_checkpoint: optional iteration of the warm-start checkpoint.
        test_size: fraction of data held out for testing.
        validation_size: fraction of data held out for validation.
        custom_hparams: optional hyperparameter overrides.
        split_seed: RNG seed for the train/val/test split.
        weights_train_name: optional run whose embedding weights are reused.
        weights_checkpoint: optional iteration of the weights checkpoint.
        use_weights_map: when truthy, also load the weights map between the
            two prepared datasets.
        map_from_speaker: optional speaker name to map embeddings from.
    """
    prep_dir = get_prepared_dir(base_dir, prep_name)
    train_dir = get_train_dir(base_dir, train_name, create=True)
    logs_dir = get_train_logs_dir(train_dir)

    taco_logger = Tacotron2Logger(logs_dir)
    logger = prepare_logger(get_train_log_file(logs_dir), reset=True)
    # Separate logger so checkpoint events land in their own log file.
    checkpoint_logger = prepare_logger(
        log_file_path=get_train_checkpoints_log_file(logs_dir),
        logger=logging.getLogger("checkpoint-logger"),
        reset=True)

    # Remember which prepared dataset this run belongs to.
    save_prep_name(train_dir, prep_name)

    trainset, valset = split_dataset(prep_dir=prep_dir,
                                     train_dir=train_dir,
                                     test_size=test_size,
                                     validation_size=validation_size,
                                     split_seed=split_seed)

    # NOTE(review): try_load_checkpoint presumably returns None when
    # weights_train_name/checkpoint are None — confirm in its definition.
    weights_model = try_load_checkpoint(base_dir=base_dir,
                                        train_name=weights_train_name,
                                        checkpoint=weights_checkpoint,
                                        logger=logger)

    weights_map = None
    # Idiomatic truthiness: None and False are both falsy, so the previous
    # `is not None and use_weights_map` double check is redundant.
    if use_weights_map:
        weights_train_dir = get_train_dir(base_dir, weights_train_name, False)
        weights_prep_name = load_prep_name(weights_train_dir)
        weights_map = load_weights_map(prep_dir, weights_prep_name)

    warm_model = try_load_checkpoint(base_dir=base_dir,
                                     train_name=warm_start_train_name,
                                     checkpoint=warm_start_checkpoint,
                                     logger=logger)

    # Pre-bind the checkpoint directory and logger so train() only has
    # to supply the checkpoint itself.
    save_callback = partial(
        save_checkpoint,
        save_checkpoint_dir=get_checkpoints_dir(train_dir),
        logger=logger,
    )

    train(
        custom_hparams=custom_hparams,
        taco_logger=taco_logger,
        symbols=load_prep_symbol_converter(prep_dir),
        speakers=load_prep_speakers_json(prep_dir),
        accents=load_prep_accents_ids(prep_dir),
        trainset=trainset,
        valset=valset,
        save_callback=save_callback,
        weights_map=weights_map,
        weights_checkpoint=weights_model,
        warm_model=warm_model,
        map_from_speaker_name=map_from_speaker,
        logger=logger,
        checkpoint_logger=checkpoint_logger,
    )