Example #1
import tgt

import numpy as np
import tensorflow as tf

# Config, SummaryManager, ASRDataset, basic_train_parser and dynamic_memory_allocation
# come from the project's own modules; their imports are not shown in this excerpt.

# fix the random seeds for reproducibility
np.random.seed(42)
tf.random.set_seed(42)
# allow GPU memory to be allocated on demand
dynamic_memory_allocation()

# parse command-line arguments: config path plus clear_dir/clear_logs/clear_weights flags
parser = basic_train_parser()
args = parser.parse_args()

# load the configuration, prepare the output directories and persist/print the resolved config
config = Config(config_path=args.config, asr=True)
config_dict = config.config
config.create_remove_dirs(clear_dir=args.clear_dir,
                          clear_logs=args.clear_logs,
                          clear_weights=args.clear_weights)
config.dump_config()
config.print_config()

# build and compile the model defined by the config
model = config.get_model()
config.compile_model(model)

# build the validation dataset; batches are bucketed by sequence length
# (see the bucketing sketch after this example)
data_handler = ASRDataset.from_config(config,
                                      tokenizer=model.text_pipeline.tokenizer,
                                      kind='valid')
dataset = data_handler.get_dataset(
    bucket_batch_sizes=config_dict['bucket_batch_sizes'],
    bucket_boundaries=config_dict['bucket_boundaries'],
    shuffle=False)

# create the logger and checkpointer, and restore the latest model
summary_manager = SummaryManager(model=model,
                                 log_dir=config.log_dir,
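
The bucket_batch_sizes / bucket_boundaries values read from the config above drive batching by sequence length. Below is a minimal sketch of the same idea with plain tf.data, assuming TensorFlow 2.6+ (older versions expose the same transformation as tf.data.experimental.bucket_by_sequence_length); the toy data and bucket values are made up for illustration, and ASRDataset.get_dataset itself is project-specific.

import numpy as np
import tensorflow as tf

# toy 1-D "utterances" of varying length; real elements would be (text, mel, ...) tuples
lengths = [5, 12, 30, 7, 25, 60, 3]
samples = tf.data.Dataset.from_generator(
    lambda: (np.ones([n], dtype=np.float32) for n in lengths),
    output_signature=tf.TensorSpec(shape=[None], dtype=tf.float32))

# group elements into length buckets so padding inside each batch stays small:
# two boundaries define three buckets, and each bucket gets its own batch size
bucketed = samples.bucket_by_sequence_length(
    element_length_func=lambda x: tf.shape(x)[0],
    bucket_boundaries=[10, 40],
    bucket_batch_sizes=[4, 2, 1])

for batch in bucketed:
    # each batch is padded only up to the longest sequence in its own bucket
    print(batch.shape)
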
Example #2
        tag=f'Validation/predicted_mel_{fname[0].numpy().decode("utf-8")}')
    # residual = abs(model_out['mel_linear'] - model_out['final_output'])
    # summary_manager.display_mel(mel=residual[0], tag=f'Validation/conv-linear_residual')
    summary_manager.display_mel(
        mel=val_mel[0],
        tag=f'Validation/target_mel_{fname[0].numpy().decode("utf-8")}')
    return val_loss['loss']
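
The SummaryManager.display_mel calls above are project-specific. As a rough sketch of the same idea with plain TensorBoard summaries (the function name, normalisation and layout below are illustrative assumptions, not the library's actual implementation), a mel spectrogram can be written as an image summary like this:

import tensorflow as tf

def log_mel_sketch(writer, mel, tag, step):
    """Hypothetical stand-in for SummaryManager.display_mel: write a [time, n_mels]
    mel spectrogram to TensorBoard as a greyscale image."""
    # transpose to [n_mels, time] so frequency runs along the vertical axis,
    # then add the batch and channel dimensions expected by tf.summary.image
    image = tf.transpose(mel)[tf.newaxis, :, :, tf.newaxis]
    # rescale to [0, 1] so the image is visible regardless of the mel's value range
    image = (image - tf.reduce_min(image)) / (tf.reduce_max(image) - tf.reduce_min(image) + 1e-9)
    with writer.as_default():
        tf.summary.image(tag, image, step=step)

writer = tf.summary.create_file_writer('/tmp/mel_logs')   # hypothetical log directory
log_mel_sketch(writer, mel=tf.random.uniform([400, 80]), tag='Validation/target_mel_demo', step=0)
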


# load the configuration for the autoregressive model and prepare the output directories
config_manager = Config(config_path=args.config, model_kind='autoregressive')
config = config_manager.config
config_manager.create_remove_dirs(clear_dir=args.clear_dir,
                                  clear_logs=args.clear_logs,
                                  clear_weights=args.clear_weights)
config_manager.dump_config()
config_manager.print_config()

# get model, prepare data for model, create datasets
model = config_manager.get_model()
config_manager.compile_model(model)
# the same preprocessor is shared by the train and validation datasets
data_prep = AutoregressivePreprocessor.from_config(
    config_manager, tokenizer=model.text_pipeline.tokenizer)
train_data_handler = TextMelDataset.from_config(config_manager,
                                                preprocessor=data_prep,
                                                kind='train')
valid_data_handler = TextMelDataset.from_config(config_manager,
                                                preprocessor=data_prep,
                                                kind='valid')

train_dataset = train_data_handler.get_dataset(
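
The listing stops at the get_dataset( call for the training set. For orientation only, a heavily simplified loop over such a dataset is sketched below; the batch structure, model call signature, loss, optimizer settings and validation interval are assumptions, and the real script's train step, schedules and checkpointing are not shown in this excerpt.

import tensorflow as tf

optimizer = tf.keras.optimizers.Adam(1e-4)

@tf.function
def train_step(model, batch):
    # assumed (inputs, targets) batch structure and model call signature
    inputs, targets = batch
    with tf.GradientTape() as tape:
        outputs = model(inputs, training=True)
        loss = tf.reduce_mean(tf.abs(outputs - targets))
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss

# for step, batch in enumerate(train_dataset):
#     loss = train_step(model, batch)
#     if step % 1000 == 0:        # hypothetical validation interval
#         ...                     # run validation and write summaries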