def __init__(self, *args, **kwargs):
    """Set up the test fixture: baseline config, model class under test and loss.

    Loads the baseline JSON config, forces a fixed symbol count, and enables
    both alignment objectives (MSE and BCE) so the joint backward pass is
    exercised.
    """
    super(BaselineDurIANBackwardPassJoinTest, self).__init__(*args, **kwargs)
    self.CLASS_TYPE = BaselineDurIAN
    # NOTE(review): relative path — assumes tests run from the tests/ dir.
    with open('../configs/baseline.json') as config_file:
        self.config = json.load(config_file)
    self.config['n_symbols'] = 100
    # Turn on both alignment losses for the joint test scenario.
    self.config['solve_alignments_as_mse'] = True
    self.config['solve_alignments_as_bce'] = True
    self.criterion = DurIANLoss(self.config)
Example #2
0
def run(TTS_FRONTEND, TTS_CONFIG, args):
    """Train a DurIAN-style TTS model end-to-end on CUDA.

    Builds train/val data loaders, instantiates the model via ``TTS_FRONTEND``,
    sets up Adam optimizers with ReduceLROnPlateau schedulers for the backbone
    and duration sub-models, then runs the training loop until ``n_epoch``
    epochs complete or the user interrupts with Ctrl-C.

    Args:
        TTS_FRONTEND: model class/factory; called with the config dict and
            expected to expose ``backbone_model``, ``duration_model`` and
            ``parse_batch``.
        TTS_CONFIG: dict-like configuration (batch size, learning rate,
            logdir, n_epoch, ...). Mutated in place: ``n_symbols`` is set from
            the text frontend's symbol table.
        args: namespace with at least a ``verbose`` flag.
    """
    show_message('Initializing data loaders...', verbose=args.verbose)
    batch_collate = BatchCollate(TTS_CONFIG)
    train_dataset = Dataset(TTS_CONFIG, training=True)
    train_dataloader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=TTS_CONFIG['batch_size'],
        shuffle=True,
        collate_fn=batch_collate,
        drop_last=True)
    val_dataset = Dataset(TTS_CONFIG, training=False)
    # shuffle=False: validation should be deterministic; shuffling eval data
    # only adds nondeterminism without changing aggregate metrics.
    val_dataloader = torch.utils.data.DataLoader(
        dataset=val_dataset,
        batch_size=TTS_CONFIG['batch_size'],
        shuffle=False,
        collate_fn=batch_collate,
        drop_last=False)

    show_message('Initializing model...', verbose=args.verbose)
    # Symbol count must match the frontend's symbol table before model build.
    TTS_CONFIG['n_symbols'] = len(batch_collate.text_frontend.SYMBOLS)
    model = TTS_FRONTEND(TTS_CONFIG)
    model.cuda()

    # Fixed typo in the log message ('schedullers' -> 'schedulers').
    show_message('Initializing optimizers, loss and schedulers...',
                 verbose=args.verbose)
    backbone_model_opt = torch.optim.Adam(
        params=model.backbone_model.parameters(),
        lr=TTS_CONFIG['learning_rate'],
        weight_decay=1e-6)
    backbone_model_lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer=backbone_model_opt, mode='min', factor=0.98, patience=500)
    duration_model_opt = torch.optim.Adam(
        params=model.duration_model.parameters(),
        lr=TTS_CONFIG['learning_rate'])
    duration_model_lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer=duration_model_opt, mode='min', factor=0.98, patience=1000)
    optimizers = {
        'backbone_model_opt': backbone_model_opt,
        'duration_model_opt': duration_model_opt
    }
    criterion = DurIANLoss(TTS_CONFIG)

    show_message('Initializing logger and trainer...', verbose=args.verbose)
    logger = Logger(TTS_CONFIG['logdir'])
    trainer = ModelTrainer(config=TTS_CONFIG,
                           optimizers=optimizers,
                           logger=logger,
                           criterion=criterion)

    show_message('Start training...', verbose=args.verbose)
    try:
        iteration = 0
        for _ in range(TTS_CONFIG['n_epoch']):
            for batch in train_dataloader:
                # Training step: forward, backward, log, then step schedulers
                # on the per-sub-model losses (losses[0]=backbone,
                # losses[1]=duration — presumably; verify against trainer).
                batch = model.parse_batch(batch)
                losses, stats = trainer.compute_loss(model,
                                                     batch,
                                                     training=True)
                grad_norm = trainer.run_backward(model, losses=losses)
                stats.update(grad_norm)
                stats.update(trainer.get_current_lrs())
                trainer.log_training(iteration, stats, verbose=args.verbose)
                backbone_model_lr_scheduler.step(losses[0])
                duration_model_lr_scheduler.step(losses[1])

                # Evaluation step — called every iteration; the trainer is
                # assumed to gate actual validation/checkpoint frequency
                # internally (TODO confirm).
                trainer.validate(iteration,
                                 model,
                                 val_dataloader,
                                 verbose=args.verbose)
                trainer.save_checkpoint(iteration, model)
                iteration += 1
    except KeyboardInterrupt:
        print('KeyboardInterrupt: training is stopped.')
        return