Example #1
def main(args):  # pylint: disable=redefined-outer-name
    # pylint: disable=global-variable-undefined
    global meta_data_train, meta_data_eval, speaker_mapping, symbols, phonemes, model_characters
    # Audio processor
    ap = AudioProcessor(**c.audio)

    # setup custom characters if set in config file.
    if "characters" in c.keys():
        symbols, phonemes = make_symbols(**c.characters)

    # DISTRIBUTED
    if num_gpus > 1:
        init_distributed(args.rank, num_gpus, args.group_id,
                         c.distributed["backend"], c.distributed["url"])
    num_chars = len(phonemes) if c.use_phonemes else len(symbols)
    model_characters = phonemes if c.use_phonemes else symbols

    # load data instances
    meta_data_train, meta_data_eval = load_meta_data(c.datasets)

    # set the portion of the data used for training
    if "train_portion" in c.keys():
        meta_data_train = meta_data_train[:int(
            len(meta_data_train) * c.train_portion)]
    if "eval_portion" in c.keys():
        meta_data_eval = meta_data_eval[:int(
            len(meta_data_eval) * c.eval_portion)]

    # parse speakers
    num_speakers, speaker_embedding_dim, speaker_mapping = parse_speakers(
        c, args, meta_data_train, OUT_PATH)

    model = setup_model(num_chars, num_speakers, c, speaker_embedding_dim)

    # scalers for mixed precision training
    scaler = torch.cuda.amp.GradScaler() if c.mixed_precision else None
    scaler_st = (torch.cuda.amp.GradScaler()
                 if c.mixed_precision and c.separate_stopnet else None)

    # set_weight_decay builds param groups that carry their own weight decay,
    # so the optimizer's weight_decay stays 0 to avoid applying it twice
    params = set_weight_decay(model, c.wd)
    optimizer = RAdam(params, lr=c.lr, weight_decay=0)
    if c.stopnet and c.separate_stopnet:
        optimizer_st = RAdam(model.decoder.stopnet.parameters(),
                             lr=c.lr,
                             weight_decay=0)
    else:
        optimizer_st = None

    # setup criterion
    criterion = TacotronLoss(c,
                             stopnet_pos_weight=c.stopnet_pos_weight,
                             ga_sigma=0.4)
    if args.restore_path:
        print(f" > Restoring from {os.path.basename(args.restore_path)}...")
        checkpoint = torch.load(args.restore_path, map_location="cpu")
        try:
            print(" > Restoring Model...")
            model.load_state_dict(checkpoint["model"])
            # optimizer restore
            print(" > Restoring Optimizer...")
            optimizer.load_state_dict(checkpoint["optimizer"])
            if "scaler" in checkpoint and c.mixed_precision:
                print(" > Restoring AMP Scaler...")
                scaler.load_state_dict(checkpoint["scaler"])
            if c.reinit_layers:
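                # force the partial-initialization path in the except branch below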
                raise RuntimeError
        except (KeyError, RuntimeError):
            print(" > Partial model initialization...")
            model_dict = model.state_dict()
            model_dict = set_init_dict(model_dict, checkpoint["model"], c)
            # torch.save(model_dict, os.path.join(OUT_PATH, 'state_dict.pt'))
            # print("State Dict saved for debug in: ", os.path.join(OUT_PATH, 'state_dict.pt'))
            model.load_state_dict(model_dict)
            del model_dict

        for group in optimizer.param_groups:
            group["lr"] = c.lr
        print(" > Model restored from step %d" % checkpoint["step"],
              flush=True)
        args.restore_step = checkpoint["step"]
    else:
        args.restore_step = 0

    if use_cuda:
        model.cuda()
        criterion.cuda()

    # DISTRIBUTED
    if num_gpus > 1:
        model = apply_gradient_allreduce(model)

    if c.noam_schedule:
        scheduler = NoamLR(optimizer,
                           warmup_steps=c.warmup_steps,
                           last_epoch=args.restore_step - 1)
    else:
        scheduler = None

    num_params = count_parameters(model)
    print("\n > Model has {} parameters".format(num_params), flush=True)

    if args.restore_step == 0 or not args.best_path:
        best_loss = float("inf")
        print(" > Starting with inf best loss.")
    else:
        print(" > Restoring best loss from "
              f"{os.path.basename(args.best_path)} ...")
        best_loss = torch.load(args.best_path,
                               map_location="cpu")["model_loss"]
        print(f" > Starting with loaded last best loss {best_loss}.")
    keep_all_best = c.get("keep_all_best", False)
    keep_after = c.get("keep_after", 10000)  # void if keep_all_best False

    # define data loaders
    train_loader = setup_loader(ap,
                                model.decoder.r,
                                is_val=False,
                                verbose=True)
    eval_loader = setup_loader(ap, model.decoder.r, is_val=True)

    global_step = args.restore_step
    for epoch in range(0, c.epochs):
        c_logger.print_epoch_start(epoch, c.epochs)
        # set gradual training
        if c.gradual_training is not None:
            r, c.batch_size = gradual_training_scheduler(global_step, c)
            c.r = r
            model.decoder.set_r(r)
            if c.bidirectional_decoder:
                model.decoder_backward.set_r(r)
            train_loader.dataset.outputs_per_step = r
            eval_loader.dataset.outputs_per_step = r
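            # rebuild the loaders so the new r (and any batch size change) takes effect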
            train_loader = setup_loader(ap,
                                        model.decoder.r,
                                        is_val=False,
                                        dataset=train_loader.dataset)
            eval_loader = setup_loader(ap,
                                       model.decoder.r,
                                       is_val=True,
                                       dataset=eval_loader.dataset)
            print("\n > Number of output frames:", model.decoder.r)
        # train one epoch
        train_avg_loss_dict, global_step = train(
            train_loader,
            model,
            criterion,
            optimizer,
            optimizer_st,
            scheduler,
            ap,
            global_step,
            epoch,
            scaler,
            scaler_st,
        )
        # eval one epoch
        eval_avg_loss_dict = evaluate(eval_loader, model, criterion, ap,
                                      global_step, epoch)
        c_logger.print_epoch_end(epoch, eval_avg_loss_dict)
        target_loss = train_avg_loss_dict["avg_postnet_loss"]
        if c.run_eval:
            target_loss = eval_avg_loss_dict["avg_postnet_loss"]
        best_loss = save_best_model(
            target_loss,
            best_loss,
            model,
            optimizer,
            global_step,
            epoch,
            c.r,
            OUT_PATH,
            model_characters,
            keep_all_best=keep_all_best,
            keep_after=keep_after,
            scaler=scaler.state_dict() if c.mixed_precision else None,
        )
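
The gradual-training block above assumes that gradual_training_scheduler(global_step, c) returns the (r, batch_size) pair of the last schedule entry whose start step has been reached. A minimal sketch of such a helper, assuming c.gradual_training is a list of [first_step, r, batch_size] entries (this entry layout is an assumption, not shown in the example):

def gradual_training_scheduler(global_step, config):
    """Return (r, batch_size) for the current training step.

    Assumes config.gradual_training is a list of [first_step, r, batch_size]
    entries sorted by first_step, e.g. [[0, 7, 64], [10000, 5, 64], [50000, 3, 32]].
    """
    new_values = config.gradual_training[0]
    for values in config.gradual_training:
        if global_step >= values[0]:
            new_values = values  # keep the last entry whose threshold has been passed
    return new_values[1], new_values[2]
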
Example #2
def main(args):  # pylint: disable=redefined-outer-name
    # pylint: disable=global-variable-undefined
    global meta_data_train, meta_data_eval, symbols, phonemes
    # Audio processor
    ap = AudioProcessor(**c.audio)
    if 'characters' in c.keys():
        symbols, phonemes = make_symbols(**c.characters)

    # DISTRIBUTED
    if num_gpus > 1:
        init_distributed(args.rank, num_gpus, args.group_id,
                         c.distributed["backend"], c.distributed["url"])
    num_chars = len(phonemes) if c.use_phonemes else len(symbols)

    # load data instances
    meta_data_train, meta_data_eval = load_meta_data(c.datasets)

    # set the portion of the data used for training
    if 'train_portion' in c.keys():
        meta_data_train = meta_data_train[:int(
            len(meta_data_train) * c.train_portion)]
    if 'eval_portion' in c.keys():
        meta_data_eval = meta_data_eval[:int(
            len(meta_data_eval) * c.eval_portion)]

    # parse speakers
    num_speakers, speaker_embedding_dim, speaker_mapping = parse_speakers(
        c, args, meta_data_train, OUT_PATH)

    model = setup_model(num_chars, num_speakers, c, speaker_embedding_dim)

    # scalers for mixed precision training
    scaler = torch.cuda.amp.GradScaler() if c.mixed_precision else None
    scaler_st = (torch.cuda.amp.GradScaler()
                 if c.mixed_precision and c.separate_stopnet else None)

    params = set_weight_decay(model, c.wd)
    optimizer = RAdam(params, lr=c.lr, weight_decay=0)
    if c.stopnet and c.separate_stopnet:
        optimizer_st = RAdam(model.decoder.stopnet.parameters(),
                             lr=c.lr,
                             weight_decay=0)
    else:
        optimizer_st = None

    # setup criterion
    criterion = TacotronLoss(c, stopnet_pos_weight=10.0, ga_sigma=0.4)

    if args.restore_path:
        checkpoint = torch.load(args.restore_path, map_location='cpu')
        try:
            print(" > Restoring Model.")
            model.load_state_dict(checkpoint['model'])
            # optimizer restore
            print(" > Restoring Optimizer.")
            optimizer.load_state_dict(checkpoint['optimizer'])
            if "scaler" in checkpoint and c.mixed_precision:
                print(" > Restoring AMP Scaler...")
                scaler.load_state_dict(checkpoint["scaler"])
            if c.reinit_layers:
                raise RuntimeError
        except (KeyError, RuntimeError):  # RuntimeError forces partial init when reinit_layers is set
            print(" > Partial model initialization.")
            model_dict = model.state_dict()
            model_dict = set_init_dict(model_dict, checkpoint['model'], c)
            # torch.save(model_dict, os.path.join(OUT_PATH, 'state_dict.pt'))
            # print("State Dict saved for debug in: ", os.path.join(OUT_PATH, 'state_dict.pt'))
            model.load_state_dict(model_dict)
            del model_dict

        for group in optimizer.param_groups:
            group['lr'] = c.lr
        print(" > Model restored from step %d" % checkpoint['step'],
              flush=True)
        args.restore_step = checkpoint['step']
    else:
        args.restore_step = 0

    if use_cuda:
        model.cuda()
        criterion.cuda()

    # DISTRIBUTED
    if num_gpus > 1:
        model = apply_gradient_allreduce(model)

    if c.noam_schedule:
        scheduler = NoamLR(optimizer,
                           warmup_steps=c.warmup_steps,
                           last_epoch=args.restore_step - 1)
    else:
        scheduler = None

    num_params = count_parameters(model)
    print("\n > Model has {} parameters".format(num_params), flush=True)

    # best_loss is never assigned earlier in this scope, so training always starts at inf
    best_loss = float('inf')

    global_step = args.restore_step
    for epoch in range(0, c.epochs):
        c_logger.print_epoch_start(epoch, c.epochs)
        # set gradual training
        if c.gradual_training is not None:
            r, c.batch_size = gradual_training_scheduler(global_step, c)
            c.r = r
            model.decoder.set_r(r)
            if c.bidirectional_decoder:
                model.decoder_backward.set_r(r)
            print("\n > Number of output frames:", model.decoder.r)
        train_avg_loss_dict, global_step = train(model, criterion, optimizer,
                                                 optimizer_st, scheduler, ap,
                                                 global_step, epoch, scaler,
                                                 scaler_st, speaker_mapping)
        eval_avg_loss_dict = evaluate(model, criterion, ap, global_step, epoch,
                                      speaker_mapping)
        c_logger.print_epoch_end(epoch, eval_avg_loss_dict)
        target_loss = train_avg_loss_dict['avg_postnet_loss']
        if c.run_eval:
            target_loss = eval_avg_loss_dict['avg_postnet_loss']
        best_loss = save_best_model(
            target_loss,
            best_loss,
            model,
            optimizer,
            global_step,
            epoch,
            c.r,
            OUT_PATH,
            scaler=scaler.state_dict() if c.mixed_precision else None)
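
Both examples rely on module-level state (c, OUT_PATH, use_cuda, num_gpus, c_logger) being prepared by the surrounding training script before main() is called, and they only read args.restore_path, args.best_path (Example #1), args.rank, and args.group_id from the parsed arguments. A hypothetical entry point sketching how such a main() is typically wired up (the flag names are assumptions, not taken from the examples):

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--restore_path", type=str, default="",
                        help="Checkpoint to resume training from.")
    parser.add_argument("--best_path", type=str, default="",
                        help="Best-model checkpoint used to restore best_loss.")
    parser.add_argument("--rank", type=int, default=0,
                        help="Process rank for distributed training.")
    parser.add_argument("--group_id", type=str, default="",
                        help="Process group id for distributed training.")
    args = parser.parse_args()

    main(args)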