Example #1
    else:
      assistant = None

    # Model
    model = Tacotron(n_vocab=1 + len(ph_ids),
                     embedding_dim=256,
                     mel_dim=hparams.num_mels,
                     linear_dim=hparams.num_freq,
                     r=hparams.outputs_per_step,
                     padding_idx=hparams.padding_idx,
                     use_memory_mask=hparams.use_memory_mask,
                     )
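    # Move the model to GPU (the script assumes a CUDA device is available)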
    model = model.cuda()
    #model = DataParallelFix(model)

    optimizer = optim.Adam(model.parameters(),
                           lr=hparams.initial_learning_rate, betas=(
                               hparams.adam_beta1, hparams.adam_beta2),
                           weight_decay=hparams.weight_decay)

    # Load checkpoint
    if checkpoint_path:
        print("Load checkpoint from: {}".format(checkpoint_path))
        checkpoint = torch.load(checkpoint_path)
        model.load_state_dict(checkpoint["state_dict"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        try:
            global_step = int(checkpoint["global_step"])
            global_epoch = int(checkpoint["global_epoch"])
        except KeyError:
            # Older checkpoints may not contain these counters;
            # fall back to the defaults set earlier in the script.
            pass
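
The loading code above expects the checkpoint to carry exactly four keys: "state_dict", "optimizer", "global_step", and "global_epoch". For reference, a minimal sketch of a matching save routine follows; the save_checkpoint name, the checkpoint_dir argument, and the file-name pattern are hypothetical, not taken from the snippets.

import os
import torch

def save_checkpoint(model, optimizer, global_step, global_epoch, checkpoint_dir):
    # Persist exactly the keys the loading code above reads back.
    checkpoint_path = os.path.join(
        checkpoint_dir, "checkpoint_step{:09d}.pth".format(global_step))
    torch.save({
        "state_dict": model.state_dict(),
        "optimizer": optimizer.state_dict(),
        "global_step": global_step,
        "global_epoch": global_epoch,
    }, checkpoint_path)
    print("Saved checkpoint: {}".format(checkpoint_path))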
Example #2
    model = Tacotron(
        n_vocab=1 + len(ph_ids),
        embedding_dim=256,
        mel_dim=hparams.num_mels,
        linear_dim=hparams.num_freq,
        r=hparams.outputs_per_step,
        padding_idx=hparams.padding_idx,
        use_memory_mask=hparams.use_memory_mask,
    )
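    # Discriminator over mel-spectrogram frames; by the positional args this
    # appears to be (input dim = num_mels, hidden size = 32, num layers = 2).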
    model_discriminator = LSTMDiscriminator(hparams.num_mels, 32, 2)

    model = model.cuda()
    model_discriminator.cuda()
    #model = DataParallelFix(model)

    optimizer = optim.Adam(model.parameters(),
                           lr=hparams.initial_learning_rate,
                           betas=(hparams.adam_beta1, hparams.adam_beta2),
                           weight_decay=hparams.weight_decay)

    optimizer_discriminator = optim.Adam(model_discriminator.parameters(),
                                         lr=hparams.initial_learning_rate,
                                         betas=(hparams.adam_beta1,
                                                hparams.adam_beta2),
                                         weight_decay=hparams.weight_decay)

    # Load checkpoint
    if checkpoint_path:
        print("Load checkpoint from: {}".format(checkpoint_path))
        checkpoint = torch.load(checkpoint_path)
        model.load_state_dict(checkpoint["state_dict"])
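
Example #2 only sets the discriminator up; the snippet ends before it is used. As a rough illustration, one adversarial update over real versus generated mel spectrograms might look like the sketch below. It assumes the discriminator returns a real/fake logit per sequence and uses a BCE loss; the discriminator_step name and the mel_real/mel_fake variables are hypothetical, and the actual training loop may differ.

import torch
import torch.nn.functional as F

def discriminator_step(model_discriminator, optimizer_discriminator,
                       mel_real, mel_fake):
    # One GAN-style discriminator update: push real mels toward label 1
    # and Tacotron outputs toward label 0.
    optimizer_discriminator.zero_grad()
    logits_real = model_discriminator(mel_real)
    # detach() keeps gradients from flowing back into the Tacotron generator
    logits_fake = model_discriminator(mel_fake.detach())
    loss = (F.binary_cross_entropy_with_logits(
                logits_real, torch.ones_like(logits_real))
            + F.binary_cross_entropy_with_logits(
                logits_fake, torch.zeros_like(logits_fake)))
    loss.backward()
    optimizer_discriminator.step()
    return loss.item()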