# --- Data pipeline -------------------------------------------------------
# Wrap the pre-loaded arrays in a Dataset and hand them to a DataLoader.
# All loader knobs (batch size, workers, pinning) come from hparams.
dataset = PyTorchDataset(X, Mel, Y)
data_loader = data_utils.DataLoader(
    dataset,
    batch_size=hparams.batch_size,
    num_workers=hparams.num_workers,
    shuffle=True,
    collate_fn=collate_fn,
    pin_memory=hparams.pin_memory,
)

# --- Model ---------------------------------------------------------------
# Vocabulary size is len(charids) + 1 to reserve an index for padding.
model = Tacotron(
    n_vocab=1 + len(charids),
    embedding_dim=256,
    mel_dim=hparams.num_mels,
    linear_dim=hparams.num_freq,
    r=hparams.outputs_per_step,
    padding_idx=hparams.padding_idx,
    use_memory_mask=hparams.use_memory_mask,
)
model = model.cuda()  # training assumes a CUDA device is available

# --- Optimizer -----------------------------------------------------------
optimizer = optim.Adam(
    model.parameters(),
    lr=hparams.initial_learning_rate,
    betas=(hparams.adam_beta1, hparams.adam_beta2),
    weight_decay=hparams.weight_decay,
)

# --- Resume from checkpoint (optional) ----------------------------------
# NOTE(review): the loaded checkpoint is consumed past this chunk's end.
if checkpoint_path:
    print("Load checkpoint from: {}".format(checkpoint_path))
    checkpoint = torch.load(checkpoint_path)
# --- CLI arguments (docopt-style dict) -----------------------------------
checkpoint_path = args["<checkpoint>"]
text_list_file_path = args["<text_list_file>"]
dst_dir = args["<dst_dir>"]
max_decoder_steps = int(args["--max-decoder-steps"])
file_name_suffix = args["--file-name-suffix"]

# --- Load checkpoint and vocabulary ONCE ---------------------------------
# FIX: the original loaded the (potentially large) checkpoint twice and
# parsed ids.json twice; a single load of each is sufficient and identical.
checkpoint = torch.load(checkpoint_path)
checkpoints_dir = os.path.dirname(checkpoint_path)
# ids.json lives next to the checkpoint; it maps characters to integer ids.
with open(os.path.join(checkpoints_dir, 'ids.json')) as f:
    charids = json.load(f)

# --- Model ---------------------------------------------------------------
# n_vocab reserves one extra index for padding, mirroring training.
model = Tacotron(
    n_vocab=len(charids) + 1,
    embedding_dim=256,
    mel_dim=hparams.num_mels,
    linear_dim=hparams.num_freq,
    r=hparams.outputs_per_step,
    padding_idx=hparams.padding_idx,
    use_memory_mask=hparams.use_memory_mask,
)

# Normalize the vocabulary to a plain dict (no-op if json already gave one),
# preserving the original's post-construction conversion order.
charids = dict(charids)

model.load_state_dict(checkpoint["state_dict"])
# Cap autoregressive decoding so synthesis cannot run unbounded.
model.decoder.max_decoder_steps = max_decoder_steps

os.makedirs(dst_dir, exist_ok=True)