# --- Generation script setup -------------------------------------------------
# Unpack the generation options from the parsed CLI arguments
# (`args` is produced by an argparse parser defined elsewhere in this file).
batched = args.batched
samples = args.samples
target = args.target
overlap = args.overlap
file = args.file
gta = args.gta

print('\nInitialising Model...\n')

# Instantiate the WaveRNN vocoder on the GPU using the vocoder ("voc_")
# hyperparameters from the shared hparams module `hp`.
model = Model(rnn_dims=hp.voc_rnn_dims,
              fc_dims=hp.voc_fc_dims,
              bits=hp.bits,
              pad=hp.voc_pad,
              upsample_factors=hp.voc_upsample_factors,
              feat_dims=hp.num_mels,
              compute_dims=hp.voc_compute_dims,
              res_out_dims=hp.voc_res_out_dims,
              res_blocks=hp.voc_res_blocks,
              hop_length=hp.hop_length,
              sample_rate=hp.sample_rate).cuda()

paths = Paths(hp.data_path, hp.voc_model_id, hp.tts_model_id)

# Prefer explicitly supplied weights; otherwise fall back to the latest
# vocoder checkpoint recorded in `paths`.
restore_path = args.weights if args.weights else paths.voc_latest_weights
model.restore(restore_path)

# Summarise the generation configuration for the user. Target/overlap only
# apply in batched mode, hence the 'N/A' placeholders.
simple_table([('Generation Mode', 'Batched' if batched else 'Unbatched'),
              ('Target Samples', target if batched else 'N/A'),
              ('Overlap Samples', overlap if batched else 'N/A')])
# --- Training-loop progress logging / checkpointing --------------------------
# NOTE(review): `e`, `epochs`, `i`, `total_iters`, `avg_loss`, `speed` and `k`
# are bound by an enclosing training loop that is outside this fragment.
msg = f'| Epoch: {e}/{epochs} ({i}/{total_iters}) | Loss: {avg_loss:#.4} | {speed:#.2} steps/s | Step: {k}k | '
stream(msg)

# Checkpoint the model and append the progress line to the training log.
model.save(paths.latest_weights)
model.log(paths.log, msg)
print(' ')

# --- Training script setup ---------------------------------------------------
print('\nInitialising Model...\n')

# Instantiate the WaveRNN model on the GPU using the (non-prefixed)
# hyperparameters from the shared hparams module `hp`.
model = Model(rnn_dims=hp.rnn_dims,
              fc_dims=hp.fc_dims,
              bits=hp.bits,
              pad=hp.pad,
              upsample_factors=hp.upsample_factors,
              feat_dims=hp.num_mels,
              compute_dims=hp.compute_dims,
              res_out_dims=hp.res_out_dims,
              res_blocks=hp.res_blocks,
              hop_length=hp.hop_length,
              sample_rate=hp.sample_rate).cuda()

paths = Paths(hp.data_path, hp.model_id)

# Resume from the latest checkpoint, then train.
model.restore(paths.latest_weights)
optimiser = optim.Adam(model.parameters())
train_set, test_set = get_datasets(paths.data, batch_size)
train_loop(model, optimiser, train_set, test_set, lr)
batched = args.batched target = args.target overlap = args.overlap input_text = args.input_text weights_path = args.weights_path print('\nInitialising WaveRNN Model...\n') # Instantiate WaveRNN Model voc_model = Model(rnn_dims=hp.voc_rnn_dims, fc_dims=hp.voc_fc_dims, bits=hp.bits, pad=hp.voc_pad, upsample_factors=hp.voc_upsample_factors, feat_dims=hp.num_mels, compute_dims=hp.voc_compute_dims, res_out_dims=hp.voc_res_out_dims, res_blocks=hp.voc_res_blocks, hop_length=hp.hop_length, sample_rate=hp.sample_rate).cuda() voc_model.restore('quick_start/voc_weights/latest_weights.pyt') print('\nInitialising Tacotron Model...\n') # Instantiate Tacotron Model tts_model = Tacotron(r=hp.tts_r, embed_dims=hp.tts_embed_dims, num_chars=len(symbols), encoder_dims=hp.tts_encoder_dims,
# --- Vocoder-training script setup -------------------------------------------
# Parse and unpack the CLI arguments (`parser` is built elsewhere in this file).
args = parser.parse_args()
batch_size = args.batch_size
force_train = args.force_train
train_gta = args.gta
lr = args.lr

print('\nInitialising Model...\n')

# Instantiate WaveRNN Model on the GPU using the "voc_" hyperparameters.
voc_model = Model(rnn_dims=hp.voc_rnn_dims,
                  fc_dims=hp.voc_fc_dims,
                  bits=hp.bits,
                  pad=hp.voc_pad,
                  upsample_factors=hp.voc_upsample_factors,
                  feat_dims=hp.num_mels,
                  compute_dims=hp.voc_compute_dims,
                  res_out_dims=hp.voc_res_out_dims,
                  res_blocks=hp.voc_res_blocks,
                  hop_length=hp.hop_length,
                  sample_rate=hp.sample_rate).cuda()

# Check to make sure the hop length is correctly factorised: the product of
# the upsample factors must reconstruct the STFT hop length exactly.
assert np.cumprod(hp.voc_upsample_factors)[-1] == hp.hop_length

paths = Paths(hp.data_path, hp.voc_model_id, hp.tts_model_id)

# Resume the vocoder from its latest checkpoint and build the optimiser.
voc_model.restore(paths.voc_latest_weights)
optimiser = optim.Adam(voc_model.parameters())