def build_model(hps, log):
    model = WaveNODE(hps)
    n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('number of parameters:', n_params)
    state = {}
    state['n_params'] = n_params
    log.write('%s\n' % json.dumps(state))
    log.flush()
    return model

def build_model(hps):
    model = WaveNODE(hps)
    print('number of parameters:', sum(p.numel() for p in model.parameters() if p.requires_grad))
    return model

def load_checkpoint(step, model):
    # NOTE: only the return statement of this helper survives in the original
    # listing; the loading logic below is an assumed minimal reconstruction
    # (checkpoint path format and checkpoint keys are guesses), kept so the call
    # in __main__ is well defined.
    checkpoint_path = os.path.join(load_path, "checkpoint_step{:09d}.pth".format(step))
    print("Load checkpoint from: {}".format(checkpoint_path))
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    model.load_state_dict(checkpoint["state_dict"])
    g_epoch = checkpoint["global_epoch"]
    g_step = checkpoint["global_step"]
    return model, g_epoch, g_step


if __name__ == "__main__":
    global global_step
    global start_time

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    args = parse_args()
    log_path, load_path = mkdir(args, test=True)
    log_speed = get_logger(log_path, args.model_name, test_speed=True)
    synth_loader = load_dataset(args)
    hps = Hyperparameters(args)

    model = build_model(hps)
    model, global_epoch, global_step = load_checkpoint(args.load_step, model)
    model = WaveNODE.remove_weightnorm(model)
    model.to(device)
    model.eval()

    if args.tol_synth != args.tol:
        from model import NODEBlock
        print('change tolerance to {}'.format(args.tol_synth))
        # Loosen the ODE solver tolerances for faster synthesis
        # (see the reference sketch after this block).
        for block in model.blocks:
            if isinstance(block, NODEBlock):
                block.chains[2].test_atol = args.tol_synth
                block.chains[2].test_rtol = args.tol_synth

    with torch.no_grad():
        synthesize(model, args.temp, args.tol_synth, log_speed)

    log_speed.close()
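

# Reference sketch, not WaveNODE's actual chain implementation: it shows how
# per-chain test_atol/test_rtol attributes (like the ones overridden in the
# tolerance block of __main__ above) typically reach an adaptive ODE solver at
# sampling time, which is why loosening them speeds up synthesis at some cost
# in fidelity. The torchdiffeq backend and all names below are assumptions made
# for illustration only.
class ODEChainSketch(torch.nn.Module):
    def __init__(self, odefunc, test_atol=1e-5, test_rtol=1e-5):
        super().__init__()
        self.odefunc = odefunc      # callable f(t, z) defining the flow dynamics
        self.test_atol = test_atol  # absolute solver tolerance used at test time
        self.test_rtol = test_rtol  # relative solver tolerance used at test time

    def sample(self, z):
        # Lazy import so this illustrative class adds no hard dependency.
        from torchdiffeq import odeint
        t = torch.tensor([0.0, 1.0], device=z.device)
        # Looser atol/rtol lets the adaptive solver take fewer steps -> faster generation.
        return odeint(self.odefunc, z, t, atol=self.test_atol, rtol=self.test_rtol)[-1]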