def train(args):
    # set random seed
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    # init model and optimizer
    if args.verbose:
        print("Training baseline model:" if args.baseline else "Training HNN model:")
    output_dim = args.input_dim if args.baseline else 2
    nn_model = MLP(args.input_dim, args.hidden_dim, output_dim, args.nonlinearity)
    model = HNN(args.input_dim, differentiable_model=nn_model,
                field_type=args.field_type, baseline=args.baseline)
    optim = torch.optim.Adam(model.parameters(), args.learn_rate, weight_decay=0)

    # arrange data
    data = get_dataset(args.name, args.save_dir, verbose=True)
    x = torch.tensor(data['coords'], requires_grad=True, dtype=torch.float32)
    test_x = torch.tensor(data['test_coords'], requires_grad=True, dtype=torch.float32)
    dxdt = torch.Tensor(data['dcoords'])
    test_dxdt = torch.Tensor(data['test_dcoords'])

    # vanilla train loop
    stats = {'train_loss': [], 'test_loss': []}
    for step in range(args.total_steps + 1):

        # train step
        ixs = torch.randperm(x.shape[0])[:args.batch_size]
        dxdt_hat = model.time_derivative(x[ixs])
        dxdt_hat += args.input_noise * torch.randn(*x[ixs].shape)  # add noise, maybe
        loss = L2_loss(dxdt[ixs], dxdt_hat)
        loss.backward()
        grad = torch.cat([p.grad.flatten() for p in model.parameters()]).clone()
        optim.step()
        optim.zero_grad()

        # run test data
        test_ixs = torch.randperm(test_x.shape[0])[:args.batch_size]
        test_dxdt_hat = model.time_derivative(test_x[test_ixs])
        test_dxdt_hat += args.input_noise * torch.randn(*test_x[test_ixs].shape)  # add noise, maybe
        test_loss = L2_loss(test_dxdt[test_ixs], test_dxdt_hat)

        # logging
        stats['train_loss'].append(loss.item())
        stats['test_loss'].append(test_loss.item())
        if args.verbose and step % args.print_every == 0:
            # (grad @ grad) is the squared norm; take the sqrt so the printed
            # value matches the "grad norm" label
            print("step {}, train_loss {:.4e}, test_loss {:.4e}, grad norm {:.4e}, grad std {:.4e}"
                  .format(step, loss.item(), test_loss.item(), (grad @ grad).sqrt(), grad.std()))

    train_dxdt_hat = model.time_derivative(x)
    train_dist = (dxdt - train_dxdt_hat) ** 2
    test_dxdt_hat = model.time_derivative(test_x)
    test_dist = (test_dxdt - test_dxdt_hat) ** 2
    print('Final train loss {:.4e} +/- {:.4e}\nFinal test loss {:.4e} +/- {:.4e}'
          .format(train_dist.mean().item(), train_dist.std().item() / np.sqrt(train_dist.shape[0]),
                  test_dist.mean().item(), test_dist.std().item() / np.sqrt(test_dist.shape[0])))

    return model, stats
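# Note: every train loop in this file calls an L2_loss helper that is not
# defined here. A minimal sketch, assuming it is the plain mean squared error
# used by the reference HNN implementation:
def L2_loss(u, v):
    # mean of elementwise squared differences between target and prediction
    return (u - v).pow(2).mean()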
def get_hnn_model(args, baseline):
    # use the explicit baseline argument (the original ignored it and read
    # args.baseline, making the parameter dead)
    output_dim = args.input_dim if baseline else 2
    nn_model = MLP(args.input_dim, 400, output_dim, args.nonlinearity)
    model = HNN(args.input_dim, differentiable_model=nn_model,
                field_type=args.field_type, baseline=baseline)
    label = '-baseline' if baseline else '-hnn'
    label = label + '-rad' if args.rad else label
    path = '{}/{}{}.tar'.format(args.save_dir, args.name, label)
    # the original computed path but never used it; restoring the saved
    # weights is the apparent intent
    model.load_state_dict(torch.load(path))
    return model
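# Hypothetical usage sketch, assuming args carries the fields referenced above
# (save_dir, name, rad, input_dim, nonlinearity, field_type):
# hnn_model = get_hnn_model(args, baseline=False)
# base_model = get_hnn_model(args, baseline=True)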
if args.model == 'baseline':
    print('Training baseline model ...')
    out_dim = args.input_dim
    model = BLNN(args.input_dim, args.hidden_dim, out_dim, args.activation_fn)
    optim = torch.optim.Adam(model.parameters(), args.learn_rate, weight_decay=1e-4)
    stats = model.train(args, data, optim)
else:
    print('Training hamiltonian neural network ...')
    out_dim = 1
    nn_model = BLNN(args.input_dim, args.hidden_dim, out_dim, args.activation_fn)
    model = HNN(args.input_dim, baseline_model=nn_model)
    optim = torch.optim.Adam(model.parameters(), args.learn_rate, weight_decay=1e-4)
    stats = model.train(args, data, optim)

os.makedirs(args.save_dir, exist_ok=True)

label = 'baseline' if args.model == 'baseline' else 'hnn'
path = '{}/{}_nlayers_{}-orbits-{}_integrator_{}_epochs_{}_BatchSize_{}.tar'.format(
    args.save_dir, args.name, len(args.hidden_dim), label,
    args.integrator_scheme, args.epochs, args.batch_size)
torch.save(model.state_dict(), path)  # assumed checkpoint step; path was otherwise unused
def train(args):
    # set random seed
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    # init model and optimizer
    if args.verbose:
        print("Training baseline model:" if args.baseline else "Training HNN model:")
    output_dim = args.input_dim if args.baseline else 2
    nn_model = MLP(args.input_dim, 400, output_dim, args.nonlinearity)
    model = HNN(args.input_dim, differentiable_model=nn_model,
                field_type=args.field_type, baseline=args.baseline)
    optim = torch.optim.Adam(model.parameters(), args.learn_rate, weight_decay=1e-4)

    # the data API is different here; to keep the comparison fair, generate the
    # data the same way as in SymODEN, then compute time derivatives from the
    # generated trajectories by finite differences
    us = [0.0]
    data = get_dataset(seed=args.seed, save_dir=args.save_dir, rad=args.rad,
                       us=us, samples=50, timesteps=45)

    # arrange data
    train_x, t_eval = data['x'][0, :, :, 0:2], data['t']
    test_x, t_eval = data['test_x'][0, :, :, 0:2], data['t']
    train_dxdt = (train_x[1:, :, :] - train_x[:-1, :, :]) / (t_eval[1] - t_eval[0])
    test_dxdt = (test_x[1:, :, :] - test_x[:-1, :, :]) / (t_eval[1] - t_eval[0])

    train_x = train_x[0:-1, :, :].reshape((-1, 2))
    test_x = test_x[0:-1, :, :].reshape((-1, 2))
    test_dxdt = test_dxdt.reshape((-1, 2))
    train_dxdt = train_dxdt.reshape((-1, 2))

    x = torch.tensor(train_x, requires_grad=True, dtype=torch.float32)
    test_x = torch.tensor(test_x, requires_grad=True, dtype=torch.float32)
    dxdt = torch.Tensor(train_dxdt)
    test_dxdt = torch.Tensor(test_dxdt)

    # vanilla train loop
    stats = {'train_loss': [], 'test_loss': []}
    for step in range(args.total_steps + 1):

        # train step
        dxdt_hat = model.rk4_time_derivative(x) if args.use_rk4 else model.time_derivative(x)
        loss = L2_loss(dxdt, dxdt_hat)
        loss.backward()
        optim.step()
        optim.zero_grad()

        # run test data
        test_dxdt_hat = model.rk4_time_derivative(test_x) if args.use_rk4 else model.time_derivative(test_x)
        test_loss = L2_loss(test_dxdt, test_dxdt_hat)

        # logging
        stats['train_loss'].append(loss.item())
        stats['test_loss'].append(test_loss.item())
        if args.verbose and step % args.print_every == 0:
            print("step {}, train_loss {:.4e}, test_loss {:.4e}".format(step, loss.item(), test_loss.item()))

    train_dxdt_hat = model.time_derivative(x)
    train_dist = (dxdt - train_dxdt_hat) ** 2
    test_dxdt_hat = model.time_derivative(test_x)
    test_dist = (test_dxdt - test_dxdt_hat) ** 2
    print('Final train loss {:.4e} +/- {:.4e}\nFinal test loss {:.4e} +/- {:.4e}'
          .format(train_dist.mean().item(), train_dist.std().item() / np.sqrt(train_dist.shape[0]),
                  test_dist.mean().item(), test_dist.std().item() / np.sqrt(test_dist.shape[0])))

    return model, stats
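# For reference: the core operation behind model.time_derivative in these
# scripts, per the Hamiltonian Neural Network formulation, is a symplectic
# gradient of a learned scalar Hamiltonian. A minimal sketch (an illustration,
# not this repo's exact implementation), assuming states are (q, p) columns
# and H maps a batch of states to one scalar per sample:
import torch

def hamiltonian_time_derivative(H, x):
    # x: (batch, 2) tensor with requires_grad=True; columns are (q, p)
    dH = torch.autograd.grad(H(x).sum(), x, create_graph=True)[0]
    dHdq, dHdp = dH[:, 0:1], dH[:, 1:2]
    # Hamilton's equations: dq/dt = dH/dp, dp/dt = -dH/dq
    return torch.cat([dHdp, -dHdq], dim=1)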
def train(args):
    # set random seed
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    # init model and optimizer
    if args.verbose:
        print("Training baseline model:" if args.baseline else "Training HNN model:")
    output_dim = args.input_dim if args.baseline else 2
    nn_model = MLP(args.input_dim, args.hidden_dim, output_dim, args.nonlinearity)
    model = HNN(args.input_dim, differentiable_model=nn_model,
                field_type=args.field_type, baseline=args.baseline)
    optim = torch.optim.Adam(model.parameters(), args.learn_rate, weight_decay=1e-4)

    # arrange data
    data = get_dataset(seed=args.seed)
    x = torch.tensor(data['x'], requires_grad=True, dtype=torch.float32)
    test_x = torch.tensor(data['test_x'], requires_grad=True, dtype=torch.float32)
    dxdt = torch.Tensor(data['dx'])
    test_dxdt = torch.Tensor(data['test_dx'])

    # vanilla train loop
    stats = {'train_loss': [], 'test_loss': []}
    for step in range(args.total_steps + 1):

        # train step
        dxdt_hat = model.rk4_time_derivative(x) if args.use_rk4 else model.time_derivative(x)
        loss = L2_loss(dxdt, dxdt_hat)
        loss.backward()
        optim.step()
        optim.zero_grad()

        # run test data
        test_dxdt_hat = model.rk4_time_derivative(test_x) if args.use_rk4 else model.time_derivative(test_x)
        test_loss = L2_loss(test_dxdt, test_dxdt_hat)

        # logging
        stats['train_loss'].append(loss.item())
        stats['test_loss'].append(test_loss.item())
        if args.verbose and step % args.print_every == 0:
            print("step {}, train_loss {:.4e}, test_loss {:.4e}".format(step, loss.item(), test_loss.item()))

    train_dxdt_hat = model.time_derivative(x)
    train_dist = (dxdt - train_dxdt_hat) ** 2
    test_dxdt_hat = model.time_derivative(test_x)
    test_dist = (test_dxdt - test_dxdt_hat) ** 2
    print('Final train loss {:.4e} +/- {:.4e}\nFinal test loss {:.4e} +/- {:.4e}'
          .format(train_dist.mean().item(), train_dist.std().item() / np.sqrt(train_dist.shape[0]),
                  test_dist.mean().item(), test_dist.std().item() / np.sqrt(test_dist.shape[0])))

    return model, stats
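# The use_rk4 branch above relies on model.rk4_time_derivative, which is not
# shown in this file. A minimal sketch of an RK4-averaged derivative estimate,
# assuming f is the learned vector field and dt a small step (hypothetical
# helper, not the repo's exact code):
def rk4_time_derivative_sketch(f, x, dt=1e-2):
    # classic Runge-Kutta 4 stage evaluations of the learned vector field
    k1 = f(x)
    k2 = f(x + (dt / 2) * k1)
    k3 = f(x + (dt / 2) * k2)
    k4 = f(x + dt * k3)
    # weighted average of the four stage slopes: an O(dt^4)-accurate estimate
    return (k1 + 2 * k2 + 2 * k3 + k4) / 6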
def train(args):
    if torch.cuda.is_available() and not args.cpu:
        device = torch.device("cuda:0")
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        torch.cuda.empty_cache()
        print("Running on the GPU")
    else:
        device = torch.device("cpu")
        print("Running on the CPU")

    # set random seed
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    print("{} {}".format(args.folder, args.speed))
    print("Training scaled model:" if args.scaled else "Training noisy model:")
    print('{} pairs of coords in latent space'.format(args.latent_dim))

    # using the universal autoencoder, pre-encode the training points
    autoencoder = MLPAutoencoder(args.input_dim_ae, args.hidden_dim,
                                 args.latent_dim * 2, nonlinearity='relu')
    full_model = PixelHNN(args.latent_dim * 2, args.hidden_dim,
                          autoencoder=autoencoder, nonlinearity=args.nonlinearity,
                          baseline=args.baseline)
    path = "{}/saved_models/{}.tar".format(args.save_dir, args.ae_path)
    full_model.load_state_dict(torch.load(path))
    full_model.eval()
    autoencoder_model = full_model.autoencoder

    # get dataset (no test data for now)
    data = get_dataset(args.folder, args.speed, scaled=args.scaled,
                       split=args.split_data, experiment_dir=args.experiment_dir,
                       tensor=True)

    gcoords = autoencoder_model.encode(data).cpu().detach().numpy()
    x = torch.tensor(gcoords, dtype=torch.float, requires_grad=True)
    dx_np = full_model.time_derivative(
        torch.tensor(gcoords, dtype=torch.float, requires_grad=True)).cpu().detach().numpy()
    dx = torch.tensor(dx_np, dtype=torch.float)

    nnmodel = MLP(args.input_dim, args.hidden_dim, args.output_dim)
    model = HNN(2, nnmodel)
    model.to(device)
    optim = torch.optim.Adam(model.parameters(), args.learn_rate, weight_decay=args.weight_decay)

    # vanilla ae train loop
    stats = {'train_loss': [], 'test_loss': []}
    for step in range(args.total_steps + 1):

        # train step
        ixs = torch.randperm(x.shape[0])[:args.batch_size]
        x_train, dxdt = x[ixs].to(device), dx[ixs].to(device)
        dxdt_hat = model.time_derivative(x_train)
        loss = L2_loss(dxdt, dxdt_hat)
        loss.backward()
        optim.step()
        optim.zero_grad()

        stats['train_loss'].append(loss.item())
        if step % args.print_every == 0:
            print("step {}, train_loss {:.4e}".format(step, loss.item()))

    # train_dist = hnn_ae_loss(x, x_next, model, return_scalar=False)
    # print('Final train loss {:.4e} +/- {:.4e}'
    #       .format(train_dist.mean().item(), train_dist.std().item() / np.sqrt(train_dist.shape[0])))

    return model
def train(args):
    # set random seed
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)

    # init model and optimizer
    if args.verbose:
        print("Training baseline model:" if args.baseline else "Training HNN model:")
    output_dim = args.input_dim if args.baseline else 2
    nn_model = MLPAutoencoder(args.input_dim, args.hidden_dim, args.latent_dim, args.nonlinearity)
    nn_model.to(device)
    model = HNN(args.input_dim, differentiable_model=nn_model,
                field_type=args.field_type, baseline=args.baseline, device=device)
    model.to(device)
    optim = torch.optim.Adam(model.parameters(), args.learn_rate, weight_decay=0)

    # arrange data
    X = np.load('statrectinputs.npy')
    Y = np.load('statrectoutputs.npy')
    Y[~np.isfinite(Y)] = 0
    xm, xd = give_min_and_dist(X)
    ym, yd = give_min_and_dist(Y)
    X = scale(X, xm, xd)
    Y = scale(Y, ym, yd)

    # 80/20 train/test split (the original sliced X[:-int(0.2*n_egs)], which
    # selects the first 80% again and makes the test set overlap the train set)
    n_egs = X.shape[0]
    split = int(0.8 * n_egs)
    x = X[:split, :]
    dxdt = Y[:split, :]
    test_x = torch.tensor(X[split:, :], requires_grad=True, dtype=torch.float32, device=device)
    test_dxdt = torch.tensor(Y[split:, :], dtype=torch.float32, device=device)

    # vanilla train loop
    stats = {'train_loss': [], 'test_loss': []}
    for step in range(args.total_steps + 1):

        # train step; batch into fresh tensors so the full numpy dataset stays
        # intact (the original overwrote x with each mini-batch)
        ixs = torch.randperm(x.shape[0])[:args.batch_size]
        x_batch = torch.tensor(x[ixs], requires_grad=True, dtype=torch.float32, device=device)
        dxdt_hat = model.time_derivative(x_batch)
        y_batch = torch.tensor(dxdt[ixs], dtype=torch.float32, device=device)
        loss = L2_loss(y_batch, dxdt_hat)
        loss.backward()
        grad = torch.cat([p.grad.flatten() for p in model.parameters()]).clone()
        optim.step()
        optim.zero_grad()

        # run test data
        test_ixs = torch.randperm(test_x.shape[0])[:args.batch_size]
        test_dxdt_hat = model.time_derivative(test_x[test_ixs])
        # test_dxdt_hat += args.input_noise * torch.randn(*test_x[test_ixs].shape)  # add noise, maybe
        test_loss = L2_loss(test_dxdt[test_ixs], test_dxdt_hat)

        # logging
        stats['train_loss'].append(loss.item())
        stats['test_loss'].append(test_loss.item())
        if args.verbose and step % args.print_every == 0:
            # (grad @ grad) is the squared norm; take the sqrt to match the label
            print("step {}, train_loss {:.4e}, test_loss {:.4e}, grad norm {:.4e}, grad std {:.4e}"
                  .format(step, loss.item(), test_loss.item(), (grad @ grad).sqrt(), grad.std()))

    # visualize the latent encoding of up to 10000 training points
    ixs = torch.randperm(x.shape[0])[:10000]
    x_vis = torch.tensor(x[ixs], requires_grad=True, dtype=torch.float32, device=device)
    enc = model.encoding(x_vis).detach().cpu().numpy()
    print(x_vis.shape)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # cmap='hot' (the original passed cmap=plt.hot(), which returns None)
    img = ax.scatter(enc[:, 0], enc[:, 3], enc[:, 2], c=enc[:, 1], cmap='hot')
    fig.colorbar(img)
    plt.savefig('lrep.png')

    # integrate one orbit from a fixed initial condition
    y0 = torch.tensor([0.4, 0.3, 1 / np.sqrt(2), 1 / np.sqrt(2)], dtype=torch.float32)
    update_fn = lambda t, y0: model_update(t, y0, model)
    orbit, settings = get_orbit(y0, t_points=10, t_span=[0, 10], update_fn=update_fn)
    print(orbit)
    plt.figure()  # new figure so the orbit is not drawn onto the 3D axes
    plt.scatter(orbit[:, 0], orbit[:, 1])
    plt.savefig('orbit.png')

    return model, stats
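# model_update and get_orbit are referenced above but not defined in this file.
# A hypothetical sketch, inferred from the call site, assuming get_orbit wraps
# scipy.integrate.solve_ivp and model_update evaluates the learned vector field
# at a numpy state:
import numpy as np
import torch
from scipy.integrate import solve_ivp

def model_update(t, y, model):
    # numpy state -> torch batch of one -> learned dx/dt -> flat numpy array
    x = torch.tensor(y, dtype=torch.float32, requires_grad=True).view(1, -1)
    return model.time_derivative(x).detach().cpu().numpy().reshape(-1)

def get_orbit(y0, t_points, t_span, update_fn):
    # integrate the learned dynamics; return states as rows plus solver output
    t_eval = np.linspace(t_span[0], t_span[1], t_points)
    sol = solve_ivp(update_fn, t_span, np.asarray(y0), t_eval=t_eval, rtol=1e-6)
    return sol.y.T, sol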
def train(args):
    if torch.cuda.is_available() and not args.cpu:
        device = torch.device("cuda:0")
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        torch.cuda.empty_cache()
        print("Running on the GPU")
    else:
        device = torch.device("cpu")
        print("Running on the CPU")

    # set random seed
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    # get dataset (no test data for now)
    angular_velo, acc_1, acc_2, sound = get_dataset_split(
        args.folder, args.speed, scaled=args.scaled,
        experiment_dir=args.experiment_dir, tensor=True)
    sub_col = {
        0: [angular_velo, 1, 'v'],
        1: [acc_1, 3, 'a1'],
        2: [acc_2, 3, 'a2'],
        3: [sound, 1, 's']
    }
    col2use = sub_col[args.sub_columns][0]

    # using the universal autoencoder, pre-encode the training points
    autoencoder = MLPAutoencoder(sub_col[args.sub_columns][1], args.hidden_dim,
                                 args.latent_dim * 2, dropout_rate=args.dropout_rate_ae)
    full_model = PixelHNN(args.latent_dim * 2, args.hidden_dim,
                          autoencoder=autoencoder, nonlinearity=args.nonlinearity,
                          baseline=args.baseline, dropout_rate=args.dropout_rate)
    path = "{}/saved_models/{}-{}.tar".format(args.save_dir, args.ae_path,
                                              sub_col[args.sub_columns][2])
    full_model.load_state_dict(torch.load(path))
    full_model.eval()
    autoencoder_model = full_model.autoencoder

    gcoords = autoencoder_model.encode(col2use).cpu().detach().numpy()
    x = torch.tensor(gcoords, dtype=torch.float, requires_grad=True)
    dx_np = full_model.time_derivative(
        torch.tensor(gcoords, dtype=torch.float, requires_grad=True)).cpu().detach().numpy()
    dx = torch.tensor(dx_np, dtype=torch.float)

    nnmodel = MLP(args.input_dim, args.hidden_dim, args.output_dim)
    model = HNN(2, nnmodel)
    model.to(device)
    optim = torch.optim.Adam(model.parameters(), args.learn_rate, weight_decay=args.weight_decay)

    print("Data from {} {}, column: {}".format(args.folder, args.speed,
                                               sub_col[args.sub_columns][2]))

    # x = torch.tensor(col2use[:-1], dtype=torch.float)
    # x_next = torch.tensor(col2use[1:], dtype=torch.float)
    #
    # autoencoder = MLPAutoencoder(sub_col[args.sub_columns][1], args.hidden_dim,
    #                              args.latent_dim * 2, dropout_rate=args.dropout_rate_ae)
    # model = PixelHNN(args.latent_dim * 2, args.hidden_dim,
    #                  autoencoder=autoencoder, nonlinearity=args.nonlinearity,
    #                  baseline=args.baseline, dropout_rate=args.dropout_rate)
    # model.to(device)
    # optim = torch.optim.Adam(model.parameters(), args.learn_rate, weight_decay=args.weight_decay)

    # vanilla ae train loop
    stats = {'train_loss': []}
    for step in range(args.total_steps + 1):

        # train step
        ixs = torch.randperm(x.shape[0])[:args.batch_size]
        x_train, dxdt = x[ixs].to(device), dx[ixs].to(device)
        dxdt_hat = model.time_derivative(x_train)
        loss = L2_loss(dxdt, dxdt_hat)
        loss.backward()
        optim.step()
        optim.zero_grad()

        stats['train_loss'].append(loss.item())
        if step % args.print_every == 0:
            print("step {}, train_loss {:.4e}".format(step, loss.item()))

    # train_dist = hnn_ae_loss(x, x_next, model, return_scalar=False)
    # print('Final train loss {:.4e} +/- {:.4e}'
    #       .format(train_dist.mean().item(), train_dist.std().item() / np.sqrt(train_dist.shape[0])))

    return model