    # Tail of the get_batch helper: gather time stamps, initial states, inputs
    # and measured states for each subsequence in the batch
    batch_t = torch.tensor(time_fit[batch_idx])    # time instants of each subsequence
    batch_x0 = torch.tensor(x_fit[batch_start])    # initial state of each subsequence (M, D)
    batch_u = torch.tensor(u_fit[batch_idx])       # inputs of each subsequence
    batch_x = torch.tensor(x_fit[batch_idx])       # measured states of each subsequence
    return batch_t, batch_x0, batch_u, batch_x


# Setup neural model structure
ss_model = NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64)  # alternative: NeuralStateSpaceModelLin(A_nominal*Ts, B_nominal*Ts)
nn_solution = NeuralStateSpaceSimulator(ss_model)
# nn_solution.ss_model.load_state_dict(torch.load(os.path.join("models", "model_SS_1step.pkl")))

# Setup optimizer
params = list(nn_solution.ss_model.parameters())
optimizer = optim.Adam(params, lr=1e-5)

end = time.time()

# Scale the loss with respect to the initial multi-step simulation error
with torch.no_grad():
    batch_t, batch_x0, batch_u, batch_x = get_batch(batch_size, seq_len)
    batch_x_pred = nn_solution.f_sim_multistep(batch_x0, batch_u)
    err = batch_x - batch_x_pred
    loss_scale = torch.mean(err**2)

LOSS = []
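# A minimal sketch of the multi-step training loop that would consume the pieces
# above (an assumption, not the original script): simulate each subsequence from
# its initial state, compare against the measured states, and normalize the MSE
# by loss_scale so the loss starts near 1. num_iter is hypothetical.
num_iter = 10000
for itr in range(num_iter):
    optimizer.zero_grad()
    batch_t, batch_x0, batch_u, batch_x = get_batch(batch_size, seq_len)
    batch_x_pred = nn_solution.f_sim_multistep(batch_x0, batch_u)
    err = batch_x - batch_x_pred
    loss = torch.mean(err**2) / loss_scale  # scaled simulation-error loss
    loss.backward()
    optimizer.step()
    LOSS.append(loss.item())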
x_noise = x_noise.astype(np.float32)
y_noise = x_noise[:, [y_var_idx]]

# Build validation data
t_val_start = 0
t_val_end = time_data[-1]
idx_val_start = int(t_val_start // Ts)
idx_val_end = int(t_val_end // Ts)
u_val = u[idx_val_start:idx_val_end]
x_meas_val = x_noise[idx_val_start:idx_val_end]
x_true_val = x[idx_val_start:idx_val_end]
y_val = y[idx_val_start:idx_val_end]
time_val = time_data[idx_val_start:idx_val_end]

# Setup neural model structure and load fitted model parameters
ss_model = NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64)
nn_solution = NeuralStateSpaceSimulator(ss_model)
model_filename = f"model_SS_{model_type}.pkl"
nn_solution.ss_model.load_state_dict(torch.load(os.path.join("models", model_filename)))

# Evaluate the model in open-loop simulation against validation data
x_0 = x_meas_val[0, :]
with torch.no_grad():
    x_sim_torch = nn_solution.f_sim(torch.tensor(x_0), torch.tensor(u_val))
    loss = torch.mean(torch.abs(x_sim_torch - torch.tensor(x_true_val)))

# Plot results
x_sim = np.array(x_sim_torch)
if not plot_input:
    fig, ax = plt.subplots(2, 1, sharex=True, figsize=(6, 5.5))
else:
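    # Hypothetical completion of the 'else' branch (not shown in the excerpt):
    # allocate a third panel so the input can be plotted as well.
    fig, ax = plt.subplots(3, 1, sharex=True, figsize=(6, 7.5))

# Sketch of the validation plots under the assumption above: true vs. simulated
# states, plus the input when requested.
ax[0].plot(time_val, x_true_val[:, 0], 'k', label='True')
ax[0].plot(time_val, x_sim[:, 0], 'r--', label='Model simulation')
ax[0].legend()
ax[0].grid(True)
ax[1].plot(time_val, x_true_val[:, 1], 'k', label='True')
ax[1].plot(time_val, x_sim[:, 1], 'r--', label='Model simulation')
ax[1].legend()
ax[1].grid(True)
if plot_input:
    ax[2].plot(time_val, u_val[:, 0], 'k', label='Input')
    ax[2].legend()
    ax[2].grid(True)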
x0_torch = torch.from_numpy(x[0, :])
N = np.shape(y)[0]
Ts = time_data[1] - time_data[0]

# Measurement noise (disabled here: standard deviations are set to zero)
std_noise_V = 0.0 * 5.0
std_noise_I = 0.0 * 0.5
std_noise = np.array([std_noise_V, std_noise_I])
x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
x_noise = x_noise.astype(np.float32)
y_noise = np.copy(y)

# Setup neural model structure and load fitted model parameters
ss_model = torch.jit.script(NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64))  # TorchScript-compiled model
nn_solution = NeuralStateSpaceSimulator(ss_model)
nn_solution.ss_model.load_state_dict(
    torch.load(os.path.join("models", "model_SS_1step_nonoise.pkl")))

# In[Validate model]
t_val_start = 0
t_val_end = time_data[-1]
idx_val_start = int(t_val_start // Ts)
idx_val_end = int(t_val_end // Ts)

# Build validation data
u_val = u[idx_val_start:idx_val_end]
x_val = x_noise[idx_val_start:idx_val_end]
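# A minimal validation sketch under the setup above (an assumption; the original
# continuation is not part of this excerpt): simulate the model open-loop from
# the first measured state over the validation input and report the error.
with torch.no_grad():
    x_sim_val = nn_solution.f_sim(torch.tensor(x_val[0, :]), torch.tensor(u_val))
    val_err = torch.mean(torch.abs(x_sim_val - torch.tensor(x_val)))
print(f"Validation mean absolute error: {val_err.item():.4f}")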
u = np.array(df_X[COL_U], dtype=np.float32)
x0_torch = torch.from_numpy(x[0, :])
x_noise = np.copy(x)  # no measurement noise added here
x_noise = x_noise.astype(np.float32)
Ts = time_data[1] - time_data[0]

# Get fit data
t_fit = time_data[-1]  # use all data
n_fit = int(t_fit // Ts)
input_data = u[0:n_fit]
state_data = x_noise[0:n_fit]
u_torch = torch.from_numpy(input_data)
x_true_torch = torch.from_numpy(state_data)

# Setup neural model structure
ss_model = NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64, init_small=False)
nn_solution = NeuralStateSpaceSimulator(ss_model)

# Setup optimizer
optimizer = optim.Adam(nn_solution.ss_model.parameters(), lr=lr)

# Scale loss with respect to the initial one-step prediction error
with torch.no_grad():
    x_est_torch = nn_solution.f_onestep(x_true_torch, u_torch)
    err_init = x_est_torch - x_true_torch
    scale_error = torch.sqrt(torch.mean(err_init**2, dim=0))

# Training loop
LOSS = []
start_time = time.time()
for itr in range(0, num_iter):
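    # Hypothetical body of the one-step training loop (the original body is not
    # part of this excerpt): minimize the one-step prediction error, normalized
    # component-wise by the initial error scale computed above.
    optimizer.zero_grad()
    x_est_torch = nn_solution.f_onestep(x_true_torch, u_torch)
    err = (x_est_torch - x_true_torch) / scale_error
    loss = torch.mean(err**2)
    loss.backward()
    optimizer.step()
    LOSS.append(loss.item())
    if itr % 100 == 0:
        print(f"Iter {itr} | Loss {loss.item():.6f}")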
COL_Y = ['V_C']
df_X = pd.read_csv(os.path.join("data", "RLC_data_sat_FE.csv"))
time_data = np.array(df_X[COL_T], dtype=np.float32)
y = np.array(df_X[COL_Y], dtype=np.float32)
x = np.array(df_X[COL_X], dtype=np.float32)
u = np.array(df_X[COL_U], dtype=np.float32)
t = np.array(df_X[COL_T], dtype=np.float32)
x0_torch = torch.from_numpy(x[0, :])
Ts = time_data[1] - time_data[0]

# Setup neural model structure and load fitted model parameters
n_x = 2
n_u = 1
n_hidden = 64
ss_model = NeuralStateSpaceModel(n_x, n_u, n_hidden)
nn_solution = NeuralStateSpaceSimulator(ss_model)
nn_solution.ss_model.load_state_dict(torch.load(os.path.join("models", "model_ARX_FE_sat.pkl")))

x_torch = torch.tensor(x)
x0_torch = torch.tensor(x[0, :])
u_torch = torch.tensor(u)

# Open-loop simulation over the full dataset
with torch.no_grad():
    x_sim_torch = nn_solution.f_sim(x0_torch, u_torch)
    loss = torch.mean(torch.abs(x_sim_torch - x_torch))
x_sim = np.array(x_sim_torch)

# Plot results
n_plot = t.size
fig, ax = plt.subplots(3, 1, sharex=True)
ax[0].plot(t[:n_plot], x[:n_plot, 0], label='True')
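# Plausible continuation of the plot (an assumption; the remaining panels are
# not part of this excerpt): overlay the simulated states and show the input.
ax[0].plot(t[:n_plot], x_sim[:n_plot, 0], label='Simulated')
ax[0].legend()
ax[0].grid(True)
ax[1].plot(t[:n_plot], x[:n_plot, 1], label='True')
ax[1].plot(t[:n_plot], x_sim[:n_plot, 1], label='Simulated')
ax[1].legend()
ax[1].grid(True)
ax[2].plot(t[:n_plot], u[:n_plot, 0], label='Input')
ax[2].legend()
ax[2].grid(True)
plt.show()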
df_X = pd.read_csv(os.path.join("data", "pendulum_data_MPC_ref.csv"))
t = np.array(df_X[COL_T], dtype=np.float32)
y = np.array(df_X[COL_Y], dtype=np.float32)
x = np.array(df_X[COL_X], dtype=np.float32)
u = np.array(df_X[COL_U], dtype=np.float32)
Ts = t[1] - t[0]
x_noise = x  # no measurement noise added

# In[Model]
# Nominal cart-pole model (pre-trained) plus a neural residual model
ss_model = CartPoleStateSpaceModel(Ts)
model_name = "model_ARX_FE_nonoise.pkl"
ss_model.load_state_dict(torch.load(os.path.join("models", model_name)))
ss_model_residual = NeuralStateSpaceModel(n_x=4, n_u=1, n_feat=64)
nn_solution = NeuralSumODE([ss_model, ss_model_residual])

# In[Setup optimization problem]
len_fit = 40  # fit on the first 40 seconds of data
n_fit = int(len_fit // Ts)
u_fit = u[0:n_fit]
x_fit = x_noise[0:n_fit]
t_fit = t[0:n_fit]
u_fit_torch = torch.from_numpy(u_fit)
x_meas_fit_torch = torch.from_numpy(x_fit)
t_fit_torch = torch.from_numpy(t_fit)

num_iter = 20000
test_freq = 1
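# A minimal sketch of how the fit could proceed from here (assumptions: NeuralSumODE
# is taken to expose an f_sim(x0, u) open-loop simulation method analogous to
# NeuralStateSpaceSimulator, only the residual model is trained, and the learning
# rate is hypothetical).
params = list(ss_model_residual.parameters())
optimizer = optim.Adam(params, lr=1e-4)

LOSS = []
for itr in range(num_iter):
    optimizer.zero_grad()
    x_sim_torch = nn_solution.f_sim(x_meas_fit_torch[0, :], u_fit_torch)
    err = x_sim_torch - x_meas_fit_torch
    loss = torch.mean(err**2)
    loss.backward()
    optimizer.step()
    LOSS.append(loss.item())
    if itr % test_freq == 0:
        print(f"Iter {itr} | Loss {loss.item():.6f}")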