# --- One-step-ahead training: regressors, model, optimizer, fit loop -------
# NOTE(review): this chunk arrived with newlines stripped; the first statement
# was truncated mid-call and is restored below from the parallel toeplitz
# construction used elsewhere in this file — confirm against the original.
phi_fit_y = scipy.linalg.toeplitz(
    y_meas_fit, y_meas_fit[0:n_a])[n_max - 1:-1, :]  # regressor 1
# NOTE(review): slices u_fit with n_a rather than n_b — correct only when
# n_a == n_b; confirm intended.
phi_fit_u = scipy.linalg.toeplitz(u_fit, u_fit[0:n_a])[n_max - 1:-1, :]
phi_fit = np.hstack((phi_fit_y, phi_fit_u))

# Neglect initial values: drop the first n_max samples so every row of the
# regressor matrix has a full history.
y_fit = y_fit[n_max:, :]
y_meas_fit = y_meas_fit[n_max:, :]
u_fit = u_fit[n_max:, :]

# Build fit data as torch tensors (shares memory with the numpy arrays)
phi_fit_torch = torch.from_numpy(phi_fit)
y_meas_fit_torch = torch.from_numpy(y_meas_fit)

# Setup neural model structure
io_model = NeuralIOModel(n_a=n_a, n_b=n_b, n_feat=64, small_init=True)
io_solution = NeuralIOSimulator(io_model)

# Setup optimizer over the network parameters only
optimizer = optim.Adam(io_solution.io_model.parameters(), lr=lr)

LOSS = []
start_time = time.time()
# Training loop
for itr in range(1, num_iter + 1):
    optimizer.zero_grad()

    # Perform one-step ahead prediction
    y_est_torch = io_solution.f_onestep(phi_fit_torch)

    # Compute fit loss (one-step prediction error)
    err = y_est_torch - y_meas_fit_torch
    # NOTE(review): chunk is truncated here mid-loop; the loss, backward pass
    # and optimizer step continue beyond this view.
# --- Hidden-state fitting setup: padded input regressor, tensors, optimizer -
# NOTE(review): this chunk arrived with newlines stripped; formatting restored.
v_fit = np.copy(u_fit)
# Prepend n_b zeros so the toeplitz regressor has initial conditions for u.
v_fit = np.vstack((np.zeros(n_b).reshape(-1, 1), v_fit)).astype(np.float32)
# NOTE(review): slices v_fit with n_a rather than n_b — correct only when
# n_a == n_b; confirm intended.
phi_fit_u = scipy.linalg.toeplitz(
    v_fit, v_fit[0:n_a])[n_max - 1:-1, :]  # used for the initial conditions on u

# To pytorch tensors
y_hidden_fit_torch = torch.tensor(
    y_hidden_fit_init,
    requires_grad=True)  # hidden state. It is an optimization variable!
y_meas_fit_torch = torch.tensor(y_meas_fit)
u_fit_torch = torch.tensor(u_fit)

# Setup neural model structure
io_model = NeuralIOModel(n_a=n_a, n_b=n_b, n_feat=64)
io_solution = NeuralIOSimulator(io_model)

# Setup optimizer: two parameter groups (network weights and the hidden
# state) so their learning rates could be tuned independently; both
# currently use the same lr.
params_net = list(io_solution.io_model.parameters())
params_hidden = [y_hidden_fit_torch]
optimizer = optim.Adam([
    {'params': params_net, 'lr': lr},
    {'params': params_hidden, 'lr': lr},
], lr=lr)
# --- Regressors from estimated hidden output h_fit, optimizer, batching ----
# NOTE(review): this chunk arrived with newlines stripped; formatting restored.
phi_fit_y = scipy.linalg.toeplitz(
    h_fit, h_fit[0:n_a])[n_max - 1:-1, :]  # regressor 1
phi_fit_u = scipy.linalg.toeplitz(v_fit, v_fit[0:n_a])[n_max - 1:-1, :]
phi_fit = np.hstack((phi_fit_y, phi_fit_u))

# To pytorch tensors
phi_fit_u_torch = torch.tensor(phi_fit_u)
h_fit_torch = torch.tensor(
    h_fit, requires_grad=True)  # this is an optimization variable!
phi_fit_h_torch = get_torch_regressor_mat(h_fit_torch.view(-1), n_a)
y_meas_fit_torch = torch.tensor(y_meas_fit)
u_fit_torch = torch.tensor(u_fit)

# Setup model and simulator
io_model = NeuralIOModel(n_a=n_a, n_b=n_b, n_feat=64)
io_solution = NeuralIOSimulator(io_model)
# Optional warm start from a previously fitted model:
#io_solution.io_model.load_state_dict(torch.load(os.path.join("models", "model_IO_1step_nonoise.pkl")))

# Jointly optimize the network weights and the hidden output sequence.
params = list(io_solution.io_model.parameters()) + [h_fit_torch]
optimizer = optim.Adam(params, lr=1e-3)  # same value as the original 10e-4
end = time.time()

loss_meter = RunningAverageMeter(0.97)


def get_batch(batch_size, seq_len):
    """Sample `batch_size` random subsequences of length `seq_len`.

    Start indices are drawn without replacement; `batch_idx_seq_h` indexes
    the n_a hidden samples preceding each subsequence (initial conditions).
    NOTE(review): function body is truncated in this chunk — the remainder
    (gathering tensors / return) lies beyond this view.
    """
    num_train_samples = y_meas_fit_torch.shape[0]
    batch_start = np.random.choice(
        np.arange(num_train_samples - seq_len, dtype=np.int64),
        batch_size, replace=False)  # batch start indices
    batch_idx = batch_start[:, np.newaxis] + np.arange(
        seq_len)  # batch all indices
    batch_idx_seq_h = batch_start[:, np.newaxis] - 1 - np.arange(n_a)
# --- Validation: load fitted model and simulate open-loop on held-out data --
# NOTE(review): this chunk arrived with newlines stripped; formatting restored.
y_noise = x_noise[:, [y_var_idx]]

# Build validation data (time window converted to sample indices via Ts)
t_val_start = 0
t_val_end = time_data[-1]
idx_val_start = int(t_val_start // Ts)
idx_val_end = int(t_val_end // Ts)
n_val = idx_val_end - idx_val_start
u_val = np.copy(u[idx_val_start:idx_val_end])
y_val = np.copy(y[idx_val_start:idx_val_end])
y_meas_val = np.copy(y_noise[idx_val_start:idx_val_end])
time_val = time_data[idx_val_start:idx_val_end]

# Setup neural model structure and load fitted model parameters
io_model = NeuralIOModel(n_a=n_a, n_b=n_b, n_feat=64)
io_solution = NeuralIOSimulator(io_model)
model_filename = f"model_IO_{model_type}.pkl"
io_solution.io_model.load_state_dict(
    torch.load(os.path.join("models", model_filename)))

# Evaluate the model in open-loop simulation against validation data.
# Initial conditions are zero sequences of past outputs (n_a) and inputs (n_b).
y_seq = np.zeros(n_a, dtype=np.float32)
u_seq = np.zeros(n_b, dtype=np.float32)

y_meas_val_torch = torch.tensor(y_meas_val)

with torch.no_grad():  # inference only — no gradients needed
    y_seq_torch = torch.tensor(y_seq)
    u_seq_torch = torch.tensor(u_seq)
    u_torch = torch.tensor(u_val)
    y_val_sim_torch = io_solution.f_sim(y_seq_torch, u_seq_torch, u_torch)