# Build a colored disturbance d by filtering white noise e through the noise
# model Hud, then add it to the clean output y (te is the simulation time grid).
te = np.arange(N_sim_d) * ts
_, d, _ = control.forced_response(Hud, te, e)
d_fast = d[n_skip_d:]  # drop the initial transient samples of the noise filter
d_fast = d_fast.reshape(-1, 1)  # column vector; assumes y is (N, 1) — TODO confirm
y_nonoise = np.copy(y)
y_noise = y + d_fast

# Prepare data: add a leading batch dimension and convert to float tensors.
u_torch = torch.tensor(u[None, ...], dtype=torch.float, requires_grad=False)
y_meas_torch = torch.tensor(y_noise[None, ...], dtype=torch.float)
y_true_torch = torch.tensor(y_nonoise[None, ...], dtype=torch.float)

# In[Second-order dynamical system custom defined]
G = SisoLinearDynamicalOperator(n_b, n_a)
H_inv = SisoLinearDynamicalOperator(2, 2, n_k=1)  # second operator; role not visible in this chunk

# Hand-picked initial coefficients for G (no_grad: plain in-place initialization).
with torch.no_grad():
    G.b_coeff[0, 0, 0] = 0.01
    G.b_coeff[0, 0, 1] = 0.0
    G.a_coeff[0, 0, 0] = -0.9
    # NOTE(review): this overwrites b_coeff[0, 0, 1] assigned two lines above;
    # it was probably meant to be a_coeff[0, 0, 1] — confirm intent.
    G.b_coeff[0, 0, 1] = 0.01

# In[Setup optimizer]
optimizer = torch.optim.Adam([
    {
        'params': G.parameters(),
        'lr': lr
    },
# Load input u, output y, and initial output y0 for the selected dataset from
# the already-open HDF5 handle; shapes are (batch, seq_len, channels) as read below.
u = np.array(h5_data[dataset_name]['u'])
y = np.array(h5_data[dataset_name]['y'])
y0 = np.array(h5_data[dataset_name]['y0'])
# y = (y - np.mean(y[[0], :, :], axis=-2))/(np.std(y[[0], :, :], axis=-2))
batch_size = u.shape[0]
seq_len = u.shape[1]
n_u = u.shape[2]
n_y = y.shape[2]

# In[To tensors]
u_torch = torch.tensor(u, dtype=torch.float32)
y_torch = torch.tensor(y, dtype=torch.float32)

# In[Deterministic model]: linear dynamical block G and static non-linearity F.
G = SisoLinearDynamicalOperator(n_b, n_a, n_k=n_k)
F = SisoStaticNonLinearity(n_hidden=10)

# In[Log-likelihood]
# Single Adam optimizer over both blocks; each parameter group uses the same lr.
optimizer = torch.optim.Adam([
    {'params': G.parameters(), 'lr': lr},
    {'params': F.parameters(), 'lr': lr},
], lr=lr)

# In[Train]
LOSS = []
start_time = time.time()
for itr in range(0, num_iter):
# Decimated fit slices of the input and time axis (y_fit is defined earlier).
u_fit = u[:n_fit:decimate]
t_fit = t[0:n_fit:decimate]

# In[Prepare data]: batch-first tensors. y_hidden_torch is a trainable copy of
# the measured output that is optimized directly (see requires_grad=True).
u_fit_torch = torch.tensor(u_fit[None, ...], dtype=torch.float, requires_grad=False)
y_fit_torch = torch.tensor(y_fit[None, ...], dtype=torch.float, requires_grad=False)
y_hidden_torch = torch.tensor(y_fit[None, ...], dtype=torch.float, requires_grad=True)  # optimize on the output to manage the feedback connection

# In[First dynamical system custom defined]
G1 = SisoLinearDynamicalOperator(n_b, n_a, n_k)
# Static non-linearity
F_nl = SisoStaticNonLinearity()

# Setup optimizer: model parameters plus the free output variable y_hidden_torch.
optimizer = torch.optim.Adam([
    {
        'params': G1.parameters(),
        'lr': lr
    },
    {
        'params': F_nl.parameters(),
        'lr': lr
    },
    {
        'params': [y_hidden_torch],
# Time axis from the sampling frequency fs.
ts = 1 / fs
t = np.arange(N) * ts

# In[Fit data]: decimated training slices.
y_fit = y[0:n_fit:decimate]
u_fit = u[0:n_fit:decimate]
t_fit = t[0:n_fit:decimate]

# In[Prepare training tensors] — batch-first float tensors.
u_fit_torch = torch.tensor(u_fit[None, :, :], dtype=torch.float, requires_grad=False)
y_fit_torch = torch.tensor(y_fit[None, :, :], dtype=torch.float)

# In[Prepare model]: G1 -> F_nl -> G2 cascade, simulated by model() below.
G1 = SisoLinearDynamicalOperator(n_b, n_a, n_k=1)
F_nl = SisoStaticNonLinearity(n_hidden=10, activation='tanh')
G2 = SisoLinearDynamicalOperator(n_b, n_a)

def model(u_in):
    # Simulate the cascade; returns the final output and both intermediates.
    # NOTE(review): u_in is ignored — the body closes over the global
    # u_fit_torch, so calling model() with any other input silently reuses
    # the training data. y1_lin should probably be G1(u_in); confirm.
    y1_lin = G1(u_fit_torch)
    y1_nl = F_nl(y1_lin)
    y_hat = G2(y1_nl)
    return y_hat, y1_nl, y1_lin

# In[Setup optimizer]
optimizer_ADAM = torch.optim.Adam([
    {
        'params': G1.parameters(),
        'lr': lr_ADAM
    },
# --- Load input signal and sampling information from the dataframe ----------
u = np.array(df_X[COL_U], dtype=np.float32)
fs = np.array(df_X[COL_F].iloc[0], dtype=np.float32).item()  # sampling frequency

# Time axis for the measured output record.
N = y_meas.size
ts = 1 / fs
t = np.arange(N) * ts

# Sample indices used downstream for the fit / test split and statistics.
t_fit_start = 0
t_fit_end = 100000
t_test_start = 100000
t_test_end = 188000
t_skip = 1000  # initial samples skipped when computing statistics

# --- Instantiate the model blocks and restore their trained weights ---------
G1 = SisoLinearDynamicalOperator(n_b=n_b, n_a=n_a, n_k=1)
G2 = SisoLinearDynamicalOperator(n_b=n_b, n_a=n_a, n_k=0)
F_nl = SisoStaticNonLinearity(n_hidden=10, activation='tanh')

model_folder = os.path.join("models", model_name)
for block, weight_file in ((G1, "G1.pkl"), (F_nl, "F_nl.pkl"), (G2, "G2.pkl")):
    block.load_state_dict(torch.load(os.path.join(model_folder, weight_file)))

# --- In[Predict]: run the G1 -> F_nl -> G2 cascade on the full input --------
u_torch = torch.tensor(u[None, :, :])  # add a leading batch dimension
y1_lin = G1(u_torch)
y1_nl = F_nl(y1_lin)
y_hat = G2(y1_nl)
# Time axis from the sampling frequency fs.
ts = 1 / fs
t = np.arange(N) * ts

# Fit data: decimated training slices.
y_fit = y[:n_fit:decimate]
u_fit = u[:n_fit:decimate]
t_fit = t[0:n_fit:decimate]

# Prepare data: batch-first float tensors.
u_fit_torch = torch.tensor(u_fit[None, :, :], dtype=torch.float, requires_grad=False)
y_fit_torch = torch.tensor(y_fit[None, :, :], dtype=torch.float)

# First dynamical system custom defined, with zero initial conditions
# (n_a past outputs and n_b past inputs per batch element).
G1 = SisoLinearDynamicalOperator(n_b=n_b, n_a=n_a)
y_init_1 = torch.zeros((n_batch, n_a), dtype=torch.float)
u_init_1 = torch.zeros((n_batch, n_b), dtype=torch.float)

# Second dynamical system custom defined, same zero initial conditions.
G2 = SisoLinearDynamicalOperator(n_b=n_b, n_a=n_a)
y_init_2 = torch.zeros((n_batch, n_a), dtype=torch.float)
u_init_2 = torch.zeros((n_batch, n_b), dtype=torch.float)

# Static non-linearity
F_nl = StaticNonLin()

# Setup optimizer
optimizer = torch.optim.Adam([
    {
        'params': G1.parameters(),
# Normalize the two state components to comparable magnitudes.
x = x / np.array([100.0, 10.0])

# Corrupt the clean two-channel output with white Gaussian measurement noise.
std_noise_V = add_noise * 0.1
y_nonoise = np.copy(x[:, [0, 1]])
y_noise = y_nonoise + np.random.randn(*y_nonoise.shape) * std_noise_V

# Batch-first training tensors: input, measured (noisy) and true outputs.
u_torch = torch.tensor(u[None, :, :], dtype=torch.float, requires_grad=False)
y_meas_torch = torch.tensor(y_noise[None, :, :], dtype=torch.float)
y_true_torch = torch.tensor(y_nonoise[None, :, :], dtype=torch.float)

# Linear dynamical block followed by a static non-linearity.
G = SisoLinearDynamicalOperator(n_b, n_a, n_k=1)
nn_static = SisoStaticNonLinearity()

# Setup optimizer: one Adam instance over both blocks, same lr per group.
params_lin = G.parameters()
optimizer = torch.optim.Adam(
    [
        {'params': params_lin, 'lr': lr},
        {'params': nn_static.parameters(), 'lr': lr},
    ],
    lr=lr,
)

# In[Train]
LOSS = []
# Build a colored disturbance d by filtering white noise e through Hud, then
# add it to the clean training output (the clean copy is kept for evaluation).
e = np.random.randn(N_sim_d)
te = np.arange(N_sim_d) * ts
_, d, _ = control.forced_response(Hud, te, e)
d_fast = d[n_skip_d:]  # drop the initial transient samples of the noise filter
d_fast = d_fast.reshape(-1, 1)  # column vector; assumes y_fit is (N, 1) — TODO confirm
y_fit_clean = np.copy(y_fit)
y_fit = y_fit + d_fast

# In[Prepare training tensors] — batch-first float tensors.
u_fit_torch = torch.tensor(u_fit[None, :, :], dtype=torch.float, requires_grad=False)
y_fit_torch = torch.tensor(y_fit[None, :, :], dtype=torch.float)

# In[Prepare model]: G1 -> F_nl -> G2 cascade plus an extra operator H_inv
# (its role is not visible in this chunk).
G1 = SisoLinearDynamicalOperator(n_b, n_a, n_k=1)
F_nl = SisoStaticNonLinearity(n_hidden=10, activation='tanh')
G2 = SisoLinearDynamicalOperator(n_b, n_a)
H_inv = SisoLinearDynamicalOperator(2, 2, n_k=1)

def model(u_in):
    # Simulate the cascade; returns the final output and both intermediates.
    # NOTE(review): u_in is ignored — the body closes over the global
    # u_fit_torch, so calling model() with any other input silently reuses
    # the training data. y1_lin should probably be G1(u_in); confirm.
    y1_lin = G1(u_fit_torch)
    y1_nl = F_nl(y1_lin)
    y_hat = G2(y1_nl)
    return y_hat, y1_nl, y1_lin

# In[Setup optimizer]
optimizer_ADAM = torch.optim.Adam([
    {
        'params': G1.parameters(),
# Train on a single example: slice keeps the leading batch dimension.
u = u[0:1, ...]
y = y[0:1, ...]
batch_size, seq_len, n_u = u.shape[:3]
n_y = y.shape[2]

# In[To tensors]
u_torch = torch.tensor(u, dtype=torch.float32)
y_torch = torch.tensor(y, dtype=torch.float32)

# In[Deterministic model]: linear block G followed by static non-linearity F,
# restored from a previously trained checkpoint.
G = SisoLinearDynamicalOperator(n_b, n_a, n_k=n_k)
F = SisoStaticNonLinearity(n_hidden=10)

model_folder = os.path.join("models", model_name_load)
G.load_state_dict(torch.load(os.path.join(model_folder, "G.pkl")))
F.load_state_dict(torch.load(os.path.join(model_folder, "F.pkl")))
# (Commented-out experiments with a polynomial non-linearity and manual
# coefficient initialization were removed from this revision.)
# Build a colored disturbance d by filtering white noise e through Hud and add
# it to the clean training output y_fit (the clean copy is kept for metrics).
n_skip_d = 0                    # transient samples discarded from the filter output
N_sim_d = n_fit + n_skip_d
e = np.random.randn(N_sim_d)
te = np.arange(N_sim_d) * ts
_, d, _ = control.forced_response(Hud, te, e)
d_fast = d[n_skip_d:]
d_fast = d_fast.reshape(-1, 1)  # column vector; assumes y_fit is (N, 1) — TODO confirm
y_fit_clean = np.copy(y_fit)
y_fit = y_fit + d_fast

# In[Prepare training tensors] — batch-first float tensors.
u_fit_torch = torch.tensor(u_fit[None, :, :], dtype=torch.float, requires_grad=False)
y_fit_torch = torch.tensor(y_fit[None, :, :], dtype=torch.float)

# In[Prepare model]: G1 -> F_nl -> G2 cascade.
G1 = SisoLinearDynamicalOperator(n_b, n_a, n_k=1)
F_nl = SisoStaticNonLinearity(n_hidden=10, activation='tanh')
G2 = SisoLinearDynamicalOperator(n_b, n_a)


def model(u_in):
    """Simulate the G1 -> F_nl -> G2 cascade on the input tensor u_in.

    Returns (y_hat, y1_nl, y1_lin): final output plus both intermediate signals.
    """
    # BUGFIX: the original body closed over the global u_fit_torch and ignored
    # u_in, so calling model() on test data silently reused the training input.
    y1_lin = G1(u_in)
    y1_nl = F_nl(y1_lin)
    y_hat = G2(y1_nl)
    return y_hat, y1_nl, y1_lin


# In[Setup optimizer]: one parameter group per block, all with the same lr.
optimizer_ADAM = torch.optim.Adam([
    {'params': G1.parameters(), 'lr': lr_ADAM},
    {'params': G2.parameters(), 'lr': lr_ADAM},
    {'params': F_nl.parameters(), 'lr': lr_ADAM},
], lr=lr_ADAM)
# Time axis derived from the fixed 50 Hz sampling rate.
N = y.size
fs = 50  # sampling frequency (Hz)
ts = 1 / fs
t = np.arange(N) * ts

# Fit data: decimated slices; the output is shifted by a fixed 1.5 offset.
y_fit = y[:n_fit:decimate] - 1.5
u_fit = u[:n_fit:decimate]
t_fit = t[0:n_fit:decimate]

# In[Prepare data] — batch-first float tensors.
u_fit_torch = torch.tensor(u_fit[None, :, :], dtype=torch.float)
y_fit_torch = torch.tensor(y_fit[None, :, :], dtype=torch.float)

# In[Setup model]: two linear blocks and a static non-linearity, presumably
# composed as G1 -> F -> G2 downstream of this chunk.
G1 = SisoLinearDynamicalOperator(n_b=4, n_a=4, n_k=1)
F = SisoStaticNonLinearity(n_hidden=16, activation='tanh')
G2 = SisoLinearDynamicalOperator(n_b=4, n_a=4, n_k=1)

# Setup optimizer.
# NOTE(review): G2.parameters() is not registered here, so G2 is never updated
# during training and keeps its initial coefficients — confirm this is intended.
optimizer = torch.optim.Adam(
    [
        {'params': G1.parameters(), 'lr': lr},
        {'params': F.parameters(), 'lr': lr},
    ],
    lr=lr,
)
# Keep only the first example; fancy indexing with [[0]] preserves the batch dim.
u = u[[0], ...]
y = y[[0], ...]
batch_size, seq_len, n_u = u.shape[:3]
n_y = y.shape[2]

# In[To tensors]
u_torch = torch.tensor(u, dtype=torch.float32)
y_torch = torch.tensor(y, dtype=torch.float32)

# In[Deterministic model]: linear block G followed by static non-linearity F
# (Wiener structure), restored from a saved checkpoint.
G = SisoLinearDynamicalOperator(n_b, n_a, n_k=n_k)
F = SisoStaticNonLinearity(n_hidden=10)
model_folder = os.path.join("models", model_name)
G.load_state_dict(torch.load(os.path.join(model_folder, "G.pkl")))
F.load_state_dict(torch.load(os.path.join(model_folder, "F.pkl")))

# In[Simulate] the cascade on the selected example.
y_lin = G(u_torch)
y_nl = F(y_lin)
y_hat = y_nl

# In[Detach] to a NumPy array for plotting.
y_hat = y_hat.detach().numpy()

# In[Predict]
plt.plot(y0[0, :, 0], 'k', label='y0')