Example #1
    # In[Validate model]
    t_val = time_data[-1]
    n_val = int(t_val // Ts)  # x.shape[0]

    input_data_val = u[0:n_val]
    state_data_val = x[0:n_val]
    output_data_val = y[0:n_val]

    x0_val = state_data_val[0, :]  # np.zeros(2,dtype=np.float32)
    x0_torch_val = torch.from_numpy(x0_val)
    u_torch_val = torch.tensor(input_data_val)
    x_true_torch_val = torch.from_numpy(state_data_val)

    with torch.no_grad():
        x_pred_torch_val = nn_solution.f_sim(x0_torch_val, u_torch_val)

    # In[1]

    fig, ax = plt.subplots(3, 1, sharex=True)
    ax[0].plot(np.array(x_true_torch_val[:, 0]), label='True')
    ax[0].plot(np.array(x_pred_torch_val[:, 0]), label='Fit')
    ax[0].legend()
    ax[0].grid(True)

    ax[1].plot(np.array(x_true_torch_val[:, 1]), label='True')
    ax[1].plot(np.array(x_pred_torch_val[:, 1]), label='Fit')
    ax[1].legend()
    ax[1].grid(True)

    ax[2].plot(np.array(u_torch_val), label='Input')
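Every snippet on this page calls nn_solution.f_sim(x0, u) on a NeuralStateSpaceSimulator that wraps a NeuralStateSpaceModel, but the two classes themselves never appear. As a reading aid, here is a minimal sketch of the assumed interface (shapes and names inferred from the calls above, not the library's actual implementation; Example #7 additionally passes a batch dimension, which this sketch omits):

    import torch
    import torch.nn as nn

    class NeuralStateSpaceModel(nn.Module):
        """Sketch: an MLP that approximates the one-step state increment f(x, u)."""

        def __init__(self, n_x=2, n_u=1, n_feat=64):
            super().__init__()
            self.net = nn.Sequential(
                nn.Linear(n_x + n_u, n_feat),
                nn.ReLU(),
                nn.Linear(n_feat, n_x),
            )

        def forward(self, x, u):
            return self.net(torch.cat((x, u), dim=-1))

    class NeuralStateSpaceSimulator:
        """Sketch: open-loop roll-out x[k+1] = x[k] + f(x[k], u[k])."""

        def __init__(self, ss_model):
            self.ss_model = ss_model

        def f_sim(self, x0, u):
            x_step = x0
            x_list = []
            for u_step in u.split(1):       # one time sample per iteration
                x_list.append(x_step)
                x_step = x_step + self.ss_model(x_step, u_step.squeeze(0))
            return torch.stack(x_list, dim=0)

With these definitions, a call such as nn_solution.f_sim(x0_torch_val, u_torch_val) returns one simulated state per input sample, which is what the plots above compare against the true states.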
Example #2
    u_val = u[idx_val_start:idx_val_end]
    x_meas_val = x_noise[idx_val_start:idx_val_end]
    x_true_val = x[idx_val_start:idx_val_end]
    y_val = y[idx_val_start:idx_val_end]
    time_val = time_data[idx_val_start:idx_val_end]

    # Setup neural model structure and load fitted model parameters
    ss_model = NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64)
    nn_solution = NeuralStateSpaceSimulator(ss_model)
    model_filename = f"model_SS_{model_type}.pkl"
    nn_solution.ss_model.load_state_dict(torch.load(os.path.join("models", model_filename)))

    # Evaluate the model in open-loop simulation against validation data
    x_0 = x_meas_val[0, :]
    with torch.no_grad():
        x_sim_torch = nn_solution.f_sim(torch.tensor(x_0), torch.tensor(u_val))
        loss = torch.mean(torch.abs(x_sim_torch - torch.tensor(x_true_val)))

    # Plot results
    x_sim = np.array(x_sim_torch)
    if not plot_input:
        fig, ax = plt.subplots(2, 1, sharex=True, figsize=(6, 5.5))
    else:
        fig, ax = plt.subplots(3, 1, sharex=True, figsize=(6, 7.5))
    time_val_us = time_val * 1e6

    if dataset_type == 'id':
        t_plot_start = 0.2e-3
    else:
        t_plot_start = 1.9e-3
    t_plot_end = t_plot_start + 0.32e-3
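    # --- Hypothetical continuation: the snippet is truncated here. t_plot_start /
    # t_plot_end presumably define the window shown in the plots; one way to apply
    # it (time_val, time_val_us, x_true_val and x_sim are from the snippet above,
    # the rest is assumed):
    idx_plot = (time_val.ravel() >= t_plot_start) & (time_val.ravel() <= t_plot_end)
    ax[0].plot(time_val_us.ravel()[idx_plot], x_true_val[idx_plot, 0], 'k', label='True')
    ax[0].plot(time_val_us.ravel()[idx_plot], x_sim[idx_plot, 0], 'r', label='Simulated')
    ax[0].legend()
    ax[0].grid(True)
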
    # In[Load model]
    ss_model = CartPoleStateSpaceModel(Ts)
    nn_solution = NeuralStateSpaceSimulator(ss_model)
    #model_name = "model_OE_minibatch_100.pkl"
    model_name = "model_ARX_FE_nonoise.pkl"
    nn_solution.ss_model.load_state_dict(
        torch.load(os.path.join("models", model_name)))

    # In[Simulation plot]

    x_torch = torch.tensor(x)
    x0_torch = torch.tensor(x[0, :])
    u_torch = torch.tensor(u)
    t_torch = torch.tensor(t)
    with torch.no_grad():
        x_sim_torch = nn_solution.f_sim(x0_torch, u_torch)
        loss = torch.mean(torch.abs(x_sim_torch - x_torch))

    x_sim = np.array(x_sim_torch)

    n_plot = t.size
    fig, ax = plt.subplots(3, 1, sharex=True)
    ax[0].plot(t[:n_plot], x[:n_plot, 0], label='True')
    ax[0].plot(t[:n_plot], x_sim[:n_plot, 0], label='Simulated')
    ax[0].set_xlabel("Time (s)")
    ax[0].set_ylabel("Cart position (m)")
    ax[0].legend()
    ax[0].grid()

    ax[1].plot(t[:n_plot], x[:n_plot, 2], label='True')
    ax[1].plot(t[:n_plot], x_sim[:n_plot, 2], label='Simulated')
        optimizer.step()

    train_time = time.time() - start_time
    print(f"\nTrain time: {train_time:.2f}")

    if not os.path.exists("models"):
        os.makedirs("models")

    torch.save(nn_solution.ss_model.state_dict(),
               os.path.join("models", "model_SS_1step_nonoise.pkl"))

    # In[Plot]
    x_0 = state_data[0, :]
    time_start = time.time()
    with torch.no_grad():
        x_sim = nn_solution.f_sim(torch.tensor(x_0), torch.tensor(input_data))
        loss_sc = torch.mean(torch.abs(x_sim - x_true_torch))
    time_arr = time.time() - time_start

    x_sim = np.array(x_sim)
    fig, ax = plt.subplots(3, 1, sharex=True)
    ax[0].plot(np.array(x_true_torch[:, 0]), 'k', label='True')
    ax[0].plot(x_sim[:, 0], 'r', label='Sim')
    ax[0].legend()
    ax[0].grid(True)
    ax[1].plot(np.array(x_true_torch[:, 1]), 'k', label='True')
    ax[1].plot(x_sim[:, 1], 'r', label='Sim')
    ax[1].legend()
    ax[1].grid(True)

    ax[2].plot(np.array(u_torch), 'b', label='Input')
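As a hedged add-on (not part of the original script): besides the mean-absolute loss computed above, a per-channel R^2 index is a common way to summarize open-loop simulation fit, and it can be computed directly from x_sim and x_true_torch:

    # Hypothetical add-on: R^2 index per state channel of the open-loop simulation.
    x_true = np.array(x_true_torch)
    ss_res = np.sum((x_true - x_sim) ** 2, axis=0)
    ss_tot = np.sum((x_true - np.mean(x_true, axis=0)) ** 2, axis=0)
    r_squared = 1.0 - ss_res / ss_tot
    print(f"R^2 per state channel: {r_squared}")
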
Example #5
        #params[-1].grad
        optimizer.step()

        time_meter.update(time.time() - end)
        loss_meter.update(loss.item())


        end = time.time()

    #torch.save(nn_solution.ss_model.state_dict(), os.path.join("models", "model.pkl"))

    # In[Simulation performance]
    x0_fit = np.zeros(2, dtype=np.float32)
    x0_torch_fit = torch.from_numpy(x0_fit)
    with torch.no_grad():
        x_sim_torch_fit = nn_solution.f_sim(x0_torch_fit, u_torch_fit)

    # In[FIT]
    

    fig, ax = plt.subplots(3, 1, sharex=True)
    ax[0].plot(np.array(x_meas_torch_fit[:, 0].detach()), 'k*', label='Measured')
    ax[0].plot(np.array(x_hidden_torch_fit[:, 0].detach()), 'b', label='Hidden')
    ax[0].plot(np.array(x_sim_torch_fit[:, 0].detach()), 'g', label='Sim')
    ax[0].legend()
    ax[0].grid(True)

    ax[1].plot(np.array(x_meas_torch_fit[:, 1].detach()), 'k*', label='Measured')
    ax[1].plot(np.array(x_hidden_torch_fit[:, 1].detach()), 'b', label='Hidden')
    ax[1].plot(np.array(x_sim_torch_fit[:, 1].detach()), 'g', label='Sim')
    optimizer = optim.Adam([
        {'params': params_net, 'lr': lr},
        {'params': params_hidden, 'lr': lr},
    ], lr=lr)

    # Scale loss with respect to the initial one
    with torch.no_grad():
        x0_torch = x_hidden_fit[0, :]
        x_est_torch = nn_solution.f_sim(x0_torch, u_fit_torch)
        err_init = x_est_torch[:, [0]] - y_fit_torch
        scale_error = torch.sqrt(torch.mean((err_init)**2, dim=(0)))

    LOSS_TOT = []
    LOSS_FIT = []
    LOSS_CONSISTENCY = []
    start_time = time.time()
    # Training loop

    #scripted_nn_solution = torch.jit.script(nn_solution)
    for itr in range(0, num_iter):

        optimizer.zero_grad()

        x0_torch = x_hidden_fit[0, :]
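Example #5 treats the hidden state sequence itself as a decision variable: the optimizer above updates params_hidden together with the network weights. How x_hidden_fit and the two parameter lists are built is not shown in the snippet; a minimal sketch of the assumed setup (N and n_x stand for the number of samples and of states) could be:

    # Assumed setup (not shown in the snippet): the hidden state sequence is a
    # trainable tensor, so optim.Adam can update it alongside the model weights.
    # In practice it would be initialized from measured or filtered data rather
    # than from zeros.
    x_hidden_fit = torch.zeros((N, n_x), dtype=torch.float32, requires_grad=True)

    params_net = list(nn_solution.ss_model.parameters())
    params_hidden = [x_hidden_fit]
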
Example #7
    ax[1].legend()
    ax[1].grid(True)

    # Simulate
    y_val = np.copy(y_fit)
    u_val = np.copy(u_fit)

    #x0_val = np.array(x_est[0, :])
    #x0_val[1] = 0.0
    # Initial state had to be estimated, according to the dataset description
    x0_val = x_hidden_fit[0, :].detach().numpy()
    x0_torch_val = torch.from_numpy(x0_val)
    u_torch_val = torch.tensor(u_val)

    with torch.no_grad():
        x_sim_torch = nn_solution.f_sim(x0_torch_val[None, :],
                                        u_torch_val[:, None, :])
        y_sim_torch = x_sim_torch[:, 0]
        x_sim = y_sim_torch.detach().numpy()

    # Simulation plot
    fig, ax = plt.subplots(2, 1, sharex=True, figsize=(6, 7.5))
    #ax[0].plot(time_exp, q_ref, 'k', label=r'$q_{\mathrm{ref}}$')
    ax[0].plot(time_exp, y_val, 'k', label=r'$y_{\mathrm{meas}}$')
    ax[0].plot(time_exp, x_sim[:, 0], 'r', label=r'$\hat y_{\mathrm{sim}}$')
    ax[0].legend(loc='upper right')
    ax[0].grid(True)
    ax[0].set_ylabel("Voltage (V)")

    ax[1].plot(time_exp, u_id, 'k', label='$u_{in}$')
    ax[1].set_xlabel("Time (s)")
    ax[1].set_ylabel("Voltage (V)")
    input_data = u[0:n_fit]
    state_data = x_noise[0:n_fit]
    u_torch = torch.from_numpy(input_data)
    x_true_torch = torch.from_numpy(state_data)

    # Setup neural model structure
    ss_model = NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64)
    nn_solution = NeuralStateSpaceSimulator(ss_model)

    # Setup optimizer
    params = list(nn_solution.ss_model.parameters())
    optimizer = optim.Adam(params, lr=1e-3)

    # Scale loss with respect to the initial one
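    # (scale_error is the per-channel RMS error of the untrained model; dividing
    # by it below makes both state channels contribute comparably to the loss)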
    with torch.no_grad():
        x_est_torch = nn_solution.f_sim(x0_torch, u_torch)
        err_init = x_est_torch - x_true_torch
        scale_error = torch.sqrt(torch.mean((err_init)**2, dim=(0)))

    start_time = time.time()
    LOSS = []
    # Training loop
    for itr in range(1, num_iter + 1):
        optimizer.zero_grad()

        # Perform open-loop simulation
        x_est_torch = nn_solution.f_sim(x0_torch, u_torch)

        # Compute fit loss
        err = x_est_torch - x_true_torch
        err_scaled = err / scale_error
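        # --- Hypothetical completion of the iteration: the snippet is cut off
        # here, so the lines below only follow the pattern of the other training
        # loops on this page and are not the original code. ---
        loss = torch.mean(err_scaled ** 2)  # scaled mean-square simulation error
        LOSS.append(loss.item())
        if itr % 10 == 0:
            print(f"Iter {itr} | Loss {loss.item():.6f}")

        loss.backward()    # gradients flow through the open-loop simulation
        optimizer.step()   # update the network parameters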