# --- Fragment: hidden-state fitting setup (names such as df_data, y_id, ---
# --- u_id and lr are defined earlier in the original script)            ---

# Measured sampling time from the data frame, cast to float32.
# NOTE(review): this line is at a different indentation level than the rest
# of the snippet -- it likely lost its leading indent during extraction.
ts_meas = df_data['Ts'][0].astype(np.float32)
    # Experiment time vector: sample index * sampling time.
    time_exp = np.arange(y_id.size).astype(np.float32) * ts_meas

    # Build initial state estimate: state dimension is 2, and the second
    # state component is initialized from the measured output y_id[:, 0]
    # (presumably the output corresponds to the second state -- confirm).
    x_est = np.zeros((time_exp.shape[0], 2), dtype=np.float32)
    x_est[:, 1] = np.copy(y_id[:, 0])

    # Create torch tensors. The whole hidden-state trajectory is itself an
    # optimization variable (requires_grad=True), optimized jointly with
    # the network parameters.
    x_hidden_fit_torch = torch.tensor(x_est, dtype=torch.float32, requires_grad=True)  # hidden state is an optimization variable
    y_fit_torch = torch.tensor(y_id, dtype=torch.float32)
    u_fit_torch = torch.tensor(u_id, dtype=torch.float32)

    # Build neural state-space model, simulated with forward Euler at the
    # measured sampling time.
    ts_integ = ts_meas
    ss_model = CascadedTanksOverflowNeuralStateSpaceModel(n_feat=100)  #, activation='tanh')
    nn_solution = ForwardEulerSimulator(ss_model, ts=ts_integ)

    # Setup optimizer: two parameter groups -- network weights at the base
    # learning rate, hidden-state variables at 100x that rate.
    params_net = list(ss_model.parameters())
    params_hidden = [x_hidden_fit_torch]
    optimizer = optim.Adam([
        {'params': params_net,    'lr': lr},
        {'params': params_hidden, 'lr': 100*lr},
    ], lr=lr)

    # Scaling factor for the loss (unit scaling here).
    scale_error = torch.tensor([1.0]).float()

    # Per-iteration loss histories, filled in the training loop (which lies
    # past the end of this snippet).
    LOSS = []
    LOSS_FIT = []
    LOSS_CONSISTENCY = []
# ===== Example #2 (snippet separator; the stray "0" on the following line
# was extraction residue from the source page) =====
    # Signal-to-noise ratio of the noisy state data (1e-10 guards against
    # division by zero), also expressed in decibels.
    SNR = P_x / (P_n + 1e-10)
    SNR_db = 10 * np.log10(SNR)

    # Sampling time inferred from the first two time stamps; n_fit is the
    # number of samples used for fitting.
    # NOTE(review): ts_integ is fixed to 1.0 rather than the measured ts --
    # presumably the simulator works in normalized time; confirm against
    # the simulator/model code.
    ts = time_data[1] - time_data[0]
    n_fit = int(t_fit // ts)
    ts_integ = 1.0

    # Fit data to pytorch tensors #
    u_fit = u[0:n_fit]
    x_fit = x_noise[0:n_fit]
    u_fit_torch = torch.from_numpy(u_fit)
    x_fit_torch = torch.from_numpy(x_fit)

    # Setup neural model structure (2 states, 1 input, 64 hidden features)
    ss_model = NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64)
    nn_solution = ForwardEulerSimulator(ss_model)

    # Setup optimizer
    optimizer = optim.Adam(nn_solution.ss_model.parameters(), lr=lr)

    # Scale loss with respect to the initial one: per-state RMS of the
    # one-step increments of the measured states, computed once outside
    # the training loop without tracking gradients.
    with torch.no_grad():
        DX = x_fit_torch[1:, :] - x_fit_torch[0:-1, :]
        scale_error = torch.sqrt(torch.mean(DX**2, dim=0))

    LOSS = []
    start_time = time.time()
    # Training loop (truncated here -- the loop body continues past the end
    # of this snippet)
    for itr in range(0, num_iter):
        optimizer.zero_grad()
# ===== Example #3 (snippet separator; the stray "0" on the following line
# was extraction residue from the source page) =====
    # Select the first n_fit samples for fitting; x_fit_nonoise keeps the
    # noise-free state for reference only.
    u_fit = u[0:n_fit]
    x_fit = x_noise[0:n_fit]
    x_fit_nonoise = x[0:n_fit]  # not used, just for reference
    time_fit = t[0:n_fit]

    # Fit data to pytorch tensors #
    u_torch_fit = torch.from_numpy(u_fit)
    x_meas_torch_fit = torch.from_numpy(x_fit)
    time_torch_fit = torch.from_numpy(time_fit)
    # The hidden-state trajectory is initialized from the noisy state
    # measurements and optimized jointly with the network parameters.
    x_hidden_fit = torch.tensor(
        x_fit, requires_grad=True)  # hidden state is an optimization variable

    # Setup neural model structure (2 states, 1 input, 64 hidden features)
    ss_model = NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64)
    nn_solution = ForwardEulerSimulator(
        ss_model
    )  #ForwardEulerSimulator(ss_model) #ExplicitRKSimulator(ss_model)

    # Setup optimizer: network weights at the base learning rate, the
    # hidden-state variables at 10x that rate.
    # NOTE(review): this optim.Adam(...) call is truncated -- the closing
    # parenthesis (and any remaining kwargs such as lr=) lie past the end
    # of this snippet.
    params_net = list(nn_solution.ss_model.parameters())
    params_hidden = [x_hidden_fit]
    optimizer = optim.Adam([
        {
            'params': params_net,
            'lr': lr
        },
        {
            'params': params_hidden,
            'lr': 10 * lr
        },
    ],
# ===== Example #4 (snippet separator; the stray "0" on the following line
# was extraction residue from the source page) =====
    # Measured sampling time from the data frame; time vector covering the
    # full record.
    ts_meas = df_data['Ts'][0].astype(np.float32)
    time_exp = np.arange(y.size).astype(np.float32) * ts_meas

    # Build validation data: here the validation window spans the whole
    # experiment (t = 0 .. time_exp[-1]).
    t_val_start = 0
    t_val_end = time_exp[-1]
    idx_val_start = int(t_val_start // ts_meas)
    idx_val_end = int(t_val_end // ts_meas)

    y_meas_val = y[idx_val_start:idx_val_end]
    u_val = u[idx_val_start:idx_val_end]
    time_val = time_exp[idx_val_start:idx_val_end]

    # Setup neural model structure, then load the previously trained
    # weights and the fitted hidden-state trajectory from the "models"
    # folder (saved with torch.save despite the ".pkl" extension).
    ss_model = CascadedTanksOverflowNeuralStateSpaceModel(n_feat=100)
    nn_solution = ForwardEulerSimulator(
        ss_model, ts=ts_meas)  #ForwardEulerSimulator(ss_model, ts=ts)
    nn_solution.ss_model.load_state_dict(
        torch.load(os.path.join("models", model_name + ".pkl")))
    x_hidden_fit = torch.load(os.path.join("models", hidden_name + ".pkl"))

    # Evaluate the model in open-loop simulation against validation data.
    # The initial state is the first sample of the estimated hidden-state
    # trajectory.
    x_0 = x_hidden_fit[0, :].detach().numpy(
    )  # initial state had to be estimated, according to the dataset description
    #x_0 = np.array([u_val[0], 0.0]).astype(np.float32)
    with torch.no_grad():
        # u_val is indexed u_val[:, None, :], so it is assumed to be 2-D
        # (time, n_u) -- TODO confirm. A singleton batch dimension is
        # inserted for the simulator and removed again with squeeze(1).
        x_sim_val_torch = nn_solution(torch.tensor(x_0[None, :]),
                                      torch.tensor(u_val[:, None, :]))
        x_sim_val_torch = x_sim_val_torch.squeeze(1)

    # Simulated output: the second state component, kept 2-D (column
    # vector) via the [1] list index.
    x_sim_val = np.array(x_sim_val_torch)
    y_sim_val = x_sim_val[:, [1]]