        batch_t = torch.tensor(time_fit[batch_idx])       # time stamps for each subsequence
        batch_x0_hidden = x_hidden_fit[batch_start, :]    # (hidden) initial state of each subsequence
        batch_x_hidden = x_hidden_fit[batch_idx]          # hidden states along each subsequence
        batch_u = torch.tensor(u_fit[batch_idx])          # inputs along each subsequence
        batch_x = torch.tensor(x_fit[batch_idx])          # measured states along each subsequence

        return batch_t, batch_x0_hidden, batch_u, batch_x, batch_x_hidden
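
    # For context, a minimal sketch of the kind of one-step model fit below
    # (a hypothetical stand-in, not the actual NeuralStateSpaceModel): an MLP
    # that maps the current state x and input u to the state increment dx.
    class NeuralStateSpaceModelSketch(torch.nn.Module):  # hypothetical name
        def __init__(self, n_x, n_u, n_feat):
            super().__init__()
            self.net = torch.nn.Sequential(
                torch.nn.Linear(n_x + n_u, n_feat),  # concatenated (x, u) -> features
                torch.nn.ReLU(),
                torch.nn.Linear(n_feat, n_x),        # features -> state increment dx
            )

        def forward(self, x, u):
            return self.net(torch.cat((x, u), dim=-1))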


    ss_model = NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64)  # one-step state-space network
    nn_solution = NeuralStateSpaceSimulator(ss_model)          # multistep simulator around it

    # Jointly optimize the network weights and the hidden state sequence x_hidden_fit
    params = list(nn_solution.ss_model.parameters()) + [x_hidden_fit]
    optimizer = optim.Adam(params, lr=lr)

    # Scale the loss with respect to the initial (untrained) simulation error
    with torch.no_grad():
        batch_t, batch_x0_hidden, batch_u, batch_x, batch_x_hidden = get_batch(batch_size, seq_len)
        batch_x_sim = nn_solution.f_sim_multistep(batch_x0_hidden, batch_u)
        err_init = batch_x_sim - batch_x
        scale_error = torch.sqrt(torch.mean(err_init**2, dim=(0, 1)))  # per-state RMS error
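
    # For intuition, a minimal sketch of what a multistep simulator can do
    # (an assumed illustration, not necessarily the actual f_sim_multistep):
    # starting from x0, the one-step model is applied repeatedly along the sequence.
    def f_sim_multistep_sketch(ss_model, x0_batch, u_batch):  # hypothetical helper
        x_step = x0_batch
        x_sim_list = []
        for u_step in u_batch.split(1):               # iterate over the time dimension
            x_sim_list.append(x_step)
            dx = ss_model(x_step, u_step.squeeze(0))  # network predicts the state increment
            x_step = x_step + dx                      # forward-Euler style update
        return torch.stack(x_sim_list, 0)             # (seq_len, batch_size, n_x)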

    LOSS = []
    start_time = time.time()

    # Training loop
    for itr in range(0, num_iter):

        optimizer.zero_grad()
        batch_t, batch_x0_hidden, batch_u, batch_x, batch_x_hidden = get_batch(batch_size, seq_len)
        batch_x_sim = nn_solution.f_sim_multistep(batch_x0_hidden, batch_u)

        # Fit loss
        err_fit = batch_x_sim - batch_x
        err_fit_scaled = err_fit / scale_error
        loss_fit = torch.mean(err_fit_scaled**2)
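        # i.e. the mean over (time, batch, state) of ((x_sim - x) / scale)**2, so each
        # state channel contributes on an equal footing regardless of its physical units.
        # A sketch of how the loop body typically concludes (assumed for illustration):
        loss_fit.backward()
        optimizer.step()
        LOSS.append(loss_fit.item())
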
Example #2

    def get_batch(batch_size, seq_len):
        # Draw batch_size random subsequences of length seq_len from the training data
        batch_start = np.random.choice(np.arange(y_fit.shape[0] - seq_len, dtype=np.int64),
                                       batch_size, replace=False)
        batch_idx = batch_start[:, np.newaxis] + np.arange(seq_len)  # (batch_size, seq_len) index matrix
        #batch_idx = batch_idx.T  # transpose indexes to obtain batches with structure (m, q, n_x)

        # Extract batch data
        batch_t = torch.tensor(time_fit[batch_idx])
        batch_x0_hidden = x_hidden_fit[batch_start, :]
        batch_x_hidden = x_hidden_fit[batch_idx]
        batch_u = torch.tensor(u_fit[batch_idx])
        batch_y = torch.tensor(y_fit[batch_idx])

        return batch_t, batch_x0_hidden, batch_u, batch_y, batch_x_hidden
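
    # batch_x_hidden carries the optimized hidden-state sequences for the same indices;
    # it is compared against the simulated states in the consistency term of the loss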

    # Scale loss with respect to the initial one
    with torch.no_grad():
        batch_t, batch_x0_hidden, batch_u, batch_y, batch_x_hidden = get_batch(batch_size, seq_len)
        batch_x_sim = nn_solution.f_sim_multistep(batch_x0_hidden, batch_u)
        #traced_nn_solution = torch.jit.trace(nn_solution, (batch_x0_hidden, batch_u))
        err_init = batch_x_sim - batch_y
        scale_error = torch.sqrt(torch.mean(err_init**2, dim=(0, 1)))

    LOSS_TOT = []
    LOSS_FIT = []
    LOSS_CONSISTENCY = []
    start_time = time.time()
    # Training loop

    #scripted_nn_solution = torch.jit.script(nn_solution)
    for itr in range(0, num_iter):

        optimizer.zero_grad()
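        # A minimal sketch of how the loop body can continue, matching the
        # LOSS_FIT / LOSS_CONSISTENCY bookkeeping above (assumed for illustration;
        # `alpha` is a hypothetical trade-off weight, not a value from this script):
        batch_x_sim = nn_solution.f_sim_multistep(batch_x0_hidden, batch_u)

        # Fit loss: simulated states vs. measured outputs, scaled as above
        err_fit = (batch_x_sim - batch_y) / scale_error
        loss_fit = torch.mean(err_fit**2)

        # Consistency loss: simulated states vs. the jointly optimized hidden states
        err_consistency = (batch_x_sim - batch_x_hidden) / scale_error
        loss_consistency = torch.mean(err_consistency**2)

        loss = loss_fit + alpha * loss_consistency
        loss.backward()
        optimizer.step()

        LOSS_TOT.append(loss.item())
        LOSS_FIT.append(loss_fit.item())
        LOSS_CONSISTENCY.append(loss_consistency.item())
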
Example #3

    # Inverse scaling: here scale_error will multiply the error (cf. the division above)
    scale_error = 1e0 / torch.sqrt(torch.mean(zoh_error**2, dim=(0, 1)))

    # In[Fit model]
    ii = 0
    loss = None
    for itr in range(0, num_iter):

        if itr > 0 and itr % test_freq == 0:
            with torch.no_grad():
                print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
                ii += 1
        optimizer.zero_grad()
        batch_t, batch_x0, batch_u, batch_x = get_batch(batch_size, seq_len)

        batch_x_pred = nn_solution.f_sim_multistep(batch_x0, batch_u)
        err = batch_x - batch_x_pred
        err_scaled = err * scale_error   # scale_error is an inverse RMS here (see above)
        loss = torch.mean(err_scaled**2)
        loss.backward()                  # backprop through the full seq_len-step rollout
        optimizer.step()

    end = time.time()  # wall-clock time at the end of training

    # In[Save model parameters]
#    model_name = "model_SS_150step_nonoise.pkl"
    model_name = "model_SS_200step_nonoise.pkl"

    if not os.path.exists("models"):
        os.makedirs("models")
    torch.save(nn_solution.ss_model.state_dict(),
               os.path.join("models", model_name))
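
    # To reload the trained weights later:
    # ss_model.load_state_dict(torch.load(os.path.join("models", model_name)))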