Example #1
0
    # Load input (u) and output (y) records from an HDF5 dataset.
    # NOTE(review): `filename` and `dataset_name` are defined elsewhere in the script.
    h5_data = h5py.File(filename, 'r')
    u = np.array(h5_data[dataset_name]['u'])
    v = np.array(h5_data[dataset_name]['y'])
    y = np.array(h5_data[dataset_name]['y'])

    #    y = (y - np.mean(y[[0], :, :], axis=-2))/(np.std(y[[0], :, :], axis=-2))
    # Data layout assumed (batch, sequence, channels) — inferred from the
    # three-way indexing below; confirm against the dataset writer.
    batch_size = u.shape[0]
    seq_len = u.shape[1]
    n_u = u.shape[2]
    n_y = y.shape[2]

    # In[To tensors]
    # float32 torch tensors for training.
    u_torch = torch.tensor(u, dtype=torch.float32)
    y_torch = torch.tensor(y, dtype=torch.float32)

    # In[Deterministic model]
    # Wiener structure: G (linear dynamics, orders n_b/n_a, delay n_k)
    # followed by F (static non-linearity with a 10-unit hidden layer).
    G = SisoLinearDynamicalOperator(n_b, n_a, n_k=n_k)
    F = SisoStaticNonLinearity(n_hidden=10)

    # In[Log-likelihood]

    # Adam over both blocks; every parameter group uses the same rate `lr`,
    # so the trailing lr=lr default is redundant but harmless.
    optimizer = torch.optim.Adam([
        {
            'params': G.parameters(),
            'lr': lr
        },
        {
            'params': F.parameters(),
            'lr': lr
        },
    ],
                                 lr=lr)
    # Training slices: first n_fit samples, keeping every `decimate`-th one.
    # NOTE(review): v, t, bins_fit, n_fit, decimate come from earlier in the script.
    v_fit = v[0:n_fit:decimate]
    y_fit = y[0:n_fit:decimate]
    u_fit = u[0:n_fit:decimate]
    t_fit = t[0:n_fit:decimate]

    # In[Prepare training tensors]
    # [None, :, :] adds a leading batch dimension of size 1.
    u_fit_torch = torch.tensor(u_fit[None, :, :],
                               dtype=torch.float,
                               requires_grad=False)
    bins_fit_torch = torch.tensor(bins_fit[None, :, :],
                                  dtype=torch.float,
                                  requires_grad=False)
    v_fit_torch = torch.tensor(v_fit[None, :, :], dtype=torch.float)

    # In[Prepare model]
    # Wiener-Hammerstein cascade: G1 (linear) -> F_nl (static tanh NN) -> G2 (linear).
    G1 = SisoLinearDynamicalOperator(n_b, n_a, n_k=1)
    F_nl = SisoStaticNonLinearity(n_hidden=10, activation='tanh')
    G2 = SisoLinearDynamicalOperator(n_b, n_a)

    # Trainable log of the noise standard deviation, initialized at
    # log(1.0) = 0 (i.e. unit sigma).
    log_sigma_hat = torch.tensor(
        np.log(1.0),
        requires_grad=True)  # torch.randn(1, requires_grad = True)

    def model(u_in):
        """Simulate the G1 -> F_nl -> G2 cascade on input `u_in`.

        Returns (y_hat, y1_nl, y1_lin): final output plus both
        intermediate signals, all as torch tensors.
        """
        # BUG FIX: the original body ignored `u_in` and always read the
        # closed-over u_fit_torch. Using the argument makes the function
        # callable on other inputs; training behavior is unchanged as long
        # as callers pass u_fit_torch.
        y1_lin = G1(u_in)
        y1_nl = F_nl(y1_lin)
        y_hat = G2(y1_nl)
        return y_hat, y1_nl, y1_lin

    # In[Setup optimizer]
    optimizer = torch.optim.Adam([
    # Time vector derived from the sampling frequency fs.
    N = y.size
    ts = 1/fs
    t = np.arange(N)*ts

    # In[Get fit data]
    # Decimated training slices (first n_fit samples, every `decimate`-th).
    y_fit = y[:n_fit:decimate]
    u_fit = u[:n_fit:decimate]
    t_fit = t[0:n_fit:decimate]

    # In[Prepare data]
    # [None, ...] adds a leading batch dimension of size 1.
    u_fit_torch = torch.tensor(u_fit[None, ...], dtype=torch.float, requires_grad=False)
    y_fit_torch = torch.tensor(y_fit[None, ...], dtype=torch.float, requires_grad=False)
    # The hidden output is itself a decision variable (requires_grad=True);
    # it is handed to the optimizer below.
    y_hidden_torch = torch.tensor(y_fit[None, ...], dtype=torch.float, requires_grad=True)
    # optimize on the output to manage the feedback connection
    # In[First dynamical system custom defined]
    G1 = SisoLinearDynamicalOperator(n_b, n_a, n_k)
    # Static non-linearity
    F_nl = SisoStaticNonLinearity()

    # Setup optimizer
    # G1 and F_nl parameters train at `lr`; the hidden output tensor gets
    # its own fixed 1e-3 learning rate.
    optimizer = torch.optim.Adam([
        {'params': G1.parameters(), 'lr': lr},
        {'params': F_nl.parameters(), 'lr': lr},
        {'params': [y_hidden_torch], 'lr': 1e-3},
    ], lr=lr)


    # In[Structure]
    #   u ---> ----> G ------> y_lin
    #         |
    #         |
    # Colored output disturbance: white noise e filtered through the
    # transfer function Hud (defined elsewhere) via forced_response.
    e = np.random.randn(N_sim_d)
    te = np.arange(N_sim_d) * ts
    d, u = control.forced_response(Hud, te, e)
    d_fast = d[n_skip_d:]  # drop the initial transient samples
    d_fast = d_fast.reshape(-1, 1)  # column vector, to broadcast against y_fit
    y_fit_clean = np.copy(y_fit)  # keep the noise-free output for evaluation
    y_fit = y_fit + d_fast  # corrupt the training output with the disturbance

    # In[Prepare training tensors]
    # [None, :, :] adds a leading batch dimension of size 1.
    u_fit_torch = torch.tensor(u_fit[None, :, :],
                               dtype=torch.float,
                               requires_grad=False)
    y_fit_torch = torch.tensor(y_fit[None, :, :], dtype=torch.float)

    # In[Prepare model]
    # Wiener-Hammerstein cascade: G1 (linear) -> F_nl (static tanh NN) -> G2 (linear).
    G1 = SisoLinearDynamicalOperator(n_b, n_a, n_k=1)
    F_nl = SisoStaticNonLinearity(n_hidden=10, activation='tanh')
    G2 = SisoLinearDynamicalOperator(n_b, n_a)

    def model(u_in):
        """Simulate the G1 -> F_nl -> G2 cascade on input `u_in`.

        Returns (y_hat, y1_nl, y1_lin): final output plus both
        intermediate signals, all as torch tensors.
        """
        # BUG FIX: the original body ignored `u_in` and always read the
        # closed-over u_fit_torch. Using the argument makes the function
        # callable on other inputs; training behavior is unchanged as long
        # as callers pass u_fit_torch.
        y1_lin = G1(u_in)
        y1_nl = F_nl(y1_lin)
        y_hat = G2(y1_nl)
        return y_hat, y1_nl, y1_lin

    # In[Setup optimizer]
    optimizer_ADAM = torch.optim.Adam([
        {
            'params': G1.parameters(),
            'lr': lr_ADAM
        },
Example #5
0
    # Time vector derived from a fixed 50 Hz sampling rate.
    N = y.size
    fs = 50  # Sampling frequency (Hz)
    ts = 1 / fs
    t = np.arange(N) * ts

    # Fit data
    # Decimated slices; the output is shifted by a constant 1.5 offset
    # (presumably removing a known DC level — confirm against the dataset).
    y_fit = y[:n_fit:decimate] - 1.5
    u_fit = u[:n_fit:decimate]
    t_fit = t[0:n_fit:decimate]

    # In[Prepare data]
    # [None, :, :] adds a leading batch dimension of size 1.
    u_fit_torch = torch.tensor(u_fit[None, :, :], dtype=torch.float)
    y_fit_torch = torch.tensor(y_fit[None, :, :], dtype=torch.float)

    # In[Setup model]
    # Wiener-Hammerstein cascade: G1 (linear) -> F (static tanh NN) -> G2 (linear),
    # with fixed orders n_b=n_a=4 and one-step delay on both linear blocks.
    G1 = SisoLinearDynamicalOperator(n_b=4, n_a=4, n_k=1)
    F = SisoStaticNonLinearity(n_hidden=16, activation='tanh')
    G2 = SisoLinearDynamicalOperator(n_b=4, n_a=4, n_k=1)

    # Setup optimizer
    # Both parameter groups train at the same rate `lr`.
    optimizer = torch.optim.Adam([
        {
            'params': G1.parameters(),
            'lr': lr
        },
        {
            'params': F.parameters(),
            'lr': lr
        },
    ],
                                 lr=lr)
Example #6
0
    # Time vector from the sampling frequency.
    ts = 1 / fs
    t = np.arange(N) * ts

    # Fit data
    # Decimated training slices (first n_fit samples, every `decimate`-th).
    y_fit = y[:n_fit:decimate]
    u_fit = u[:n_fit:decimate]
    t_fit = t[0:n_fit:decimate]

    # Prepare data
    # [None, :, :] adds a leading batch dimension of size 1.
    u_fit_torch = torch.tensor(u_fit[None, :, :],
                               dtype=torch.float,
                               requires_grad=False)
    y_fit_torch = torch.tensor(y_fit[None, :, :], dtype=torch.float)

    # Second-order dynamical system custom defined
    G1 = SisoLinearDynamicalOperator(n_b=n_b, n_a=n_a)
    # Zero initial conditions for the filter (one row per batch element;
    # n_a past outputs and n_b past inputs).
    y_init_1 = torch.zeros((n_batch, n_a), dtype=torch.float)
    u_init_1 = torch.zeros((n_batch, n_b), dtype=torch.float)

    # Static non-linearity
    F_nl = StaticNonLin()

    # Setup optimizer
    optimizer = torch.optim.Adam([
        {
            'params': G1.parameters(),
            'lr': 1e-4
        },
        {
            'params': F_nl.parameters(),
            'lr': 1e-4
Example #7
0
    # Add i.i.d. Gaussian measurement noise to the state trajectory x.
    x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
    x_noise = x_noise.astype(np.float32)

    # In[Output]
    # Measured output = first state component ([0] as a list keeps 2-D shape).
    y_noise = np.copy(x_noise[:, [0]])
    y_nonoise = np.copy(x[:, [0]])

    # Prepare data
    # [None, ...] adds a leading batch dimension of size 1.
    u_torch = torch.tensor(u[None, ...],
                           dtype=torch.float,
                           requires_grad=False)
    y_meas_torch = torch.tensor(y_noise[None, ...], dtype=torch.float)
    y_true_torch = torch.tensor(y_nonoise[None, ...], dtype=torch.float)

    # In[Second-order dynamical system custom defined]
    G = SisoLinearDynamicalOperator(n_b, n_a)

    # Initialize G's transfer-function coefficients to small, stable values.
    # no_grad: plain in-place initialization, kept out of the autograd graph.
    with torch.no_grad():
        G.b_coeff[0, 0, 0] = 0.01
        G.b_coeff[0, 0, 1] = 0.0

        G.a_coeff[0, 0, 0] = -0.9
        # BUG FIX: the original wrote G.b_coeff[0, 0, 1] = 0.01 here, silently
        # overwriting the 0.0 assigned two lines above and leaving
        # a_coeff[0, 0, 1] untouched. The symmetric two-b / two-a
        # initialization pattern indicates a_coeff was intended.
        G.a_coeff[0, 0, 1] = 0.01

    # In[Setup optimizer]
    optimizer = torch.optim.Adam([
        {
            'params': G.parameters(),
            'lr': lr
        },
    ],
Example #8
0
    # Train on a single example
    # Keep only the first batch element; list indexing ([0]) preserves the
    # leading batch dimension.
    u = u[[0], ...]
    y = y[[0], ...]

    # Data layout assumed (batch, sequence, channels) — inferred from the
    # three-way indexing below; confirm upstream.
    batch_size = u.shape[0]
    seq_len = u.shape[1]
    n_u = u.shape[2]
    n_y = y.shape[2]

    # In[To tensors]
    u_torch = torch.tensor(u, dtype=torch.float32)
    y_torch = torch.tensor(y, dtype=torch.float32)

    # In[Deterministic model]
    # Wiener model: G (linear dynamics) followed by F (static non-linearity);
    # weights are restored from a previously saved model folder.
    G = SisoLinearDynamicalOperator(n_b, n_a, n_k=n_k)
    F = SisoStaticNonLinearity(n_hidden=10)
    model_folder = os.path.join("models", model_name)
    G.load_state_dict(torch.load(os.path.join(model_folder, "G.pkl")))
    F.load_state_dict(torch.load(os.path.join(model_folder, "F.pkl")))

    # In[Simulate]
    y_lin = G(u_torch)
    y_nl = F(y_lin)
    y_hat = y_nl

    # In[Detach]
    # Leave the autograd graph and convert back to numpy for plotting.
    y_hat = y_hat.detach().numpy()

    # In[Predict]
    # NOTE(review): y0 is not defined in this fragment — presumably the
    # reference/true output; confirm against the full script.
    plt.plot(y0[0, :, 0], 'k', label='y0')
Example #9
0
    # Load the input record and sampling frequency from dataframe columns.
    u = np.array(df_X[COL_U], dtype=np.float32)
    fs = np.array(df_X[COL_F].iloc[0], dtype=np.float32).item()
    N = y_meas.size
    ts = 1 / fs
    t = np.arange(N) * ts

    # Fit/test split boundaries, expressed in samples (not seconds).
    t_fit_start = 0
    t_fit_end = 100000
    t_test_start = 100000
    t_test_end = 188000
    t_skip = 1000  # skip for statistics

    # In[Instantiate models]

    # Create models
    # Wiener-Hammerstein cascade: G1 (delay 1) -> F_nl (static tanh NN) -> G2 (no delay).
    G1 = SisoLinearDynamicalOperator(n_b=n_b, n_a=n_a, n_k=1)
    G2 = SisoLinearDynamicalOperator(n_b=n_b, n_a=n_a, n_k=0)
    F_nl = SisoStaticNonLinearity(n_hidden=10, activation='tanh')

    model_folder = os.path.join("models", model_name)
    # Create model parameters
    # Restore previously trained weights for all three blocks.
    G1.load_state_dict(torch.load(os.path.join(model_folder, "G1.pkl")))
    F_nl.load_state_dict(torch.load(os.path.join(model_folder, "F_nl.pkl")))
    G2.load_state_dict(torch.load(os.path.join(model_folder, "G2.pkl")))

    # In[Predict]

    # Simulate the full cascade on the whole input record
    # ([None, :, :] adds a leading batch dimension of size 1).
    u_torch = torch.tensor(u[None, :, :])
    y1_lin = G1(u_torch)
    y1_nl = F_nl(y1_lin)
    y_hat = G2(y1_nl)
Example #10
0
    # scale state
    # Per-component normalization of the 2-state trajectory.
    x = x / np.array([100.0, 10.0])

    # Add measurement noise
    # Noise level is proportional to the add_noise flag/gain.
    std_noise_V = add_noise * 0.1
    #y_nonoise = np.copy(1 + x[:, [0]] + x[:, [0]]**2)
    y_nonoise = np.copy(x[:, [0, 1]])  #np.copy(1 + x[:, [0]] ** 3)
    y_noise = y_nonoise + np.random.randn(*y_nonoise.shape) * std_noise_V

    # Prepare data
    # [None, :, :] adds a leading batch dimension of size 1.
    u_torch = torch.tensor(u[None, :, :],
                           dtype=torch.float,
                           requires_grad=False)
    y_meas_torch = torch.tensor(y_noise[None, :, :], dtype=torch.float)
    y_true_torch = torch.tensor(y_nonoise[None, :, :], dtype=torch.float)
    # Wiener model: linear dynamics G followed by a static non-linearity.
    G = SisoLinearDynamicalOperator(n_b, n_a, n_k=1)
    nn_static = SisoStaticNonLinearity()

    # Setup optimizer
    # Both parameter groups train at the same rate `lr`.
    params_lin = G.parameters()
    optimizer = torch.optim.Adam([{
        'params': params_lin,
        'lr': lr
    }, {
        'params': nn_static.parameters(),
        'lr': lr
    }],
                                 lr=lr)

    # In[Train]
    LOSS = []  # per-iteration training-loss history
    # Colored output disturbance: white noise e filtered through the
    # transfer function Hud (defined elsewhere), initial transient dropped.
    e = np.random.randn(N_sim_d)
    te = np.arange(N_sim_d) * ts
    d, u = control.forced_response(Hud, te, e)
    d_fast = d[n_skip_d:]
    d_fast = d_fast.reshape(-1, 1)  # column vector, to broadcast against y_fit
    y_fit_clean = np.copy(y_fit)  # keep the noise-free output for evaluation
    y_fit = y_fit + d_fast  # corrupt the training output with the disturbance

    # In[Prepare training tensors]
    # [None, :, :] adds a leading batch dimension of size 1.
    u_fit_torch = torch.tensor(u_fit[None, :, :],
                               dtype=torch.float,
                               requires_grad=False)
    y_fit_torch = torch.tensor(y_fit[None, :, :], dtype=torch.float)

    # In[Prepare model]
    # Wiener-Hammerstein cascade: G1 (linear) -> F_nl (static tanh NN) -> G2 (linear).
    G1 = SisoLinearDynamicalOperator(n_b, n_a, n_k=1)
    F_nl = SisoStaticNonLinearity(n_hidden=10, activation='tanh')
    G2 = SisoLinearDynamicalOperator(n_b, n_a)

    # H_inv: order-2 linear filter with one-step delay; the name suggests an
    # inverse noise model — confirm against the training loop that uses it.
    H_inv = SisoLinearDynamicalOperator(2, 2, n_k=1)

    def model(u_in):
        """Simulate the G1 -> F_nl -> G2 cascade on input `u_in`.

        Returns (y_hat, y1_nl, y1_lin): final output plus both
        intermediate signals, all as torch tensors.
        """
        # BUG FIX: the original body ignored `u_in` and always read the
        # closed-over u_fit_torch. Using the argument makes the function
        # callable on other inputs; training behavior is unchanged as long
        # as callers pass u_fit_torch.
        y1_lin = G1(u_in)
        y1_nl = F_nl(y1_lin)
        y_hat = G2(y1_nl)
        return y_hat, y1_nl, y1_lin

    # In[Setup optimizer]
    optimizer_ADAM = torch.optim.Adam([
        {
            'params': G1.parameters(),