Code Example #1
    n_u = u.shape[2]  # number of input channels
    n_y = y.shape[2]  # number of output channels

    # In[To tensors]
    u_torch = torch.tensor(u, dtype=torch.float32)
    y_torch = torch.tensor(y, dtype=torch.float32)

    # In[Deterministic model]
    G = SisoLinearDynamicalOperator(n_b, n_a, n_k=n_k)  # linear dynamical block
    F = SisoStaticNonLinearity(n_hidden=10)             # static nonlinearity

    # In[Log-likelihood]

    optimizer = torch.optim.Adam([
        {'params': G.parameters(), 'lr': lr},
        {'params': F.parameters(), 'lr': lr},
    ], lr=lr)

    # In[Train]
    LOSS = []
    start_time = time.time()
    for itr in range(0, num_iter):

        optimizer.zero_grad()
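        # --- Hedged sketch: the snippet is truncated here. ---
        # A plausible continuation of the training step for this structure
        # (linear block G followed by static nonlinearity F); the simulation
        # order and the MSE loss below are assumptions, not the original code.
        y_lin = G(u_torch)
        y_hat = F(y_lin)
        loss = torch.mean((y_hat - y_torch) ** 2)  # MSE loss (assumed)
        loss.backward()
        optimizer.step()
        LOSS.append(loss.item())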
Code Example #2
    G2 = SisoLinearDynamicalOperator(n_b, n_a)

    # Log of the noise standard deviation, estimated jointly with the model
    log_sigma_hat = torch.tensor(np.log(1.0), requires_grad=True)
    # alternative initialization: torch.randn(1, requires_grad=True)

    def model(u_in):
        y1_lin = G1(u_in)
        y1_nl = F_nl(y1_lin)
        y_hat = G2(y1_nl)
        return y_hat, y1_nl, y1_lin

    # In[Setup optimizer]
    optimizer = torch.optim.Adam([
        {'params': G1.parameters(), 'lr': lr},
        {'params': G2.parameters(), 'lr': lr},
        {'params': F_nl.parameters(), 'lr': lr},
        {'params': [log_sigma_hat], 'lr': 2e-5},  # slower rate for the noise parameter
    ], lr=lr)
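    # --- Hedged sketch: the training loop is not shown in this snippet. ---
    # A plausible step using log_sigma_hat in a Gaussian negative
    # log-likelihood (up to a constant term); y_fit_torch and num_iter are
    # assumed names from the surrounding script, not the original code.
    for itr in range(num_iter):
        optimizer.zero_grad()
        y_hat, y1_nl, y1_lin = model(u_fit_torch)
        err = y_fit_torch - y_hat
        sigma_hat = torch.exp(log_sigma_hat)
        loss = torch.sum(err ** 2) / (2 * sigma_hat ** 2) + err.numel() * log_sigma_hat
        loss.backward()
        optimizer.step()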
Code Example #3
    # In[Prepare model]
    # Wiener-Hammerstein structure: G1 -> F_nl -> G2
    G1 = SisoLinearDynamicalOperator(n_b, n_a, n_k=1)
    F_nl = SisoStaticNonLinearity(n_hidden=10, activation='tanh')
    G2 = SisoLinearDynamicalOperator(n_b, n_a)

    def model(u_in):
        y1_lin = G1(u_in)
        y1_nl = F_nl(y1_lin)
        y_hat = G2(y1_nl)
        return y_hat, y1_nl, y1_lin

    # In[Setup optimizer]
    optimizer_ADAM = torch.optim.Adam([
        {'params': G1.parameters(), 'lr': lr_ADAM},
        {'params': G2.parameters(), 'lr': lr_ADAM},
        {'params': F_nl.parameters(), 'lr': lr_ADAM},
    ], lr=lr_ADAM)

    optimizer_LBFGS = torch.optim.LBFGS(
        list(G1.parameters()) + list(G2.parameters()) + list(F_nl.parameters()),
        lr=lr_LBFGS)  # lr_LBFGS: LBFGS step size, assumed defined alongside lr_ADAM
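    # --- Hedged sketch: the two-stage training suggested by this setup. ---
    # LBFGS in PyTorch requires a closure that re-evaluates the loss.
    # num_iter_ADAM, num_iter_LBFGS, and y_fit_torch are hypothetical names;
    # the MSE loss is an assumption, not the original code.
    def closure():
        optimizer_LBFGS.zero_grad()
        y_hat, _, _ = model(u_fit_torch)
        loss = torch.mean((y_hat - y_fit_torch) ** 2)
        loss.backward()
        return loss

    # Adam warm-up
    for itr in range(num_iter_ADAM):
        optimizer_ADAM.zero_grad()
        y_hat, _, _ = model(u_fit_torch)
        loss = torch.mean((y_hat - y_fit_torch) ** 2)
        loss.backward()
        optimizer_ADAM.step()

    # LBFGS refinement
    for itr in range(num_iter_LBFGS):
        optimizer_LBFGS.step(closure)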
Code Example #4
    u_fit = u[0:n_fit:decimate]
    t_fit = t[0:n_fit:decimate]

    # In[Prepare data]
    u_fit_torch = torch.tensor(u_fit[None, ...], dtype=torch.float, requires_grad=False)
    y_fit_torch = torch.tensor(y_fit[None, ...], dtype=torch.float, requires_grad=False)
    # y_hidden is a free optimization variable: optimizing over the output
    # manages the feedback connection (the loop is cut; see Structure below)
    y_hidden_torch = torch.tensor(y_fit[None, ...], dtype=torch.float, requires_grad=True)

    # In[First dynamical system (custom defined)]
    G1 = SisoLinearDynamicalOperator(n_b, n_a, n_k)
    # Static non-linearity
    F_nl = SisoStaticNonLinearity()

    # Setup optimizer
    optimizer = torch.optim.Adam([
        {'params': G1.parameters(), 'lr': lr},
        {'params': F_nl.parameters(), 'lr': lr},
        {'params': [y_hidden_torch], 'lr': 1e-3},
    ], lr=lr)


    # In[Structure]
    #   u ----> G1 ----> y_lin
    #                      |
    #   y_nl <--- F_nl <--- y_hidden   (y_hidden = y_lin, enforced by loss_consistency)
    #
    # The feedback loop is cut and the difference y_lin - y_hidden is penalized.

    # In[Train]
    LOSS = []
    LOSS_FIT = []
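    # --- Hedged sketch: the training loop itself is not shown here. ---
    # A plausible step for the cut-feedback scheme above: fit the measured
    # output and penalize the consistency gap y_lin - y_hidden. The
    # interconnection G1(u - y_nl), the weight alpha, and num_iter are
    # assumptions, not the original code.
    alpha = 1.0  # consistency weight (hypothetical value)
    for itr in range(num_iter):
        optimizer.zero_grad()
        y_nl = F_nl(y_hidden_torch)
        y_lin = G1(u_fit_torch - y_nl)  # assumed negative-feedback interconnection
        loss_fit = torch.mean((y_hidden_torch - y_fit_torch) ** 2)
        loss_consistency = torch.mean((y_lin - y_hidden_torch) ** 2)
        loss = loss_fit + alpha * loss_consistency
        LOSS.append(loss.item())
        LOSS_FIT.append(loss_fit.item())
        loss.backward()
        optimizer.step()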
Code Example #5
    # In[Second-order dynamical system (custom defined)]
    G = SisoLinearDynamicalOperator(n_b, n_a)
    H_inv = SisoLinearDynamicalOperator(2, 2, n_k=1)  # inverse noise model

    # Initialize G's transfer-function coefficients to a stable starting point
    with torch.no_grad():
        G.b_coeff[0, 0, 0] = 0.01
        G.b_coeff[0, 0, 1] = 0.0

        G.a_coeff[0, 0, 0] = -0.9
        G.a_coeff[0, 0, 1] = 0.01

    # In[Setup optimizer]
    optimizer = torch.optim.Adam([
        {'params': G.parameters(), 'lr': lr},
        {'params': H_inv.parameters(), 'lr': lr},
    ], lr=lr)

    # In[Train]
    LOSS = []
    start_time = time.time()
    for itr in range(0, num_iter):

        optimizer.zero_grad()
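        # --- Hedged sketch: the snippet is truncated here. ---
        # A plausible prediction-error step: filter the simulation error
        # through the inverse noise model H_inv and minimize its power.
        # u_torch and y_meas_torch are assumed names for the prepared data,
        # and this use of H_inv is an assumption, not the original code.
        y_sim = G(u_torch)
        e = y_meas_torch - y_sim
        e_white = H_inv(e)  # whitened residual (assumed)
        loss = torch.mean(e_white ** 2)
        loss.backward()
        optimizer.step()
        LOSS.append(loss.item())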
Code Example #6
File: RLC_wiener_fit.py, Project: jhurreaq/dynonet
    std_noise_V = add_noise * 0.1
    # y_nonoise = np.copy(1 + x[:, [0]] + x[:, [0]]**2)
    y_nonoise = np.copy(x[:, [0, 1]])  # alternative: np.copy(1 + x[:, [0]] ** 3)
    y_noise = y_nonoise + np.random.randn(*y_nonoise.shape) * std_noise_V

    # Prepare data
    u_torch = torch.tensor(u[None, :, :],
                           dtype=torch.float,
                           requires_grad=False)
    y_meas_torch = torch.tensor(y_noise[None, :, :], dtype=torch.float)
    y_true_torch = torch.tensor(y_nonoise[None, :, :], dtype=torch.float)
    # Wiener model: linear dynamical block followed by a static nonlinearity
    G = SisoLinearDynamicalOperator(n_b, n_a, n_k=1)
    nn_static = SisoStaticNonLinearity()

    # Setup optimizer
    optimizer = torch.optim.Adam([
        {'params': G.parameters(), 'lr': lr},
        {'params': nn_static.parameters(), 'lr': lr},
    ], lr=lr)

    # In[Train]
    LOSS = []
    start_time = time.time()
    for itr in range(0, num_iter):

        optimizer.zero_grad()
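        # --- Hedged sketch: the snippet is truncated here. ---
        # A plausible continuation for this Wiener structure (G followed by
        # nn_static); the MSE loss is an assumption, not the original code.
        y_lin = G(u_torch)
        y_hat = nn_static(y_lin)
        loss = torch.mean((y_hat - y_meas_torch) ** 2)
        loss.backward()
        optimizer.step()
        LOSS.append(loss.item())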
Code Example #7
    G1 = SisoLinearDynamicalOperator(n_b, n_a, n_k=1)
    F_nl = SisoStaticNonLinearity(n_hidden=10, activation='tanh')
    G2 = SisoLinearDynamicalOperator(n_b, n_a)

    H_inv = SisoLinearDynamicalOperator(2, 2, n_k=1)  # inverse noise model

    def model(u_in):
        y1_lin = G1(u_in)
        y1_nl = F_nl(y1_lin)
        y_hat = G2(y1_nl)
        return y_hat, y1_nl, y1_lin

    # In[Setup optimizer]
    optimizer_ADAM = torch.optim.Adam([
        {'params': G1.parameters(), 'lr': lr_ADAM},
        {'params': G2.parameters(), 'lr': lr_ADAM},
        {'params': F_nl.parameters(), 'lr': lr_ADAM},
        {'params': H_inv.parameters(), 'lr': lr_ADAM},
    ], lr=lr_ADAM)
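    # --- Hedged sketch: the training loop is not shown in this snippet. ---
    # A plausible prediction-error step for this Wiener-Hammerstein model
    # with inverse noise model H_inv; num_iter_ADAM and y_fit_torch are
    # hypothetical names, and filtering the residual through H_inv is an
    # assumption, not the original code.
    for itr in range(num_iter_ADAM):
        optimizer_ADAM.zero_grad()
        y_hat, y1_nl, y1_lin = model(u_fit_torch)
        e = y_fit_torch - y_hat        # output error
        e_white = H_inv(e)             # whitened residual (assumed)
        loss = torch.mean(e_white ** 2)
        loss.backward()
        optimizer_ADAM.step()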