def set_g_d(self,
                G_ansatz,
                D_ansatz,
                G_loss,
                D_loss,
                layer_sizes,
                max_fun_evals=200,
                create_G=True,
                create_D=True):
        """
        Fit (or load) the functions G and D that encode the boundary/initial
        conditions: G is optimized against G_loss and D against D_loss. If a
        loss is None, the corresponding ansatz is used directly with no
        trainable parameters.
        """
        p0 = optnn.init_random_params(1, layer_sizes)

        if G_loss is not None:
            if create_G:
                self.G, _ = self.optimize_func(G_ansatz, p0, G_loss,
                                               max_fun_evals)
                save_var_to_file(self.G, self.id + "G", self.data_file)
            else:
                self.G = get_var_from_file(self.id + "G", self.data_file)
        else:
            self.G = lambda x, y: G_ansatz(None, x, y)

        if D_loss is not None:
            if create_D:
                self.D, _ = self.optimize_func(D_ansatz, p0, D_loss,
                                               max_fun_evals)
                save_var_to_file(self.D, self.id + "D", self.data_file)
            else:
                self.D = get_var_from_file(self.id + "D", self.data_file)
        else:
            self.D = lambda x, y: D_ansatz(None, x, y)
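
    # Hypothetical usage sketch (the solver instance and the ansatz/loss names
    # below are illustrative and not defined in this file). G is fitted to the
    # boundary/initial data and D to a mask that vanishes there, so the trial
    # function built later in solve(), U = G + D * N(params), satisfies the
    # conditions by construction:
    #
    #   solver.set_g_d(G_ansatz, D_ansatz, G_loss, D_loss,
    #                  layer_sizes=[2, 10, 10, 1], max_fun_evals=200)
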
    def solve(self,
              get_resid,
              layer_sizes,
              max_fun_evals,
              create_U=True,
              neural_net_basis=True,
              U_ansatz_if_not_neural=None,
              x0=None):
        """
        Optimizes the solution ansatz U to minimize the local equation defined by get_resid. 
        The boundary and initial conditions are assumed to already have been fit by self.G and self.D
        By default, a neural network basis is used i.e. neural_net_basis=True. 
        If a different basis is used, U_ansatz_if_not_neural and x0 need to be given. 
        """
        if neural_net_basis:
            # Trial function: self.G matches the boundary/initial data and
            # self.D vanishes there, so U satisfies the conditions for any
            # choice of network parameters.
            U = lambda params, x, y: (self.G(x, y) + self.D(x, y) *
                                      optnn.neural_net_predict(
                                          params,
                                          np.array([x, y]).reshape(1, 2)))
            x0 = optnn.init_random_params(1, layer_sizes)
        else:
            U = U_ansatz_if_not_neural

        resid = get_resid(U)
        if create_U:
            self.U, self.pstar_U = self.optimize_func(
                U, x0, self.get_local_eq_loss_function(resid), max_fun_evals)
            save_var_to_file(self.U, self.id + "U", self.data_file)
            save_var_to_file(self.pstar_U, self.id + "pstar_U", self.data_file)
        else:
            self.U = get_var_from_file(self.id + "U", self.data_file)
            self.pstar_U = get_var_from_file(self.id + "pstar_U",
                                             self.data_file)
        self.resid = resid
        return 0
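
# A minimal sketch of the residual factory that solve() above expects
# (illustrative, not part of the original code). get_resid receives the full
# ansatz U(params, x, y) and returns a residual callable built from its
# derivatives. The Poisson equation U_xx + U_yy = f used here is a hypothetical
# example, and it assumes U(params, x, y) returns a scalar so that autograd's
# grad applies.
def get_resid_poisson_example(U, f=lambda x, y: 0.0):
    U_xx = grad(grad(U, 1), 1)  # second derivative of U with respect to x
    U_yy = grad(grad(U, 2), 2)  # second derivative of U with respect to y
    return lambda params, x, y: U_xx(params, x, y) + U_yy(params, x, y) - f(x, y)
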
def optimize_u(G,
               params_G,
               D,
               params_D,
               layer_sizes,
               nx,
               nt,
               L,
               t_max,
               max_function_evals=100,
               max_iterations=100):
    """
    Returns u(x,t) to fit wave equation.
    """

    x0 = rfc.init_random_params(1, layer_sizes)

    # Trial solution: G carries the initial/boundary data, D vanishes on the
    # boundary, and the network supplies the interior degrees of freedom.
    u = lambda params, x, t: (G(params_G, x, t) +
                              D(params_D, x, t) *
                              rfc.neural_net_predict(params, np.array([x, t])))

    #res_arr=vresid(x0,X,T)

    #plot_result(u,G,D,x0, params_G,params_D,t_max,L,nx*5,nt*5)

    loss_function = get_loss_function(u, nx, nt, L, t_max)
    loss_grad = grad(loss_function, 0)
    p, fval = rfc.unflattened_lbfgs(loss_function, loss_grad, x0,
                                    max_feval=max_function_evals,
                                    max_iter=max_iterations, callback=None)
    # plot_result(u,G,D,p, params_G,params_D,t_max,L,nx*5,nt*5)
    return u, p
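
# Hypothetical driver for optimize_u (illustrative only; the untrained G below
# and the parameter values are assumptions, used just to show the call
# signature; test() further down builds the same ingredients in full).
def run_optimize_u_example(L=2 * np.pi, t_max=0.5):
    layer_sizes = [2, 10, 10, 3]
    params_G = rfc.init_random_params(1, layer_sizes)
    G = lambda params, x, t: rfc.neural_net_predict(params, np.array([x, t]))
    D = lambda params, x, t: (L - x) * x * t * (t_max - t) * np.ones(3)
    return optimize_u(G, params_G, D, None, layer_sizes,
                      nx=4, nt=4, L=L, t_max=t_max)
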
    def solve(self, ls_phi, max_feval, create_U):
        """
        Optimize (or load) the trial solution U(x) = G(x) + D(x) * Phi(params, x),
        where Phi is a neural network with layer sizes ls_phi.
        """
        Phi = lambda params, x: optnn.neural_net_predict(params, np.array(x))
        U = lambda params, x: self.G(x) + self.D(x) * Phi(params, x)
        x0 = optnn.init_random_params(1, ls_phi)
        self.set_residual(U)
        loss_function = self.get_loss_function()
        loss_grad = grad(loss_function, 0)
        if create_U:
            p_U, _ = optnn.unflattened_lbfgs(loss_function, loss_grad, x0,
                                             max_feval=max_feval,
                                             max_iter=max_feval, callback=None)
            self.U = lambda x: U(p_U, x)
            self.p_U = p_U
            save_var_to_file(self.U, self.id + "U", self.data_file)
            save_var_to_file(self.p_U, self.id + "pstar_U", self.data_file)
        else:
            self.U = get_var_from_file(self.id + "U", self.data_file)
            self.p_U = get_var_from_file(self.id + "pstar_U", self.data_file)
def test():
    fname = FILE_TO_STORE_G

    # Initial conditions
    g0expr = 'np.sin(x)'
    #g1expr='np.cos(x)'
    g1expr = '0.'
    # Boundary conditions
    f0expr = '0'
    f1expr = '0'
    # Domain limits and number of grid points
    L = 2 * np.pi
    t_max = 0.5
    nx = 4
    nt = 4
    #Network hyperparameters
    layer_sizes = [2, 10, 10, 10, 3]
    max_function_evals = 100
    max_iterations = 100

    #Create or load G
    G = lambda params, x, t: rfc.neural_net_predict(params, np.array([x, t]))
    g0, g1, f0, f1 = get_functions_from_strings(g0expr, g1expr, f0expr, f1expr)
    loss_function = get_G_loss_function(G, g0, g1, f0, f1, t_max, L, N=5 * nx)

    G, p_G = create_or_load_trained_f(
        G, loss_function, g0expr, g1expr, f0expr, f1expr, L, t_max, layer_sizes,
        fname=FILE_TO_STORE_G, create_f=False, maxiter=max_iterations,
        maxfuneval=max_function_evals)

    #Create or load D
    D = lambda params, x, t: (L - x) * x * t * (t_max - t) * np.ones(3)

    #Train u
    x0 = rfc.init_random_params(1, layer_sizes)
    u = lambda params, x, t: (G(p_G, x, t) +
                              D(None, x, t) *
                              rfc.neural_net_predict(params, np.array([x, t])))

    #Plots
    plot_vector_function_all_elements(G,
                                      p_G,
                                      "G",
                                      g0expr,
                                      g1expr,
                                      f0expr,
                                      f1expr,
                                      t_max,
                                      L,
                                      N=nx * 5)
    plt.savefig(FIGNAME_G, bbox_inches="tight")
    plot_vector_function_all_elements(D,
                                      None,
                                      "D",
                                      g0expr,
                                      g1expr,
                                      f0expr,
                                      f1expr,
                                      t_max,
                                      L,
                                      N=nx * 5)
    plt.savefig(FIGNAME_D, bbox_inches="tight")

    loss_function = get_loss_function_wave_eq_first_order(u, nx, nt, L, t_max)
    loss_grad = grad(loss_function, 0)
    p_U, fval = rfc.unflattened_lbfgs(loss_function, loss_grad, x0,
                                      max_feval=max_function_evals,
                                      max_iter=max_iterations, callback=None)
    plot_vector_function_all_elements(u,
                                      p_U,
                                      "U",
                                      g0expr,
                                      g1expr,
                                      f0expr,
                                      f1expr,
                                      t_max,
                                      L,
                                      N=nx * 5)
    plt.savefig(FIGNAME_U, bbox_inches="tight")
    resid = get_resid_wave_eq_first_order(u)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    plot_2D_function(ax, resid, "Residual", p_U, t_max, L, nx * 5)
    plt.title("Residual")
    plt.show(block=True)
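
# Hypothetical entry point (not part of the original example); lets the test
# above run when this module is executed as a script.
if __name__ == "__main__":
    test()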