def solve(self,
              get_resid,
              layer_sizes,
              max_fun_evals,
              create_U=True,
              neural_net_basis=True,
              U_ansatz_if_not_neural=None,
              x0=None):
        """
        Minimize the local-equation residual produced by get_resid over the
        parameters of the solution ansatz U.

        The boundary and initial conditions are assumed to have been fit
        already by self.G and self.D.  With neural_net_basis=True (the
        default) a fresh network is initialized from layer_sizes; otherwise
        both U_ansatz_if_not_neural and x0 must be supplied by the caller.
        """
        if not neural_net_basis:
            U = U_ansatz_if_not_neural
        else:
            # U = G + D * NN so the fitted conditions hold by construction.
            def U(params, x, y):
                net = optnn.neural_net_predict(
                    params,
                    np.array([x, y]).reshape(1, 2))
                return self.G(x, y) + self.D(x, y) * net

            x0 = optnn.init_random_params(1, layer_sizes)

        resid = get_resid(U)
        if create_U:
            loss = self.get_local_eq_loss_function(resid)
            self.U, self.pstar_U = self.optimize_func(U, x0, loss,
                                                      max_fun_evals)
            save_var_to_file(self.U, self.id + "U", self.data_file)
            save_var_to_file(self.pstar_U, self.id + "pstar_U", self.data_file)
        else:
            # Reuse a previously optimized ansatz from disk.
            self.U = get_var_from_file(self.id + "U", self.data_file)
            self.pstar_U = get_var_from_file(self.id + "pstar_U",
                                             self.data_file)
        self.resid = resid
        return 0
def optimize_u(G,
               params_G,
               D,
               params_D,
               layer_sizes,
               nx,
               nt,
               L,
               t_max,
               max_function_evals=100,
               max_iterations=100):
    """
    Train u(x,t) = G + D * NN(params) against the wave-equation loss.

    Returns (u, p) where u is the ansatz and p the optimized network
    parameters found by L-BFGS.
    """
    x0 = rfc.init_random_params(1, layer_sizes)

    def u(params, x, t):
        # G carries the initial/boundary data; the D factor confines the
        # network correction to the interior of the domain.
        correction = rfc.neural_net_predict(params, np.array([x, t]))
        return G(params_G, x, t) + D(params_D, x, t) * correction

    loss_function = get_loss_function(u, nx, nt, L, t_max)
    loss_grad = grad(loss_function, 0)
    p, fval = rfc.unflattened_lbfgs(loss_function,
                                    loss_grad,
                                    x0,
                                    max_feval=max_function_evals,
                                    max_iter=max_iterations,
                                    callback=None)
    return u, p
def test():
    """
    Fit G and D, then train u = G + D * NN for the wave equation and plot.

    If this is the first time running the code, set create_f=True in the
    G and D creation calls so the networks are trained instead of loaded.
    """
    #IC
    g0expr = 'np.sin(x)'
    #g1expr='np.cos(x)'
    g1expr = '0.'
    #BC
    f0expr = '0'
    f1expr = '0'
    #Limits and number of points
    L = 2 * np.pi
    t_max = 2
    nx = 12
    nt = 8
    #Network hyperparameters
    layer_sizes = [2, 10, 10, 10, 1]
    max_function_evals = 200
    max_iterations = 200

    G = lambda params, x, t: rfc.neural_net_predict(params, np.array([x, t]))
    g0, g1, f0, f1 = get_functions_from_strings(g0expr, g1expr, f0expr, f1expr)
    loss_function = get_G_loss_function(G, g0, g1, f0, f1, t_max, L, N=15)
    G,param_G=create_or_load_trained_f(G, loss_function, g0expr, g1expr, f0expr, f1expr,L, t_max,  \
        layer_sizes, N=15,maxiter=300,maxfuneval=300,fname=FILE_TO_STORE_G, create_f=False)

    D = lambda params, x, t: x * (L - x) * t**2 * rfc.neural_net_predict(
        params, np.array([x, t]))
    loss_function = get_D_loss_function(D, t_max, L, 15)
    D,param_D=create_or_load_trained_f(D, loss_function, g0expr, g1expr, f0expr, f1expr,L, t_max, \
        layer_sizes,N=15,maxiter=100,maxfuneval=100,fname=FILE_TO_STORE_D, create_f=False)
    # Pass the shared hyperparameters instead of repeating the literals.
    u, param_u = optimize_u(G,
                            param_G,
                            D,
                            param_D,
                            layer_sizes,
                            nx,
                            nt,
                            L,
                            t_max,
                            max_function_evals=max_function_evals,
                            max_iterations=max_iterations)
    # NOTE(review): at this point loss_function is the *D* loss; evaluating
    # it with param_u looks unintended -- confirm whether the u loss was
    # meant here.
    # Fixed: print() was given a printf-style format string as a separate
    # argument, so it printed the literal "%.2f" instead of the value.
    print("Loss function:%.2f" % loss_function(param_u))
    plot_result(u, G, D, param_u, param_G, param_D, t_max, L, 10 * nx, 10 * nt)
Example #4
0
def test():
    """
    Train (or load) G and D for the wave equation and plot each of them.

    The condition expressions are kept as strings so that saved networks
    can be matched, when loading, to the functions they were fit against.
    """
    #Initial conditions
    g0expr = 'np.sin(x)'
    g1expr = '0.'
    #Boundary conditions
    f0expr = '0'
    f1expr = '0'
    #Limits and number of points
    L = 2 * np.pi
    t_max = 4
    N = 15
    #Network hyperparameters
    layer_sizes = [2, 7, 7, 1]

    G = lambda params, x, t: rfc.neural_net_predict(params, np.array([x, t]))
    g0, g1, f0, f1 = get_functions_from_strings(g0expr, g1expr, f0expr, f1expr)
    loss_function = get_G_loss_function(G, g0, g1, f0, f1, t_max, L, N)
    G, p_G = create_or_load_trained_f(G, loss_function, g0expr, g1expr,
                                      f0expr, f1expr, L, t_max, layer_sizes,
                                      fname=FILE_TO_STORE_G, create_f=False,
                                      maxiter=400, maxfuneval=400)

    figure = plt.figure()
    axes = figure.add_subplot(111, projection='3d')
    g0, g1, f0, f1 = get_functions_from_strings(g0expr, g1expr, f0expr, f1expr)
    plot_2D_function(axes, G, "$G$", p_G, t_max, L, N)
    plt.show()

    D = lambda params, x, t: x * (L - x) * t**2 * rfc.neural_net_predict(
        params, np.array([x, t]))
    loss_function = get_D_loss_function(D, t_max, L, N)
    D, p_D = create_or_load_trained_f(D, loss_function, g0expr, g1expr,
                                      f0expr, f1expr, L, t_max, layer_sizes,
                                      maxiter=300, maxfuneval=300,
                                      fname=FILE_TO_STORE_D, create_f=False)

    figure = plt.figure()
    axes = figure.add_subplot(111, projection='3d')
    g0, g1, f0, f1 = get_functions_from_strings(g0expr, g1expr, f0expr, f1expr)
    plot_2D_function(axes, D, "$D$", p_D, t_max, L, N)
    set_labels_and_legends(axes)
    plt.show()
    error_plots(G, D, p_G, p_D, g0, g1, f0, f1, t_max, L, N)
def test_G(g0expr, g1expr, f0expr, f1expr, t_max, L, N, maxiter, create_f,
           layer_sizes=None):
    """
    Train (or load) G against the supplied boundary/initial-condition
    expressions and plot every component of the fitted vector function.
    """
    # Fixed: a mutable list default argument is shared across calls; use a
    # None sentinel instead (same effective default as before).
    if layer_sizes is None:
        layer_sizes = [2, 7, 7, 3]

    G = lambda params, x, t: rfc.neural_net_predict(params, np.array([x, t]))
    g0, g1, f0, f1 = get_functions_from_strings(g0expr, g1expr, f0expr, f1expr)
    loss_function = get_G_loss_function(G, g0, g1, f0, f1, t_max, L, N)

    #G
    G, p_G = create_or_load_trained_f(G, loss_function, g0expr, g1expr,
                                      f0expr, f1expr, L, t_max, layer_sizes,
                                      fname=FILE_TO_STORE_G,
                                      create_f=create_f,
                                      maxiter=maxiter, maxfuneval=maxiter)

    plot_vector_function_all_elements(G, p_G, "G", g0expr, g1expr, f0expr,
                                      f1expr, t_max, L, N)
Example #6
0
def test_wave_1D():
    """
    Solve the 1D wave equation on [0, L] x [0, t_max] with u(x, 0) = sin(x),
    u_t(x, 0) = 0 and homogeneous boundary data, using U = G + D * NN.
    """
    L = 2 * np.pi
    t_max = 3
    X, T = generate_square_domain(x_min=0.,
                                  x_max=L,
                                  y_min=0.,
                                  y_max=t_max,
                                  nx=5,
                                  ny=5)
    X_plot, T_plot = generate_square_domain(x_min=0.,
                                            x_max=L,
                                            y_min=0.,
                                            y_max=t_max,
                                            nx=30,
                                            ny=30)

    # Fixed: a neural-network G_ansatz was defined here and immediately
    # overwritten; only the analytic ansatz below was ever used.
    G_ansatz = lambda params, x, t: np.sin(x)

    D_ansatz = lambda params, x, t: x * (
        L - x) * t**2 * optnn.neural_net_predict(params, np.array([x, t]))
    # NOTE(review): G_loss is computed but set_g_d below is called with
    # G_loss=None -- confirm whether it was meant to be passed.
    G_loss=lf.get_G_loss_function_wave_eq(G_ansatz,g0=lambda x: np.sin(x),g1=lambda x:0.,\
        f0=lambda x: 0.,f1=lambda x:0,t_max=t_max,L=L,N=30)
    D_loss = lf.get_D_loss_function_wave_eq(D_ansatz, t_max=t_max, L=L, N=5)

    #sys.stdout = open(LOCAL_PATH+"shelloutput_wave1D", 'w')
    wave1D=cdm.TwoVariablesOneUnknown_PDE_Solver(\
        domain=[[X,T]],id="Wave1DNoExact", plot_domain=[[X_plot, T_plot]], \
        local_path=LOCAL_PATH,var_ids=["x","t","u"])
    #wave1D.exact_solution=lambda params,x,t: np.sin(x)*np.cos(t)
    wave1D.get_resid = lf.get_resid_wave_eq_1D
    wave1D.set_g_d(G_ansatz, D_ansatz, G_loss=None, D_loss=D_loss, layer_sizes=[2,10,10,1], \
        create_G=False, create_D=False, max_fun_evals=200)
    wave1D.solve(lf.get_resid_wave_eq_1D, [2, 10, 10, 10, 10, 1],
                 max_fun_evals=400,
                 create_U=True)
    wave1D.plot_results()
Example #7
0
def test_laplace_2D_L_shape():
    """
    Set up the 2D Laplace problem on an L-shaped domain and plot results.

    THIS DOESNT WORK FOR NOW: a loss function for D is still needed.
    TODO: MAKE D LOSS.
    """
    L_shape = generate_L_shaped_domain(nx=10, ny=10,
                                       Lx1=1., Lx2=2.,
                                       Ly1=1., Ly2=2.)
    L_shape_plot = generate_L_shaped_domain(nx=30, ny=30,
                                            Lx1=1., Lx2=2.,
                                            Ly1=1., Ly2=2.)

    # Both ansatz functions are plain neural-network predictions.
    G_ansatz = lambda params, x, t: optnn.neural_net_predict(
        params, np.array([x, t]))
    D_ansatz = lambda params, x, t: optnn.neural_net_predict(
        params, np.array([x, t]))

    laplace2d = cdm.TwoVariablesOneUnknown_PDE_Solver(
        domain=L_shape, plot_domain=L_shape_plot,
        id="Laplace2D", local_path=LOCAL_PATH, var_ids=["x", "y", "T"])
    laplace2d.set_g_d(G_ansatz, D_ansatz, G_loss=None, D_loss=None,
                      layer_sizes=[2, 10, 10, 1],
                      create_G=False, create_D=True)
    laplace2d.plot_results()
 def solve(self, ls_phi, max_feval, create_U):
     """
     Optimize (or load) the solution ansatz U = G + D * net(params, x).

     ls_phi gives the layer sizes of the correction network; max_feval
     bounds both function evaluations and iterations of L-BFGS.
     """
     def net(params, x):
         return optnn.neural_net_predict(params, np.array(x))

     def U(params, x):
         # G carries the boundary data; the D factor localizes the network.
         return self.G(x) + self.D(x) * net(params, x)

     x0 = optnn.init_random_params(1, ls_phi)
     self.set_residual(U)
     loss_function = self.get_loss_function()
     loss_grad = grad(loss_function, 0)
     if not create_U:
         # Restore a previously optimized solution from disk.
         self.U = get_var_from_file(self.id + "U", self.data_file)
         self.p_U = get_var_from_file(self.id + "pstar_U", self.data_file)
     else:
         p_U, _ = optnn.unflattened_lbfgs(loss_function, loss_grad, x0,
                                          max_feval=max_feval,
                                          max_iter=max_feval,
                                          callback=None)
         self.U = lambda x: U(p_U, x)
         self.p_U = p_U
         save_var_to_file(self.U, self.id + "U", self.data_file)
         save_var_to_file(self.p_U, self.id + "pstar_U", self.data_file)
Example #9
0
def test():
    """
    First-order-system wave-equation demo: load G, define an analytic D,
    train u = G + D * NN, and save plots of G, D, u and the residual.
    """
    # Fixed: removed the unused local `fname = FILE_TO_STORE_G`; the
    # constant is passed directly below.

    #IC
    g0expr = 'np.sin(x)'
    #g1expr='np.cos(x)'
    g1expr = '0.'
    #BC
    f0expr = '0'
    f1expr = '0'
    #Limits and number of points
    L = 2 * np.pi
    t_max = 0.5
    nx = 4
    nt = 4
    #Network hyperparameters
    layer_sizes = [2, 10, 10, 10, 3]
    max_function_evals = 100
    max_iterations = 100

    #Create or load G
    G = lambda params, x, t: rfc.neural_net_predict(params, np.array([x, t]))
    g0, g1, f0, f1 = get_functions_from_strings(g0expr, g1expr, f0expr, f1expr)
    loss_function = get_G_loss_function(G, g0, g1, f0, f1, t_max, L, N=5 * nx)

    G,p_G=create_or_load_trained_f(G, loss_function, g0expr, g1expr, f0expr, f1expr,L, t_max, \
        layer_sizes,fname=FILE_TO_STORE_G, create_f=False,maxiter=max_iterations,maxfuneval=max_function_evals)

    # D is analytic here: it vanishes on all four edges of the space-time
    # rectangle and is broadcast over the 3 system components.
    D = lambda params, x, t: (L - x) * x * t * (t_max - t) * np.ones(3)

    #Train u
    x0 = rfc.init_random_params(1, layer_sizes)
    u=lambda params, x,t: G(p_G,x,t)+\
        D(None,x,t)*rfc.neural_net_predict(params, np.array([x,t]))

    #Plots
    plot_vector_function_all_elements(G,
                                      p_G,
                                      "G",
                                      g0expr,
                                      g1expr,
                                      f0expr,
                                      f1expr,
                                      t_max,
                                      L,
                                      N=nx * 5)
    plt.savefig(FIGNAME_G, bbox_inches="tight")
    plot_vector_function_all_elements(D,
                                      None,
                                      "D",
                                      g0expr,
                                      g1expr,
                                      f0expr,
                                      f1expr,
                                      t_max,
                                      L,
                                      N=nx * 5)
    plt.savefig(FIGNAME_D, bbox_inches="tight")

    loss_function = get_loss_function_wave_eq_first_order(u, nx, nt, L, t_max)
    loss_grad = grad(loss_function, 0)
    p_U, fval=rfc.unflattened_lbfgs(loss_function, loss_grad, x0, \
        max_feval=max_function_evals, max_iter=max_iterations, callback=None)
    plot_vector_function_all_elements(u,
                                      p_U,
                                      "U",
                                      g0expr,
                                      g1expr,
                                      f0expr,
                                      f1expr,
                                      t_max,
                                      L,
                                      N=nx * 5)
    plt.savefig(FIGNAME_U, bbox_inches="tight")
    resid = get_resid_wave_eq_first_order(u)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    plot_2D_function(ax, resid, "Residual", p_U, t_max, L, nx * 5)
    plt.title("Residual")
    plt.show(block=True)
 def initialize_net_fun_from_params(layer_sizes):
     """
     Return a predictor f(params, x) evaluating the network on input x.

     NOTE(review): layer_sizes is unused here; presumably it documents the
     expected parameter structure at the call site -- confirm.
     """
     # Fixed: the original lambda dropped `params` and called
     # neural_net_predict with a single argument (every other call site in
     # this file uses neural_net_predict(params, x)), which would fail at
     # call time.
     return lambda params, x: optnn.neural_net_predict(params, np.array(x))