def main():
    kf = tf.Variable(0.05)
    D = tf.Variable(1.0)

    def pde(x, y):
        ca, cb = y[:, 0:1], y[:, 1:2]
        dca_t = dde.grad.jacobian(y, x, i=0, j=1)
        dca_xx = dde.grad.hessian(y, x, component=0, i=0, j=0)
        dcb_t = dde.grad.jacobian(y, x, i=1, j=1)
        dcb_xx = dde.grad.hessian(y, x, component=1, i=0, j=0)
        eq_a = dca_t - 1e-3 * D * dca_xx + kf * ca * cb ** 2
        eq_b = dcb_t - 1e-3 * D * dcb_xx + 2 * kf * ca * cb ** 2
        return [eq_a, eq_b]

    def fun_bc(x):
        return 1 - x[:, 0:1]

    def fun_init(x):
        return np.exp(-20 * x[:, 0:1])

    geom = dde.geometry.Interval(0, 1)
    timedomain = dde.geometry.TimeDomain(0, 10)
    geomtime = dde.geometry.GeometryXTime(geom, timedomain)

    bc_a = dde.DirichletBC(
        geomtime, fun_bc, lambda _, on_boundary: on_boundary, component=0
    )
    bc_b = dde.DirichletBC(
        geomtime, fun_bc, lambda _, on_boundary: on_boundary, component=1
    )
    ic1 = dde.IC(geomtime, fun_init, lambda _, on_initial: on_initial, component=0)
    ic2 = dde.IC(geomtime, fun_init, lambda _, on_initial: on_initial, component=1)

    observe_x, Ca, Cb = gen_traindata()
    ptset = dde.bc.PointSet(observe_x)
    observe_y1 = dde.DirichletBC(
        geomtime, ptset.values_to_func(Ca), lambda x, _: ptset.inside(x), component=0
    )
    observe_y2 = dde.DirichletBC(
        geomtime, ptset.values_to_func(Cb), lambda x, _: ptset.inside(x), component=1
    )

    data = dde.data.TimePDE(
        geomtime,
        pde,
        [bc_a, bc_b, ic1, ic2, observe_y1, observe_y2],
        num_domain=2000,
        num_boundary=100,
        num_initial=100,
        anchors=observe_x,
        num_test=50000,
    )
    net = dde.maps.FNN([2] + [20] * 3 + [2], "tanh", "Glorot uniform")
    model = dde.Model(data, net)
    model.compile("adam", lr=0.001)
    variable = dde.callbacks.VariableValue(
        [kf, D], period=1000, filename="variables.dat"
    )
    losshistory, train_state = model.train(epochs=80000, callbacks=[variable])
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
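The snippets on this page omit their imports and data loaders. A minimal preamble, plus a hypothetical gen_traindata for this first example (the file name and array keys are placeholders, not from the original script):

import numpy as np
import deepxde as dde
from deepxde.backend import tf  # or plain `import tensorflow as tf` on older, TF1-style DeepXDE
# Later examples additionally use matplotlib.pyplot, scipy.io, and scipy.special.gamma.


def gen_traindata():
    # Hypothetical loader: observations of Ca and Cb on a space-time grid.
    data = np.load("reaction.npz")  # placeholder file name and keys
    t, x, ca, cb = data["t"], data["x"], data["Ca"], data["Cb"]
    X, T = np.meshgrid(x, t)
    observe_x = np.vstack((np.ravel(X), np.ravel(T))).T
    return observe_x, ca.flatten()[:, None], cb.flatten()[:, None]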
Example 2
def main():
    spatial_domain = dde.geometry.Rectangle(xmin=[-0.5, -0.5], xmax=[1, 1.5])

    boundary_condition_u = dde.DirichletBC(spatial_domain,
                                           u_func,
                                           lambda _, on_boundary: on_boundary,
                                           component=0)
    boundary_condition_v = dde.DirichletBC(spatial_domain,
                                           v_func,
                                           lambda _, on_boundary: on_boundary,
                                           component=1)
    boundary_condition_right_p = dde.DirichletBC(spatial_domain,
                                                 p_func,
                                                 boundary_outflow,
                                                 component=2)

    data = dde.data.TimePDE(
        spatial_domain,
        pde,
        [
            boundary_condition_u, boundary_condition_v,
            boundary_condition_right_p
        ],
        num_domain=2601,
        num_boundary=400,
        num_test=100000,
    )

    net = dde.maps.FNN([2] + 4 * [50] + [3], "tanh", "Glorot normal")

    model = dde.Model(data, net)

    model.compile("adam", lr=1e-3)
    model.train(epochs=30000)
    model.compile("L-BFGS-B")
    losshistory, train_state = model.train()

    X = spatial_domain.random_points(100000)
    output = model.predict(X)
    u_pred = output[:, 0]
    v_pred = output[:, 1]
    p_pred = output[:, 2]

    u_exact = u_func(X).reshape(-1)
    v_exact = v_func(X).reshape(-1)
    p_exact = p_func(X).reshape(-1)

    f = model.predict(X, operator=pde)

    l2_difference_u = dde.metrics.l2_relative_error(u_exact, u_pred)
    l2_difference_v = dde.metrics.l2_relative_error(v_exact, v_pred)
    l2_difference_p = dde.metrics.l2_relative_error(p_exact, p_pred)
    residual = np.mean(np.absolute(f))

    print("Mean residual:", residual)
    print("L2 relative error in u:", l2_difference_u)
    print("L2 relative error in v:", l2_difference_v)
    print("L2 relative error in p:", l2_difference_p)
Example 3
def main():
    C = tf.Variable(2.0)

    def pde(x, y):
        dy_x = tf.gradients(y, x)[0]
        dy_x, dy_t = dy_x[:, 0:1], dy_x[:, 1:]
        dy_xx = tf.gradients(dy_x, x)[0][:, 0:1]
        return (
            dy_t - C * dy_xx + tf.exp(-x[:, 1:]) *
            (tf.sin(np.pi * x[:, 0:1]) - np.pi**2 * tf.sin(np.pi * x[:, 0:1])))

    def func(x):
        return np.sin(np.pi * x[:, 0:1]) * np.exp(-x[:, 1:])

    geom = dde.geometry.Interval(-1, 1)
    timedomain = dde.geometry.TimeDomain(0, 1)
    geomtime = dde.geometry.GeometryXTime(geom, timedomain)

    bc = dde.DirichletBC(geomtime, func, lambda _, on_boundary: on_boundary)
    ic = dde.IC(geomtime, func, lambda _, on_initial: on_initial)

    observe_x = np.vstack((np.linspace(-1, 1, num=10), np.full((10), 1))).T
    ptset = dde.bc.PointSet(observe_x)
    observe_y = dde.DirichletBC(geomtime,
                                ptset.values_to_func(func(observe_x)),
                                lambda x, _: ptset.inside(x))

    data = dde.data.TimePDE(
        geomtime,
        1,
        pde,
        [bc, ic, observe_y],
        num_domain=40,
        num_boundary=20,
        num_initial=10,
        anchors=observe_x,
        func=func,
        num_test=10000,
    )

    layer_size = [2] + [32] * 3 + [1]
    activation = "tanh"
    initializer = "Glorot uniform"
    net = dde.maps.FNN(layer_size, activation, initializer)

    model = dde.Model(data, net)

    model.compile("adam", lr=0.001, metrics=["l2 relative error"])
    variable = dde.callbacks.VariableValue(C, period=1000)
    losshistory, train_state = model.train(epochs=50000, callbacks=[variable])

    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
Example 4
def main():
    def ode_system(x, y):
        """ODE system.
        dy1/dx = y2
        dy2/dx = -y1
        """
        y1, y2, y3 = y[:, 0:1], y[:, 1:2], y[:, 2:]
        dy1_x = tf.gradients(y1, x)[0]
        dy2_x = tf.gradients(y2, x)[0]
        dy3_x = tf.gradients(y3, x)[0]
        return [dy1_x - y2, dy2_x + y1, dy3_x + y2]

    def boundary(x, on_boundary):
        return on_boundary and np.isclose(x[0], 0)

    def func(x):
        """
        y1 = sin(x)
        y2 = cos(x)
        """
        return np.hstack((np.sin(x), np.cos(x), -np.sin(x)))

    geom = dde.geometry.Cuboid([0, 0, 0], [10, 10, 10])
    bc1 = dde.DirichletBC(geom,
                          lambda x: np.sin(x[:, 0:1]),
                          boundary,
                          component=0)
    bc2 = dde.DirichletBC(geom,
                          lambda x: np.cos(x[:, 0:1]),
                          boundary,
                          component=1)
    bc3 = dde.DirichletBC(geom,
                          lambda x: np.sin(x[:, 0:1]),
                          boundary,
                          component=2)
    data = dde.data.PDE(geom,
                        3,
                        ode_system, [bc1, bc2, bc3],
                        350,
                        64,
                        num_test=100)

    layer_size = [3] + [50] * 3 + [3]
    activation = "tanh"
    initializer = "Glorot uniform"
    net = dde.maps.FNN(layer_size, activation, initializer)

    model = dde.Model(data, net)
    model.compile("adam", lr=0.001, metrics=["l2 relative error"])
    losshistory, train_state = model.train(epochs=20000)

    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
Example 5
def main():
    def pde(x, y):
        dy_x = tf.gradients(y, x)[0]
        dy_x, dy_y = dy_x[:, 0:1], dy_x[:, 1:]
        dy_xx = tf.gradients(dy_x, x)[0][:, 0:1]
        dy_yy = tf.gradients(dy_y, x)[0][:, 1:]
        return -dy_xx - dy_yy - 1

    def boundary(_, on_boundary):
        return on_boundary

    def func(x):
        return np.zeros([len(x), 1])

    geom = dde.geometry.Polygon([[0, 0], [1, 0], [1, -1], [-1, -1], [-1, 1], [0, 1]])
    bc = dde.DirichletBC(geom, func, boundary)

    data = dde.data.PDE(geom, pde, bc, num_domain=1200, num_boundary=120, num_test=1500)
    net = dde.maps.FNN([2] + [50] * 4 + [1], "tanh", "Glorot uniform")
    model = dde.Model(data, net)

    model.compile("adam", lr=0.001)
    model.train(epochs=50000)
    model.compile("L-BFGS-B")
    losshistory, train_state = model.train()
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
Example 6
def main():
    def pde(x, y):
        dy_x = tf.gradients(y, x)[0]
        dy_xx = tf.gradients(dy_x, x)[0]
        return dy_xx - 2

    def boundary_l(x, on_boundary):
        return on_boundary and np.isclose(x[0], -1)

    def boundary_r(x, on_boundary):
        return on_boundary and np.isclose(x[0], 1)

    def func(x):
        return (x + 1)**2

    geom = dde.geometry.Interval(-1, 1)
    bc_l = dde.DirichletBC(geom, func, boundary_l)
    bc_r = dde.RobinBC(geom, lambda X, y: y, boundary_r)
    data = dde.data.PDE(geom,
                        pde, [bc_l, bc_r],
                        16,
                        2,
                        solution=func,
                        num_test=100)

    layer_size = [1] + [50] * 3 + [1]
    activation = "tanh"
    initializer = "Glorot uniform"
    net = dde.maps.FNN(layer_size, activation, initializer)

    model = dde.Model(data, net)
    model.compile("adam", lr=0.001, metrics=["l2 relative error"])
    losshistory, train_state = model.train(epochs=10000)

    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
Example 7
def main():
    geom = dde.geometry.Sphere([0, 0, 0], 1)
    bc = dde.DirichletBC(geom, func, lambda _, on_boundary: on_boundary)

    data = dde.data.FPDE(
        geom,
        fpde,
        alpha,
        bc,
        [8, 8, 100],
        num_domain=256,
        num_boundary=1,
        solution=func,
    )

    net = dde.maps.FNN([3] + [20] * 4 + [1], "tanh", "Glorot normal")
    net.apply_output_transform(
        lambda x, y: (1 - tf.reduce_sum(x**2, axis=1, keepdims=True)) * y)

    model = dde.Model(data, net)
    model.compile("adam", lr=1e-3)
    losshistory, train_state = model.train(epochs=10000)
    dde.saveplot(losshistory, train_state, issave=False, isplot=True)

    X = geom.random_points(10000)
    y_true = func(X)
    y_pred = model.predict(X)
    print("L2 relative error:", dde.metrics.l2_relative_error(y_true, y_pred))
    np.savetxt("test.dat", np.hstack((X, y_true, y_pred)))
Example 8
def main():
    geom = dde.geometry.Interval(-1, 1)
    timedomain = dde.geometry.TimeDomain(0, 0.99)
    geomtime = dde.geometry.GeometryXTime(geom, timedomain)

    bc = dde.DirichletBC(geomtime, lambda x: 0, lambda _, on_boundary: on_boundary)
    ic = dde.IC(
        geomtime, lambda x: -np.sin(np.pi * x[:, 0:1]), lambda _, on_initial: on_initial
    )

    data = dde.data.TimePDE(
        geomtime, pde, [bc, ic], num_domain=2540, num_boundary=80, num_initial=160
    )
    net = dde.maps.FNN([2] + [20] * 3 + [1], "tanh", "Glorot normal")
    model = dde.Model(data, net)

    model.compile("adam", lr=1e-3)
    model.train(epochs=15000)
    model.compile("L-BFGS-B")
    losshistory, train_state = model.train()
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)

    X, y_true = gen_testdata()
    y_pred = model.predict(X)
    f = model.predict(X, operator=pde)
    print("Mean residual:", np.mean(np.absolute(f)))
    print("L2 relative error:", dde.metrics.l2_relative_error(y_true, y_pred))
    np.savetxt("test.dat", np.hstack((X, y_true, y_pred)))
Example 9
def main():
    def pde(x, y):
        dy_tt = dde.grad.hessian(y, x, i=1, j=1)
        dy_xx = dde.grad.hessian(y, x, i=0, j=0)
        return dy_tt - C**2 * dy_xx

    def func(x):
        x, t = np.split(x, 2, axis=1)
        return np.sin(np.pi * x) * np.cos(C * np.pi * t) + np.sin(
            A * np.pi * x) * np.cos(A * C * np.pi * t)

    geom = dde.geometry.Interval(0, 1)
    timedomain = dde.geometry.TimeDomain(0, 1)
    geomtime = dde.geometry.GeometryXTime(geom, timedomain)

    bc = dde.DirichletBC(geomtime, func, lambda _, on_boundary: on_boundary)
    ic_1 = dde.IC(geomtime, func, lambda _, on_initial: on_initial)
    # Do not use dde.NeumannBC here, since `normal_derivative` does not work with the temporal coordinate.
    ic_2 = dde.OperatorBC(
        geomtime,
        lambda x, y, _: dde.grad.jacobian(y, x, i=0, j=1),
        lambda x, _: np.isclose(x[1], 0),
    )
    data = dde.data.TimePDE(
        geomtime,
        pde,
        [bc, ic_1, ic_2],
        num_domain=360,
        num_boundary=360,
        num_initial=360,
        solution=func,
        num_test=10000,
    )

    layer_size = [2] + [100] * 3 + [1]
    activation = "tanh"
    initializer = "Glorot uniform"
    net = dde.maps.STMsFFN(layer_size,
                           activation,
                           initializer,
                           sigmas_x=[1],
                           sigmas_t=[1, 10])
    net.apply_feature_transform(lambda x: (x - 0.5) * 2 * np.sqrt(3))

    model = dde.Model(data, net)
    initial_losses = get_initial_loss(model)
    loss_weights = 5 / initial_losses
    model.compile(
        "adam",
        lr=0.001,
        metrics=["l2 relative error"],
        loss_weights=loss_weights,
        decay=("inverse time", 2000, 0.9),
    )
    pde_residual_resampler = dde.callbacks.PDEResidualResampler(period=1)
    losshistory, train_state = model.train(epochs=10000,
                                           callbacks=[pde_residual_resampler],
                                           display_every=500)

    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
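The wave example above assumes the constants A and C and a get_initial_loss helper that evaluates the untrained losses so they can be turned into loss weights. A sketch under assumed values (A = 2, C = 10):

A, C = 2, 10  # assumed values: A = higher harmonic, C = wave speed


def get_initial_loss(model):
    # Compile and run zero training steps just to record the initial loss values.
    model.compile("adam", lr=0.001)
    losshistory, train_state = model.train(0)
    return losshistory.loss_train[0]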
Example 10
def main():
    def pde(x, y):
        dy_xx = dde.grad.hessian(y, x, i=0, j=0)
        dy_yy = dde.grad.hessian(y, x, i=1, j=1)
        return -dy_xx - dy_yy - 1

    def boundary(_, on_boundary):
        return on_boundary

    geom = dde.geometry.Polygon([[0, 0], [1, 0], [1, -1], [-1, -1], [-1, 1],
                                 [0, 1]])
    bc = dde.DirichletBC(geom, lambda x: 0, boundary)

    data = dde.data.PDE(geom,
                        pde,
                        bc,
                        num_domain=1200,
                        num_boundary=120,
                        num_test=1500)
    net = dde.maps.FNN([2] + [50] * 4 + [1], "tanh", "Glorot uniform")
    model = dde.Model(data, net)

    model.compile("adam", lr=0.001)
    model.train(epochs=50000)
    model.compile("L-BFGS-B")
    losshistory, train_state = model.train()
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
Example 11
def main():
    geom = dde.geometry.Interval(0, 1)
    bc = dde.DirichletBC(geom, func, lambda _, on_boundary: on_boundary)
    data = dde.data.PDE(
        geom,
        pde,
        bc,
        1280,
        2,
        train_distribution="pseudo",
        solution=func,
        num_test=10000,
    )

    layer_size = [1] + [100] * 3 + [1]
    activation = "tanh"
    initializer = "Glorot uniform"
    net = dde.maps.MsFFN(layer_size, activation, initializer, sigmas=[1, 10])

    model = dde.Model(data, net)
    model.compile(
        "adam",
        lr=0.001,
        metrics=["l2 relative error"],
        decay=("inverse time", 2000, 0.9),
    )

    pde_residual_resampler = dde.callbacks.PDEResidualResampler(period=1)
    model.train(epochs=20000, callbacks=[pde_residual_resampler])

    dde.saveplot(model.losshistory, model.train_state, issave=True, isplot=True)
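Here pde and func are not shown. One plausible pair, consistent with the multi-scale Fourier feature network (sigmas=[1, 10]), is a 1D Poisson problem mixing a low and a high frequency; the frequencies below are assumptions, not from the original:

A, B = 2, 50  # assumed low/high frequencies


def pde(x, y):
    dy_xx = dde.grad.hessian(y, x)
    return (
        dy_xx
        + (np.pi * A) ** 2 * tf.sin(np.pi * A * x)
        + 0.1 * (np.pi * B) ** 2 * tf.sin(np.pi * B * x)
    )


def func(x):
    return np.sin(np.pi * A * x) + 0.1 * np.sin(np.pi * B * x)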
Example 12
def main():
    def pde(x, y):
        dy_r = dde.grad.jacobian(y, x, i=0, j=0)
        dy_rr = dde.grad.hessian(y, x, i=0, j=0)
        dy_thetatheta = dde.grad.hessian(y, x, i=1, j=1)
        return x[:, 0:1] * dy_r + x[:, 0:1]**2 * dy_rr + dy_thetatheta

    def solution(x):
        r, theta = x[:, 0:1], x[:, 1:]
        return r * np.cos(theta)

    geom = dde.geometry.Rectangle(xmin=[0, 0], xmax=[1, 2 * np.pi])
    bc_rad = dde.DirichletBC(
        geom,
        lambda x: np.cos(x[:, 1:2]),
        lambda x, on_boundary: on_boundary and np.isclose(x[0], 1),
    )
    data = dde.data.PDE(geom,
                        pde,
                        bc_rad,
                        num_domain=2540,
                        num_boundary=80,
                        solution=solution)

    net = dde.maps.FNN([2] + [20] * 3 + [1], "tanh", "Glorot normal")
    # Use [r*sin(theta), r*cos(theta)] as features,
    # so that the network is automatically periodic along the theta coordinate.
    net.apply_feature_transform(
        lambda x: tf.concat(
            [x[:, 0:1] * tf.sin(x[:, 1:2]), x[:, 0:1] * tf.cos(x[:, 1:2])], axis=1
        )
    )

    model = dde.Model(data, net)
    model.compile("adam", lr=1e-3, metrics=["l2 relative error"])
    losshistory, train_state = model.train(epochs=15000)
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
Example 13
def solve_poisson_with_dl(lightweight=False):
    # geom = dde.geometry.Polygon([[0, 0], [1, 0], [1, -1], [-1, -1], [-1, 1], [0, 1]])
    geom = dde.geometry.Rectangle(xmin=[0, 0], xmax=[1, 1])

    # bc = dde.DirichletBC(geom, lambda x: 0, boundary)
    bc = dde.DirichletBC(geom, _func, lambda _, on_boundary: on_boundary)

    data = dde.data.PDE(
        geom,
        _pde,
        bc,
        num_domain=1200,
        num_boundary=120,
        num_test=10000,
        # solution=_func,
    )

    # NN
    layer_size = [2] + [128] + [256] + [512] + [256] + [128] + [1]
    activation = 'relu'
    initializer = 'Glorot uniform'
    net = dde.maps.FNN(layer_size, activation, initializer)
    model = dde.Model(data, net)

    # Train NN
    model.compile('adam', lr=0.0005)

    losshistory, train_state = model.train(epochs=10000)

    if not lightweight:
        dde.saveplot(losshistory, train_state, issave=True, isplot=True)
        save_contour_from_model(model, 1, 1, 'poisson')

    return model
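solve_poisson_with_dl assumes _pde, _func, and save_contour_from_model exist elsewhere. A hedged sketch of the first two for a manufactured Poisson solution on the unit square (the choice of sin(pi x) sin(pi y) is an assumption; save_contour_from_model is not sketched). Note that with the ReLU activation above, the second derivatives in the residual vanish almost everywhere, which is why the other examples on this page use tanh.

def _func(x):
    return np.sin(np.pi * x[:, 0:1]) * np.sin(np.pi * x[:, 1:2])


def _pde(x, y):
    dy_xx = dde.grad.hessian(y, x, i=0, j=0)
    dy_yy = dde.grad.hessian(y, x, i=1, j=1)
    return (
        -dy_xx
        - dy_yy
        - 2 * np.pi ** 2 * tf.sin(np.pi * x[:, 0:1]) * tf.sin(np.pi * x[:, 1:2])
    )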
Example 14
def Poisson_PoliANN(Nd, Nb, Nh, Nl, sigma, opt, initializer):

    # PDE Definition using the TensorFlow notation
    def pde(x, y):

        f = 2 * (np.pi**2) * tf.sin(np.pi * x[:, 0:1]) \
            * tf.sin(np.pi * x[:, 1:2])

        # Definition of the spatial derivatives
        dy_x = tf.gradients(y, x)[0]
        dy_x, dy_y = dy_x[:, 0:1], dy_x[:, 1:]
        dy_xx = tf.gradients(dy_x, x)[0][:, 0:1]
        dy_yy = tf.gradients(dy_y, x)[0][:, 1:]

        # Definition of the Poisson equation
        return dy_xx + dy_yy + f

    # Definition of the boundary
    def boundary(_, on_boundary):
        return on_boundary

    # Definition of the homogeneous Dirichlet
    # boundary conditions
    def func(x):
        return np.zeros([len(x), 1])

    # Geometry definition

    geom = dde.geometry.Rectangle([0, 0], [1, 1])

    # Imposition of the Dirichlet boundary condition
    bc = dde.DirichletBC(geom, func, boundary)

    # ANN model definition
    data = dde.data.PDE(geom, pde, bc, num_domain=Nd, num_boundary=Nb)
    net = dde.maps.FNN([2] + [Nl] * Nh + [1], sigma, initializer)

    # Strict enforcement of the BCs for the unit square domain
    # net.apply_output_transform(lambda x, y: \
    #                           x[:,0:1] * (1 - x[:, 0:1]) * \
    #                           x[:,1:2] * (1 - x[:, 1:2]) * y)

    model = dde.Model(data, net)

    # ANN model training
    if opt == 'adam':
        model.compile(opt, lr=0.001)
        losshistory, train_state = model.train(epochs=10000)
    elif opt == 'L-BFGS-B':
        model.compile(opt)
        losshistory, train_state = model.train()
    elif opt == 'mixed':
        model.compile('adam', lr=0.001)
        model.train(epochs=5000)
        model.compile('L-BFGS-B')
        losshistory, train_state = model.train()
        dde.saveplot(losshistory, train_state, issave=True, isplot=True)

    return model
Example 15
def visualize_solution(nu):
    # Build pseudo model
    def pde(x, u):
        u_x = dde.grad.jacobian(u, x, i=0, j=0)
        u_t = dde.grad.jacobian(u, x, i=0, j=1)
        u_xx = dde.grad.hessian(u, x, i=0, j=0)
        return u_t + u * u_x - nu * u_xx

    spatial_domain = dde.geometry.Interval(x_min, x_max)
    temporal_domain = dde.geometry.TimeDomain(t_min, t_max)
    spatio_temporal_domain = dde.geometry.GeometryXTime(
        spatial_domain, temporal_domain)

    boundary_condition = dde.DirichletBC(spatio_temporal_domain, lambda x: 0,
                                         lambda _, on_boundary: on_boundary)
    initial_condition = dde.IC(spatio_temporal_domain,
                               lambda x: -np.sin(np.pi * x[:, 0:1]),
                               lambda _, on_initial: on_initial)

    data = dde.data.TimePDE(spatio_temporal_domain,
                            pde, [boundary_condition, initial_condition],
                            num_domain=domain_points,
                            num_boundary=boundary_points,
                            num_initial=initial_points,
                            num_test=test_points)

    net = dde.maps.FNN([2] + hidden_layers * [hidden_units] + [1], "tanh",
                       "Glorot normal")

    model = dde.Model(data, net)

    # Load reference solution
    file_name = 'Reference_Solutions/u_exact_nu_{}.mat'.format(nus[i])
    reference = scipy.io.loadmat(file_name)  # renamed so the dde data object is not shadowed
    u_exact = reference['usol'].T
    x_test, t_test = np.meshgrid(np.linspace(x_min, x_max, test_points_x),
                                 np.linspace(t_min, t_max, test_points_t))
    X = np.vstack((np.ravel(x_test), np.ravel(t_test))).T

    # Reload model and make predictions
    model_name = 'Neural_Networks/nu_{}/Burger_Equation_Source_Model_nu_{}-{}'.format(
        nus[i], nus[i], epochs_source[i])
    model.compile("adam", lr=learning_rate)
    model.train(model_restore_path=model_name, epochs=0)

    u_pred = model.predict(X).reshape(test_points_t, test_points_x)
    f = model.predict(X, operator=pde)

    ax = plt.axes(projection="3d")
    ax.plot_wireframe(x_test, t_test, u_pred)
    ax.set_xlabel('location x')
    ax.set_ylabel('time t')
    ax.set_zlabel('u')
    plt.tight_layout()
    plt.savefig('Predictions/Predicted_solution_nu_{}.png'.format(nus[i]),
                dpi=300)

    return
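visualize_solution depends on module-level imports, settings, and loop state defined elsewhere in the script. A hedged sketch of what it expects; every value is a placeholder:

import scipy.io
import matplotlib.pyplot as plt

x_min, x_max = -1.0, 1.0
t_min, t_max = 0.0, 1.0
domain_points, boundary_points, initial_points, test_points = 10000, 80, 160, 10000
test_points_x, test_points_t = 256, 100
hidden_layers, hidden_units = 4, 20
learning_rate = 1e-3
nus = [0.01 / np.pi]      # viscosities with saved reference solutions
epochs_source = [20000]   # training lengths of the saved source models
i = 0                     # index into nus, set by the calling loop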
Example 16
def main():
    def pde(x, y):
        dy_x = tf.gradients(y, x)[0]
        dy_x, dy_t = dy_x[:, 0:1], dy_x[:, 1:2]
        dy_xx = tf.gradients(dy_x, x)[0][:, 0:1]
        return dy_t + y * dy_x - 0.01 / np.pi * dy_xx

    geom = dde.geometry.Interval(-1, 1)
    timedomain = dde.geometry.TimeDomain(0, 0.99)
    geomtime = dde.geometry.GeometryXTime(geom, timedomain)

    bc = dde.DirichletBC(geomtime, lambda x: np.zeros((len(x), 1)),
                         lambda _, on_boundary: on_boundary)
    ic = dde.IC(geomtime, lambda x: -np.sin(np.pi * x[:, 0:1]),
                lambda _, on_initial: on_initial)

    data = dde.data.TimePDE(geomtime,
                            1,
                            pde, [bc, ic],
                            num_domain=2500,
                            num_boundary=100,
                            num_initial=160)
    net = dde.maps.FNN([2] + [20] * 3 + [1], "tanh", "Glorot normal")
    model = dde.Model(data, net)

    model.compile("adam", lr=1.0e-3)
    model.train(epochs=10000)
    model.compile("L-BFGS-B")
    model.train()

    X = geomtime.random_points(100000)
    err = 1
    while err > 0.005:
        f = model.predict(X, operator=pde)
        err_eq = np.absolute(f)
        err = np.mean(err_eq)
        print("Mean residual: %.3e" % (err))

        x_id = np.argmax(err_eq)
        print("Adding new point:", X[x_id], "\n")
        data.add_anchors(X[x_id])
        early_stopping = dde.callbacks.EarlyStopping(min_delta=1e-4,
                                                     patience=2000)
        model.compile("adam", lr=1e-3)
        model.train(epochs=10000,
                    disregard_previous_best=True,
                    callbacks=[early_stopping])
        model.compile("L-BFGS-B")
        losshistory, train_state = model.train()
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)

    X, y_true = gen_testdata()
    y_pred = model.predict(X)
    print("L2 relative error:", dde.metrics.l2_relative_error(y_true, y_pred))
    np.savetxt("test.dat", np.hstack((X, y_true, y_pred)))
Example 17
def main():
    
    def pde(x, y):
        # Heat-type residual for output component 0; x[:, 4:5] is the time coordinate.
        dy_t = dde.grad.jacobian(y, x, i=0, j=4)
        dy_xx = dde.grad.hessian(y, x, component=0, i=0, j=0)
        return (
            dy_t
            - dy_xx
            + tf.exp(-x[:, 4:])
            * (tf.sin(np.pi * x[:, 0:1]) - np.pi ** 2 * tf.sin(np.pi * x[:, 0:1])),
            # Dummy residual so the second output component is also constrained.
            x[:, 0:1] * 0,
        )

    def func(x):
        return np.sin(np.pi * x[:, 0:1]) * np.exp(-x[:, 4:])

    # 4-D spatial hypercube plus time.
    geom = dde.geometry.Hypercube([-1] * 4, [1] * 4)
    timedomain = dde.geometry.TimeDomain(0, 1)
    geomtime = dde.geometry.GeometryXTime(geom, timedomain)

    bc = dde.DirichletBC(
        geomtime, func, lambda _, on_boundary: on_boundary, component=0
    )
    ic = dde.IC(geomtime, func, lambda _, on_initial: on_initial, component=0)
    # The second output component starts from the constant value 1.
    ic2 = dde.IC(
        geomtime,
        lambda x: np.ones((len(x), 1)),
        lambda _, on_initial: on_initial,
        component=1,
    )
    # observe_x = np.vstack((np.linspace(-1, 1, num=10), np.full((10), 1))).T
    # ptset = dde.bc.PointSet(observe_x)
    # observe_y = dde.DirichletBC(
    #     geomtime, ptset.values_to_func(func(observe_x)), lambda x, _: ptset.inside(x)
    # )

    data = dde.data.TimePDE(
        geomtime,
        pde,
        [bc, ic, ic2],
        num_domain=4000,
        num_boundary=1,
        num_initial=100,
    )
    
    layer_size = [5] + [32] * 3 + [2]
    activation = "tanh"
    initializer = "Glorot uniform"
    net = dde.maps.FNN(layer_size, activation, initializer)

    model = dde.Model(data, net)

    model.compile("adam", lr=0.001, metrics=["l2 relative error"])
    losshistory, train_state = model.train(epochs=10000)

    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
Example 18
def train_source_model(nu):
  path = Path('Neural_Networks', 'nu_{}'.format(nus[i]))

  if path.exists() and path.is_dir():
    shutil.rmtree(path)

  os.mkdir(path)
  
  def pde(x, u):
    du_xx = dde.grad.hessian(u, x)
    return du_xx + nu*np.pi ** 2 * tf.sin(np.pi * x)
  
  def func(x):
    return nu*np.sin(np.pi * x)

  spatial_domain = dde.geometry.Interval(x_min, x_max)

  boundary_condition = dde.DirichletBC(spatial_domain, lambda x: 0, lambda _, on_boundary: on_boundary)


  data = dde.data.PDE(spatial_domain, pde, [boundary_condition],
                          num_domain=domain_points, num_boundary=boundary_points, solution = func, num_test=test_points)

  net = dde.maps.FNN([1] + hidden_layers * [hidden_units] + [1], "tanh", "Glorot normal")

  model = dde.Model(data, net)

  model_name = 'Neural_Networks/nu_{}/Poisson_Equation_Source_Model_nu_{}'.format(nus[i], nus[i])

  start = time.time()

  model.compile("adam", lr=1e-3)
  history, train_state = model.train(epochs=number_of_epochs, model_save_path=model_name)

  end = time.time()
  length = end - start

  X = np.linspace(x_min, x_max, test_points).reshape(-1, 1)
  u_pred = model.predict(X)
  u_exact = func(X)
  f = model.predict(X, operator=pde)

  figure_name = 'Predictions/Predicted_solution_nu_{}'.format(nu)
  plt.plot(X, u_exact, color = 'blue', label = 'exact solution')
  plt.plot(X, u_pred, color = 'red', linestyle ='--', label = 'predicted solution')
  plt.xlabel(r'location $x$')
  plt.ylabel(r'$u$')
  plt.legend(loc="upper left")
  plt.tight_layout()
  plt.savefig(figure_name, dpi = 600)

  residual = np.mean(np.absolute(f))
  l2_difference = dde.metrics.l2_relative_error(u_exact, u_pred)
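train_source_model leans on the same module-level settings sketched after Example 15, plus a few extra imports and a training length; again, all placeholders:

import os
import shutil
import time
from pathlib import Path

import matplotlib.pyplot as plt

number_of_epochs = 15000  # placeholder epoch count for the source model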
Example 19
def main():
    alpha0 = 1.8
    alpha = tf.Variable(1.5)

    def fpde(x, y, int_mat):
        """\int_theta D_theta^alpha u(x)
        """
        if isinstance(int_mat, (list, tuple)) and len(int_mat) == 3:
            int_mat = tf.SparseTensor(*int_mat)
            lhs = tf.sparse_tensor_dense_matmul(int_mat, y)
        else:
            lhs = tf.matmul(int_mat, y)
        lhs = lhs[:, 0]
        lhs *= -tf.exp(
            tf.lgamma((1 - alpha) / 2) + tf.lgamma(
                (2 + alpha) / 2)) / (2 * np.pi**1.5)
        x = x[:tf.size(lhs)]
        rhs = (2**alpha0 * gamma(2 + alpha0 / 2) * gamma(1 + alpha0 / 2) *
               (1 - (1 + alpha0 / 2) * tf.reduce_sum(x**2, axis=1)))
        return lhs - rhs

    def func(x):
        return (1 - np.linalg.norm(x, axis=1, keepdims=True)**2)**(1 +
                                                                   alpha0 / 2)

    geom = dde.geometry.Disk([0, 0], 1)

    observe_x = geom.random_points(30)
    ptset = dde.bc.PointSet(observe_x)
    observe_y = dde.DirichletBC(geom, ptset.values_to_func(func(observe_x)),
                                lambda x, _: ptset.inside(x))

    data = dde.data.FPDE(
        geom,
        fpde,
        alpha,
        observe_y,
        [8, 100],
        num_domain=64,
        anchors=observe_x,
        solution=func,
    )

    net = dde.maps.FNN([2] + [20] * 4 + [1], "tanh", "Glorot normal")
    net.apply_output_transform(
        lambda x, y: (1 - tf.reduce_sum(x**2, axis=1, keepdims=True)) * y)

    model = dde.Model(data, net)
    model.compile("adam", lr=1e-3, loss_weights=[1, 100])
    variable = dde.callbacks.VariableValue(alpha, period=1000)
    losshistory, train_state = model.train(epochs=10000, callbacks=[variable])
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
Example 20
def main():
    alpha = 1.5

    def fpde(x, y, int_mat):
        """(D_{0+}^alpha + D_{1-}^alpha) u(x) = f(x)
        """
        if isinstance(int_mat, (list, tuple)) and len(int_mat) == 3:
            int_mat = tf.SparseTensor(*int_mat)
            lhs = tf.sparse_tensor_dense_matmul(int_mat, y)
        else:
            lhs = tf.matmul(int_mat, y)
        rhs = (gamma(4) / gamma(4 - alpha) * (x**(3 - alpha) +
                                              (1 - x)**(3 - alpha)) -
               3 * gamma(5) / gamma(5 - alpha) * (x**(4 - alpha) +
                                                  (1 - x)**(4 - alpha)) +
               3 * gamma(6) / gamma(6 - alpha) * (x**(5 - alpha) +
                                                  (1 - x)**(5 - alpha)) -
               gamma(7) / gamma(7 - alpha) * (x**(6 - alpha) +
                                              (1 - x)**(6 - alpha)))
        # lhs /= 2 * np.cos(alpha * np.pi / 2)
        # rhs = gamma(alpha + 2) * x
        return lhs - rhs[:tf.size(lhs)]

    def func(x):
        # return x * (np.abs(1 - x**2)) ** (alpha / 2)
        return x**3 * (1 - x)**3

    geom = dde.geometry.Interval(0, 1)
    bc = dde.DirichletBC(geom, func, lambda _, on_boundary: on_boundary)

    # Static auxiliary points
    data = dde.data.FPDE(geom,
                         fpde,
                         alpha,
                         bc, [101],
                         meshtype="static",
                         solution=func)
    # Dynamic auxiliary points
    # data = dde.data.FPDE(
    #     geom, fpde, alpha, bc, [100], meshtype="dynamic", num_domain=20, num_boundary=2, solution=func, num_test=100
    # )

    net = dde.maps.FNN([1] + [20] * 4 + [1], "tanh", "Glorot normal")
    net.apply_output_transform(lambda x, y: x * (1 - x) * y)

    model = dde.Model(data, net)

    model.compile("adam", lr=1e-3)
    losshistory, train_state = model.train(epochs=10000)
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
Example 21
def main():
    """
    main function for differential function with q
    """
    dde.config.real.set_float64()
    # geometry part
    tmax = 1
    Qmax = 10 / C.sigma
    Xmin = []
    Xmax = []
    for i in range(1, C.N + 1):
        Xmin.append(-Qmax)
        Xmax.append(Qmax)
    geom = dde.geometry.Hypercube(Xmin, Xmax)
    # geom = dde.geometry.Interval(-1, 1)
    timedomain = dde.geometry.TimeDomain(0, tmax)
    geom = dde.geometry.GeometryXTime(geom, timedomain)
    x_initial = np.random.rand(13, C.N + 1)
    xtest = tf.convert_to_tensor(x_initial)
    ytest = np.random.rand(13, C.sizeRho)
    ytest = tf.convert_to_tensor(ytest)
    # Initial conditions
    ic = []
    for j in range(0, (C.N + 1)**2):
        # Bind j at definition time; a bare closure would capture only the final j.
        ic.append(
            dde.IC(geom, lambda X, j=j: initialState(X, j), boundary, component=j))
        # test
        # print(initialState(x_initial,j))
    # print(SLE_q(xtest,ytest))
    bc = dde.DirichletBC(geom, lambda _: 0, lambda _, on_boundary: on_boundary)
    ic.append(bc)
    # data
    data = dde.data.TimePDE(geom,
                            lambda x, y: SLE_q(x, y, C),
                            ic,
                            num_domain=4000,
                            num_boundary=0,
                            num_initial=100,
                            num_test=None)

    layer_size = [C.N + 1] + [50] * 3 + [(C.N + 1)**2]
    activation = "tanh"
    initializer = "Glorot uniform"
    net = dde.maps.FNN(layer_size, activation, initializer)
    model = dde.Model(data, net)
    model.compile("adam", lr=0.001)
    model.compile("L-BFGS-B")
    # Callbacks must be instances, not the class itself.
    checkpointer = dde.callbacks.ModelCheckpoint("model/model.ckpt", period=10000)  # placeholder path/period
    losshistory, train_state = model.train(epochs=600000,
                                           callbacks=[checkpointer])
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
Example 22
def main():
    def ddy(x, y):
        dy_x = tf.gradients(y, x)[0]
        return tf.gradients(dy_x, x)[0]

    def dddy(x, y):
        return tf.gradients(ddy(x, y), x)[0]

    def pde(x, y):
        dy_xxxx = tf.gradients(dddy(x, y), x)[0]
        return dy_xxxx + 1

    def boundary_l(x, on_boundary):
        return on_boundary and np.isclose(x[0], 0)

    def boundary_r(x, on_boundary):
        return on_boundary and np.isclose(x[0], 1)

    def func(x):
        return -x ** 4 / 24 + x ** 3 / 6 - x ** 2 / 4

    geom = dde.geometry.Interval(0, 1)

    zero_func = lambda x: np.zeros((len(x), 1))
    bc1 = dde.DirichletBC(geom, zero_func, boundary_l)
    bc2 = dde.NeumannBC(geom, zero_func, boundary_l)
    bc3 = dde.OperatorBC(geom, lambda x, y, _: ddy(x, y), boundary_r)
    bc4 = dde.OperatorBC(geom, lambda x, y, _: dddy(x, y), boundary_r)

    data = dde.data.PDE(
        geom,
        1,
        pde,
        [bc1, bc2, bc3, bc4],
        num_domain=10,
        num_boundary=2,
        func=func,
        num_test=100,
    )
    layer_size = [1] + [20] * 3 + [1]
    activation = "tanh"
    initializer = "Glorot uniform"
    net = dde.maps.FNN(layer_size, activation, initializer)

    model = dde.Model(data, net)
    model.compile("adam", lr=0.001, metrics=["l2 relative error"])
    losshistory, train_state = model.train(epochs=10000)

    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
Example 23
def main():
    geom = dde.geometry.Interval(0, 1)
    timedomain = dde.geometry.TimeDomain(0, 1)
    geomtime = dde.geometry.GeometryXTime(geom, timedomain)

    bc = dde.DirichletBC(geomtime, func, lambda _, on_boundary: on_boundary)
    ic = dde.IC(geomtime, func, lambda _, on_initial: on_initial)

    # Static auxiliary points
    data = dde.data.TimeFPDE(
        geomtime,
        fpde,
        alpha,
        [bc, ic],
        [52],
        meshtype="static",
        num_domain=400,
        solution=func,
    )
    # Dynamic auxiliary points
    # data = dde.data.TimeFPDE(
    #     geomtime,
    #     fpde,
    #     alpha,
    #     [bc, ic],
    #     [100],
    #     num_domain=20,
    #     num_boundary=1,
    #     num_initial=1,
    #     solution=func,
    #     num_test=50,
    # )

    net = dde.maps.FNN([2] + [20] * 4 + [1], "tanh", "Glorot normal")
    net.apply_output_transform(
        lambda x, y: x[:, 0:1] * (1 - x[:, 0:1]) * x[:, 1:2] * y
        + x[:, 0:1] ** 3 * (1 - x[:, 0:1]) ** 3
    )

    model = dde.Model(data, net)
    model.compile("adam", lr=1e-3)
    losshistory, train_state = model.train(epochs=10000)
    dde.saveplot(losshistory, train_state, issave=False, isplot=True)

    X = geomtime.random_points(1000)
    y_true = func(X)
    y_pred = model.predict(X)
    print("L2 relative error:", dde.metrics.l2_relative_error(y_true, y_pred))
    np.savetxt("test.dat", np.hstack((X, y_true, y_pred)))
Example 24
def main():
    def ide(x, y, int_mat):
        rhs = tf.matmul(int_mat, y)
        lhs1 = tf.gradients(y, x)[0]
        return (lhs1 + y)[:tf.size(rhs)] - rhs

    def kernel(x, s):
        return np.exp(s - x)

    def boundary(x, on_boundary):
        return on_boundary and np.isclose(x[0], 0)

    def func(x):
        return np.exp(-x) * np.cosh(x)

    geom = dde.geometry.Interval(0, 5)
    bc = dde.DirichletBC(geom, func, boundary)

    quad_deg = 20
    data = dde.data.IDE(
        geom,
        ide,
        bc,
        quad_deg,
        kernel=kernel,
        num_domain=10,
        num_boundary=2,
        train_distribution="uniform",
    )

    layer_size = [1] + [20] * 3 + [1]
    activation = "tanh"
    initializer = "Glorot uniform"
    net = dde.maps.FNN(layer_size, activation, initializer)

    model = dde.Model(data, net)
    model.compile("L-BFGS-B")
    model.train()

    X = geom.uniform_points(100)
    y_true = func(X)
    y_pred = model.predict(X)
    print("L2 relative error:", dde.metrics.l2_relative_error(y_true, y_pred))

    plt.figure()
    plt.plot(X, y_true, "-")
    plt.plot(X, y_pred, "o")
    plt.show()
    np.savetxt("test.dat", np.hstack((X, y_true, y_pred)))
Example 25
def main():
    def pde(x, y):
        dy_t = dde.grad.jacobian(y, x, i=0, j=1)
        dy_xx = dde.grad.hessian(y, x, i=0, j=0)
        return (
            dy_t
            - dy_xx
            + tf.exp(-x[:, 1:])
            * (tf.sin(np.pi * x[:, 0:1]) - np.pi ** 2 * tf.sin(np.pi * x[:, 0:1]))
        )

    def func(x):
        return np.sin(np.pi * x[:, 0:1]) * np.exp(-x[:, 1:])

    geom = dde.geometry.Interval(-1, 1)
    timedomain = dde.geometry.TimeDomain(0, 1)
    geomtime = dde.geometry.GeometryXTime(geom, timedomain)

    bc = dde.DirichletBC(geomtime, func, lambda _, on_boundary: on_boundary)
    ic = dde.IC(geomtime, func, lambda _, on_initial: on_initial)
    data = dde.data.TimePDE(
        geomtime,
        pde,
        [bc, ic],
        num_domain=40,
        num_boundary=20,
        num_initial=10,
        train_distribution="pseudo",
        solution=func,
        num_test=10000,
    )

    layer_size = [2] + [32] * 3 + [1]
    activation = "tanh"
    initializer = "Glorot uniform"
    net = dde.maps.FNN(layer_size, activation, initializer)

    model = dde.Model(data, net)

    for _ in range(5):
        model.compile("adam", lr=0.001, metrics=["l2 relative error"])
        model.train(epochs=2000)
        print("epoch = {}, resample train points...".format(model.train_state.epoch))
        data.resample_train_points()
    model.compile("adam", lr=0.001, metrics=["l2 relative error"])
    losshistory, train_state = model.train(epochs=2000)

    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
Example 26
def main():
    alpha = 1.8

    def fpde(x, y, int_mat):
        """\int_theta D_theta^alpha u(x)
        """
        if isinstance(int_mat, (list, tuple)) and len(int_mat) == 3:
            int_mat = tf.SparseTensor(*int_mat)
            lhs = tf.sparse_tensor_dense_matmul(int_mat, y)
        else:
            lhs = tf.matmul(int_mat, y)
        lhs = lhs[:, 0]
        lhs *= gamma((1 - alpha) / 2) * gamma(
            (2 + alpha) / 2) / (2 * np.pi**1.5)
        x = x[:tf.size(lhs)]
        rhs = (2**alpha * gamma(2 + alpha / 2) * gamma(1 + alpha / 2) *
               (1 - (1 + alpha / 2) * tf.reduce_sum(x**2, axis=1)))
        return lhs - rhs

    def func(x):
        return (np.abs(1 - np.linalg.norm(x, axis=1, keepdims=True)**2))**(
            1 + alpha / 2)

    geom = dde.geometry.Disk([0, 0], 1)
    bc = dde.DirichletBC(geom, func, lambda _, on_boundary: on_boundary)

    data = dde.data.FPDE(geom,
                         fpde,
                         alpha,
                         bc, [8, 100],
                         num_domain=100,
                         num_boundary=1,
                         solution=func)

    net = dde.maps.FNN([2] + [20] * 4 + [1], "tanh", "Glorot normal")
    net.apply_output_transform(
        lambda x, y: (1 - tf.reduce_sum(x**2, axis=1, keepdims=True)) * y)

    model = dde.Model(data, net)
    model.compile("adam", lr=1e-3)
    losshistory, train_state = model.train(epochs=20000)
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)

    X = geom.random_points(1000)
    y_true = func(X)
    y_pred = model.predict(X)
    print("L2 relative error:", dde.metrics.l2_relative_error(y_true, y_pred))
    np.savetxt("test.dat", np.hstack((X, y_true, y_pred)))
Example 27
def main():
    def pde(x, y):
        dy_x = tf.gradients(y, x)[0]
        dy_x, dy_t = dy_x[:, 0:1], dy_x[:, 1:]
        dy_xx = tf.gradients(dy_x, x)[0][:, 0:1]
        return (
            dy_t
            - dy_xx
            + tf.exp(-x[:, 1:])
            * (tf.sin(np.pi * x[:, 0:1]) - np.pi ** 2 * tf.sin(np.pi * x[:, 0:1]))
        )

    def func(x):
        return np.sin(np.pi * x[:, 0:1]) * np.exp(-x[:, 1:])

    geom = dde.geometry.Interval(-1, 1)
    timedomain = dde.geometry.TimeDomain(0, 1)
    geomtime = dde.geometry.GeometryXTime(geom, timedomain)

    bc = dde.DirichletBC(geomtime, func, lambda _, on_boundary: on_boundary)
    ic = dde.IC(geomtime, func, lambda _, on_initial: on_initial)
    data = dde.data.TimePDE(
        geomtime,
        pde,
        [bc, ic],
        num_domain=40,
        num_boundary=1,
        num_initial=1,
        solution=func,
        num_test=10000,
    )

    layer_size = [2] + [32] * 3 + [1]
    activation = "tanh"
    initializer = "Glorot uniform"
    net = dde.maps.FNN(layer_size, activation, initializer)
    net.outputs_modify(
        lambda x, y: x[:, 1:2] * (1 - x[:, 0:1] ** 2) * y + tf.sin(np.pi * x[:, 0:1])
    )

    model = dde.Model(data, net)

    model.compile("adam", lr=0.001, metrics=["l2 relative error"])
    losshistory, train_state = model.train(epochs=10000)

    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
Example 28
def main():
    def pde(x, y):
        dy_xx = dde.grad.hessian(y, x)
        return -dy_xx - np.pi ** 2 * tf.sin(np.pi * x)

    def boundary(x, on_boundary):
        return on_boundary

    def func(x):
        return np.sin(np.pi * x)

    geom = dde.geometry.Interval(-1, 1)
    bc = dde.DirichletBC(geom, func, boundary)
    data = dde.data.PDE(geom, pde, bc, 16, 2, solution=func, num_test=100)

    layer_size = [1] + [50] * 3 + [1]
    activation = "tanh"
    initializer = "Glorot uniform"
    net = dde.maps.FNN(layer_size, activation, initializer)

    model = dde.Model(data, net)
    model.compile("adam", lr=0.001, metrics=["l2 relative error"])

    checkpointer = dde.callbacks.ModelCheckpoint(
        "model/model.ckpt", verbose=1, save_better_only=True
    )
    # ImageMagick (https://imagemagick.org/) is required to generate the movie.
    movie = dde.callbacks.MovieDumper(
        "model/movie", [-1], [1], period=100, save_spectrum=True, y_reference=func
    )
    losshistory, train_state = model.train(
        epochs=10000, callbacks=[checkpointer, movie]
    )

    dde.saveplot(losshistory, train_state, issave=True, isplot=True)

    # Plot PDE residual
    model.restore("model/model.ckpt-" + str(train_state.best_step), verbose=1)
    x = geom.uniform_points(1000, True)
    y = model.predict(x, operator=pde)
    plt.figure()
    plt.plot(x, y)
    plt.xlabel("x")
    plt.ylabel("PDE residual")
    plt.show()
Example 29
def main():
    def ide(x, y, int_mat):
        """int_0^x y(t)dt
        """
        lhs1 = tf.matmul(int_mat, y)
        lhs2 = tf.gradients(y, x)[0]
        rhs = 2 * np.pi * tf.cos(2 * np.pi * x) + tf.sin(np.pi * x) ** 2 / np.pi
        return lhs1 + (lhs2 - rhs)[: tf.size(lhs1)]

    def boundary(x, on_boundary):
        return on_boundary and np.isclose(x[0], 0)

    def func(x):
        """
        x: array_like, N x D_in
        y: array_like, N x D_out
        """
        return np.sin(2 * np.pi * x)

    geom = dde.geometry.Interval(0, 1)
    bc = dde.DirichletBC(geom, func, boundary)

    quad_deg = 16
    data = dde.data.IDE(geom, ide, bc, quad_deg, num_domain=16, num_boundary=2)

    layer_size = [1] + [20] * 3 + [1]
    activation = "tanh"
    initializer = "Glorot uniform"
    net = dde.maps.FNN(layer_size, activation, initializer)

    model = dde.Model(data, net)
    model.compile("adam", lr=0.001)
    model.train(epochs=10000)

    X = geom.uniform_points(100, True)
    y_true = func(X)
    y_pred = model.predict(X)
    print("L2 relative error:", dde.metrics.l2_relative_error(y_true, y_pred))

    plt.figure()
    plt.plot(X, y_true, "-")
    plt.plot(X, y_pred, "o")
    plt.show()
    np.savetxt("test.dat", np.hstack((X, y_true, y_pred)))
Example 30
def main():
    geom = dde.geometry.Polygon([[0, 0], [1, 0], [1, -1], [-1, -1], [-1, 1],
                                 [0, 1]])
    bc = dde.DirichletBC(geom, lambda x: 0, boundary)

    data = dde.data.PDE(geom,
                        pde,
                        bc,
                        num_domain=1200,
                        num_boundary=120,
                        num_test=1500)
    net = dde.maps.FNN([2] + [50] * 4 + [1], "tanh", "Glorot uniform")
    model = dde.Model(data, net)

    model.compile("adam", lr=0.001)
    model.train(epochs=50000)
    model.compile("L-BFGS-B")
    losshistory, train_state = model.train()
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)