# Shared imports assumed by every snippet below (TensorFlow-1-style backend,
# matching the tf.lgamma / tf.sparse_tensor_dense_matmul calls used here).
import numpy as np
from scipy.special import gamma
import deepxde as dde
from deepxde.backend import tf


def main():
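    # fpde, func, and alpha are assumed to be defined elsewhere; the full
    # inverse-problem example further down shows one way to define them.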
    geom = dde.geometry.Disk([0, 0], 1)

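    # 30 random points inside the disk; their exact-solution values serve as
    # the observations for the inverse problem.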
    observe_x = geom.random_points(30)
    observe_y = dde.PointSetBC(observe_x, func(observe_x))

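    # The list [8, 100] is the discretization resolution that dde.data.FPDE
    # uses to build the integral approximation of the fractional operator.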
    data = dde.data.FPDE(
        geom,
        fpde,
        alpha,
        observe_y,
        [8, 100],
        num_domain=64,
        anchors=observe_x,
        solution=func,
    )

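    # The output transform enforces u = 0 on the boundary exactly, since
    # 1 - |x|^2 vanishes on the unit circle.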
    net = dde.maps.FNN([2] + [20] * 4 + [1], "tanh", "Glorot normal")
    net.apply_output_transform(
        lambda x, y: (1 - tf.reduce_sum(x**2, axis=1, keepdims=True)) * y)

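    # loss_weights=[1, 100] up-weights the observation loss relative to the
    # PDE residual; VariableValue reports the current alpha every 1000 epochs.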
    model = dde.Model(data, net)
    model.compile("adam", lr=1e-3, loss_weights=[1, 100])
    variable = dde.callbacks.VariableValue(alpha, period=1000)
    losshistory, train_state = model.train(epochs=10000, callbacks=[variable])
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
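

# Forward fractional Poisson equation on the unit ball; fpde, func, and alpha
# are assumed to be defined at module level (a 3-D fpde residual appears
# further down).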
def main():
    geom = dde.geometry.Sphere([0, 0, 0], 1)
    bc = dde.DirichletBC(geom, func, lambda _, on_boundary: on_boundary)

    data = dde.data.FPDE(
        geom,
        fpde,
        alpha,
        bc,
        [8, 8, 100],
        num_domain=256,
        num_boundary=1,
        solution=func,
    )

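    # Hard constraint: 1 - |x|^2 vanishes on the unit sphere, so the
    # homogeneous boundary condition is built into the network output.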
    net = dde.maps.FNN([3] + [20] * 4 + [1], "tanh", "Glorot normal")
    net.apply_output_transform(
        lambda x, y: (1 - tf.reduce_sum(x**2, axis=1, keepdims=True)) * y)

    model = dde.Model(data, net)
    model.compile("adam", lr=1e-3)
    losshistory, train_state = model.train(epochs=10000)
    dde.saveplot(losshistory, train_state, issave=False, isplot=True)

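    # Evaluate on 10000 random test points and save locations, exact values,
    # and predictions to test.dat.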
    X = geom.random_points(10000)
    y_true = func(X)
    y_pred = model.predict(X)
    print("L2 relative error:", dde.metrics.l2_relative_error(y_true, y_pred))
    np.savetxt("test.dat", np.hstack((X, y_true, y_pred)))
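

# Inverse problem on the unit disk: alpha is a trainable tf.Variable
# (initialized at 1.5) identified from 30 observations of the exact solution
# with true order alpha0 = 1.8.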
def main():
    alpha0 = 1.8
    alpha = tf.Variable(1.5)

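    # fpde returns the PDE residual: int_mat (sparse or dense) encodes the
    # discretized fractional operator and is applied to the network output y.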
    def fpde(x, y, int_mat):
        """\int_theta D_theta^alpha u(x)
        """
        if isinstance(int_mat, (list, tuple)) and len(int_mat) == 3:
            int_mat = tf.SparseTensor(*int_mat)
            lhs = tf.sparse_tensor_dense_matmul(int_mat, y)
        else:
            lhs = tf.matmul(int_mat, y)
        lhs = lhs[:, 0]
        # tf.lgamma returns log|Gamma(.)|, and Gamma((1 - alpha) / 2) < 0 for
        # alpha in (1, 2), so the leading minus sign restores the correct sign.
        lhs *= -tf.exp(
            tf.lgamma((1 - alpha) / 2) + tf.lgamma((2 + alpha) / 2)
        ) / (2 * np.pi**1.5)
        x = x[:tf.size(lhs)]
        rhs = (2**alpha0 * gamma(2 + alpha0 / 2) * gamma(1 + alpha0 / 2) *
               (1 - (1 + alpha0 / 2) * tf.reduce_sum(x**2, axis=1)))
        return lhs - rhs

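    # Exact solution u(x) = (1 - |x|^2)^(1 + alpha0 / 2), used for the
    # observations and the error metric.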
    def func(x):
        return (1 - np.linalg.norm(x, axis=1, keepdims=True)**2)**(1 + alpha0 / 2)

    geom = dde.geometry.Disk([0, 0], 1)

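    # The observations enter as a DirichletBC restricted to the sampled points
    # via a PointSet (an alternative to the dde.PointSetBC used in the first
    # snippet).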
    observe_x = geom.random_points(30)
    ptset = dde.bc.PointSet(observe_x)
    observe_y = dde.DirichletBC(geom, ptset.values_to_func(func(observe_x)),
                                lambda x, _: ptset.inside(x))

    data = dde.data.FPDE(
        geom,
        fpde,
        alpha,
        observe_y,
        [8, 100],
        num_domain=64,
        anchors=observe_x,
        solution=func,
    )

    net = dde.maps.FNN([2] + [20] * 4 + [1], "tanh", "Glorot normal")
    net.apply_output_transform(
        lambda x, y: (1 - tf.reduce_sum(x**2, axis=1, keepdims=True)) * y)

    model = dde.Model(data, net)
    model.compile("adam", lr=1e-3, loss_weights=[1, 100])
    variable = dde.callbacks.VariableValue(alpha, period=1000)
    losshistory, train_state = model.train(epochs=10000, callbacks=[variable])
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
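

# Residual of the 3-D fractional Poisson equation on the unit ball; alpha and
# gamma (scipy.special.gamma) are assumed to be defined at module level.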
def fpde(x, y, int_mat):
    """\int_theta D_theta^alpha u(x)"""
    if isinstance(int_mat, (list, tuple)) and len(int_mat) == 3:
        int_mat = tf.SparseTensor(*int_mat)
        lhs = tf.sparse_tensor_dense_matmul(int_mat, y)
    else:
        lhs = tf.matmul(int_mat, y)
    lhs = lhs[:, 0]
    lhs *= gamma((1 - alpha) / 2) * gamma((3 + alpha) / 2) / (2 * np.pi**2)
    x = x[:tf.size(lhs)]
    rhs = (2**alpha * gamma(2 + alpha / 2) * gamma((3 + alpha) / 2) /
           gamma(3 / 2) * (1 - (1 + alpha / 3) * tf.reduce_sum(x**2, axis=1)))
    return lhs - rhs
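

# Forward fractional Poisson equation on the unit disk with fixed alpha = 1.8;
# here gamma is scipy.special.gamma, since alpha is a plain float.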
def main():
    alpha = 1.8

    def fpde(x, y, int_mat):
        """\int_theta D_theta^alpha u(x)
        """
        if isinstance(int_mat, (list, tuple)) and len(int_mat) == 3:
            int_mat = tf.SparseTensor(*int_mat)
            lhs = tf.sparse_tensor_dense_matmul(int_mat, y)
        else:
            lhs = tf.matmul(int_mat, y)
        lhs = lhs[:, 0]
        lhs *= gamma((1 - alpha) / 2) * gamma((2 + alpha) / 2) / (2 * np.pi**1.5)
        x = x[:tf.size(lhs)]
        rhs = (2**alpha * gamma(2 + alpha / 2) * gamma(1 + alpha / 2) *
               (1 - (1 + alpha / 2) * tf.reduce_sum(x**2, axis=1)))
        return lhs - rhs

    def func(x):
        return np.abs(1 - np.linalg.norm(x, axis=1, keepdims=True)**2)**(1 + alpha / 2)

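    # Unit disk with Dirichlet data given by the exact solution on the
    # boundary.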
    geom = dde.geometry.Disk([0, 0], 1)
    bc = dde.DirichletBC(geom, func, lambda _, on_boundary: on_boundary)

    data = dde.data.FPDE(
        geom,
        fpde,
        alpha,
        bc,
        [8, 100],
        num_domain=100,
        num_boundary=1,
        solution=func,
    )

    net = dde.maps.FNN([2] + [20] * 4 + [1], "tanh", "Glorot normal")
    net.apply_output_transform(
        lambda x, y: (1 - tf.reduce_sum(x**2, axis=1, keepdims=True)) * y)

    model = dde.Model(data, net)
    model.compile("adam", lr=1e-3)
    losshistory, train_state = model.train(epochs=20000)
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)

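    # Report the L2 relative error on 1000 random test points and save the
    # results.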
    X = geom.random_points(1000)
    y_true = func(X)
    y_pred = model.predict(X)
    print("L2 relative error:", dde.metrics.l2_relative_error(y_true, y_pred))
    np.savetxt("test.dat", np.hstack((X, y_true, y_pred)))