def main():
    kf = tf.Variable(0.05)
    D = tf.Variable(1.0)

    def pde(x, y):
        ca, cb = y[:, 0:1], y[:, 1:2]
        dca_t = dde.grad.jacobian(y, x, i=0, j=1)
        dca_xx = dde.grad.hessian(y, x, component=0, i=0, j=0)
        dcb_t = dde.grad.jacobian(y, x, i=1, j=1)
        dcb_xx = dde.grad.hessian(y, x, component=1, i=0, j=0)
        eq_a = dca_t - 1e-3 * D * dca_xx + kf * ca * cb ** 2
        eq_b = dcb_t - 1e-3 * D * dcb_xx + 2 * kf * ca * cb ** 2
        return [eq_a, eq_b]

    def fun_bc(x):
        return 1 - x[:, 0:1]

    def fun_init(x):
        return np.exp(-20 * x[:, 0:1])

    geom = dde.geometry.Interval(0, 1)
    timedomain = dde.geometry.TimeDomain(0, 10)
    geomtime = dde.geometry.GeometryXTime(geom, timedomain)

    bc_a = dde.DirichletBC(
        geomtime, fun_bc, lambda _, on_boundary: on_boundary, component=0
    )
    bc_b = dde.DirichletBC(
        geomtime, fun_bc, lambda _, on_boundary: on_boundary, component=1
    )
    ic1 = dde.IC(geomtime, fun_init, lambda _, on_initial: on_initial, component=0)
    ic2 = dde.IC(geomtime, fun_init, lambda _, on_initial: on_initial, component=1)

    observe_x, Ca, Cb = gen_traindata()
    ptset = dde.bc.PointSet(observe_x)
    observe_y1 = dde.DirichletBC(
        geomtime, ptset.values_to_func(Ca), lambda x, _: ptset.inside(x), component=0
    )
    observe_y2 = dde.DirichletBC(
        geomtime, ptset.values_to_func(Cb), lambda x, _: ptset.inside(x), component=1
    )

    data = dde.data.TimePDE(
        geomtime,
        pde,
        [bc_a, bc_b, ic1, ic2, observe_y1, observe_y2],
        num_domain=2000,
        num_boundary=100,
        num_initial=100,
        anchors=observe_x,
        num_test=50000,
    )
    net = dde.maps.FNN([2] + [20] * 3 + [2], "tanh", "Glorot uniform")
    model = dde.Model(data, net)
    model.compile("adam", lr=0.001)
    variable = dde.callbacks.VariableValue(
        [kf, D], period=1000, filename="variables.dat"
    )
    losshistory, train_state = model.train(epochs=80000, callbacks=[variable])
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
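The gen_traindata() called above is not shown in this snippet. A minimal sketch consistent with the module-level helper that appears later in this collection (it assumes a dataset/reaction.npz file containing t, x, Ca, and Cb):

import numpy as np

def gen_traindata():
    # Flatten the space-time grid into (x, t) pairs matching the network
    # input layout, and the concentrations into column vectors.
    data = np.load("dataset/reaction.npz")
    t, x, ca, cb = data["t"], data["x"], data["Ca"], data["Cb"]
    X, T = np.meshgrid(x, t)
    return (
        np.hstack((X.reshape(-1, 1), T.reshape(-1, 1))),
        ca.reshape(-1, 1),
        cb.reshape(-1, 1),
    )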
Example #2
def main():
    C1 = tf.Variable(1.0)
    C2 = tf.Variable(1.0)
    C3 = tf.Variable(1.0)

    def Lorenz_system(x, y):
        """Lorenz system.
        dy1/dx = 10 * (y2 - y1)
        dy2/dx = y1 * (28 - y3) - y2
        dy3/dx = y1 * y2 - 8/3 * y3
        """
        y1, y2, y3 = y[:, 0:1], y[:, 1:2], y[:, 2:]
        dy1_x = dde.grad.jacobian(y, x, i=0)
        dy2_x = dde.grad.jacobian(y, x, i=1)
        dy3_x = dde.grad.jacobian(y, x, i=2)
        return [
            dy1_x - C1 * (y2 - y1),
            dy2_x - y1 * (C2 - y3) + y2,
            dy3_x - y1 * y2 + C3 * y3,
        ]

    def boundary(_, on_initial):
        return on_initial

    geom = dde.geometry.TimeDomain(0, 3)

    # Initial conditions
    ic1 = dde.IC(geom, lambda X: -8, boundary, component=0)
    ic2 = dde.IC(geom, lambda X: 7, boundary, component=1)
    ic3 = dde.IC(geom, lambda X: 27, boundary, component=2)

    # Get the train data
    observe_t, ob_y = gen_traindata()
    observe_y0 = dde.PointSetBC(observe_t, ob_y[:, 0:1], component=0)
    observe_y1 = dde.PointSetBC(observe_t, ob_y[:, 1:2], component=1)
    observe_y2 = dde.PointSetBC(observe_t, ob_y[:, 2:3], component=2)

    data = dde.data.PDE(
        geom,
        Lorenz_system,
        [ic1, ic2, ic3, observe_y0, observe_y1, observe_y2],
        num_domain=400,
        num_boundary=2,
        anchors=observe_t,
    )

    net = dde.maps.FNN([1] + [40] * 3 + [3], "tanh", "Glorot uniform")
    model = dde.Model(data, net)
    model.compile("adam", lr=0.001)
    variable = dde.callbacks.VariableValue([C1, C2, C3],
                                           period=600,
                                           filename="variables.dat")
    losshistory, train_state = model.train(epochs=60000, callbacks=[variable])
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
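As above, gen_traindata() supplies the observations. A minimal sketch matching the module-level helper shown later in this collection (it assumes a dataset/Lorenz.npz file with a time array t of shape (N, 1) and a state array y of shape (N, 3)):

import numpy as np

def gen_traindata():
    data = np.load("dataset/Lorenz.npz")
    return data["t"], data["y"]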
Example #3
    def params_to_inverse(self, args_param):
        params = []
        if not args_param:
            # Forward problem: keep the fixed parameter values; no trainables.
            return self.a, self.b, self.D, params
        # Inverse problem: each tf.Variable holds the log of a positive scalar
        # relatively close to the ground-truth value, and is exponentiated so
        # the recovered parameter stays positive during training.
        if 'a' in args_param:
            self.a = tf.math.exp(tf.Variable(-3.92))
            params.append(self.a)
        if 'b' in args_param:
            self.b = tf.math.exp(tf.Variable(-1.2))
            params.append(self.b)
        if 'd' in args_param:
            self.D = tf.math.exp(tf.Variable(-1.6))
            params.append(self.D)
        return params
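For illustration only (not part of the original class): optimizing the log-value and exponentiating keeps each recovered parameter strictly positive, with initial guesses exp(-3.92) ≈ 0.020, exp(-1.2) ≈ 0.30, and exp(-1.6) ≈ 0.20.

# Hypothetical stand-alone sketch of the positivity trick used above.
log_a = tf.Variable(-3.92)   # the optimizer updates the log-parameter
a = tf.math.exp(log_a)       # physical parameter, always > 0 (~0.020 initially)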
def main():
    alpha0 = 1.8
    alpha = tf.Variable(1.5)

    def fpde(x, y, int_mat):
        """(D_{0+}^alpha + D_{1-}^alpha) u(x)
        """
        if isinstance(int_mat, (list, tuple)) and len(int_mat) == 3:
            int_mat = tf.SparseTensor(*int_mat)
            lhs = tf.sparse_tensor_dense_matmul(int_mat, y)
        else:
            lhs = tf.matmul(int_mat, y)
        lhs /= 2 * tf.cos(alpha * np.pi / 2)
        rhs = gamma(alpha0 + 2) * x
        return lhs - rhs[: tf.size(lhs)]

    def func(x):
        return x * (np.abs(1 - x ** 2)) ** (alpha0 / 2)

    geom = dde.geometry.Interval(-1, 1)

    observe_x = np.linspace(-1, 1, num=20)[:, None]
    observe_y = dde.PointSetBC(observe_x, func(observe_x))

    # Static auxiliary points
    # data = dde.data.FPDE(
    #     geom,
    #     fpde,
    #     alpha,
    #     observe_y,
    #     [101],
    #     meshtype="static",
    #     anchors=observe_x,
    #     solution=func,
    # )
    # Dynamic auxiliary points
    data = dde.data.FPDE(
        geom,
        fpde,
        alpha,
        observe_y,
        [100],
        meshtype="dynamic",
        num_domain=20,
        anchors=observe_x,
        solution=func,
        num_test=100,
    )

    net = dde.maps.FNN([1] + [20] * 4 + [1], "tanh", "Glorot normal")
    net.apply_output_transform(lambda x, y: (1 - x ** 2) * y)

    model = dde.Model(data, net)

    model.compile("adam", lr=1e-3, loss_weights=[1, 100])
    variable = dde.callbacks.VariableValue(alpha, period=1000)
    losshistory, train_state = model.train(epochs=10000, callbacks=[variable])
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
def main():
    alpha0 = 1.8
    alpha = tf.Variable(1.5)

    def fpde(x, y, int_mat):
        r"""\int_theta D_theta^alpha u(x)"""
        if isinstance(int_mat, (list, tuple)) and len(int_mat) == 3:
            int_mat = tf.SparseTensor(*int_mat)
            lhs = tf.sparse_tensor_dense_matmul(int_mat, y)
        else:
            lhs = tf.matmul(int_mat, y)
        lhs = lhs[:, 0]
        lhs *= -tf.exp(
            tf.lgamma((1 - alpha) / 2) + tf.lgamma(
                (2 + alpha) / 2)) / (2 * np.pi**1.5)
        x = x[:tf.size(lhs)]
        rhs = (2**alpha0 * gamma(2 + alpha0 / 2) * gamma(1 + alpha0 / 2) *
               (1 - (1 + alpha0 / 2) * tf.reduce_sum(x**2, axis=1)))
        return lhs - rhs

    def func(x):
        return (1 - np.linalg.norm(x, axis=1, keepdims=True)**2)**(1 +
                                                                   alpha0 / 2)

    geom = dde.geometry.Disk([0, 0], 1)

    observe_x = geom.random_points(30)
    ptset = dde.bc.PointSet(observe_x)
    observe_y = dde.DirichletBC(geom, ptset.values_to_func(func(observe_x)),
                                lambda x, _: ptset.inside(x))

    data = dde.data.FPDE(
        geom,
        fpde,
        alpha,
        observe_y,
        [8, 100],
        num_domain=64,
        anchors=observe_x,
        solution=func,
    )

    net = dde.maps.FNN([2] + [20] * 4 + [1], "tanh", "Glorot normal")
    net.apply_output_transform(
        lambda x, y: (1 - tf.reduce_sum(x**2, axis=1, keepdims=True)) * y)

    model = dde.Model(data, net)
    model.compile("adam", lr=1e-3, loss_weights=[1, 100])
    variable = dde.callbacks.VariableValue(alpha, period=1000)
    losshistory, train_state = model.train(epochs=10000, callbacks=[variable])
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
def main():
    C = tf.Variable(2.0)

    def pde(x, y):
        dy_x = tf.gradients(y, x)[0]
        dy_x, dy_t = dy_x[:, 0:1], dy_x[:, 1:]
        dy_xx = tf.gradients(dy_x, x)[0][:, 0:1]
        return (
            dy_t - C * dy_xx + tf.exp(-x[:, 1:]) *
            (tf.sin(np.pi * x[:, 0:1]) - np.pi**2 * tf.sin(np.pi * x[:, 0:1])))

    def func(x):
        return np.sin(np.pi * x[:, 0:1]) * np.exp(-x[:, 1:])

    geom = dde.geometry.Interval(-1, 1)
    timedomain = dde.geometry.TimeDomain(0, 1)
    geomtime = dde.geometry.GeometryXTime(geom, timedomain)

    bc = dde.DirichletBC(geomtime, func, lambda _, on_boundary: on_boundary)
    ic = dde.IC(geomtime, func, lambda _, on_initial: on_initial)

    observe_x = np.vstack((np.linspace(-1, 1, num=10), np.full((10), 1))).T
    ptset = dde.bc.PointSet(observe_x)
    observe_y = dde.DirichletBC(geomtime,
                                ptset.values_to_func(func(observe_x)),
                                lambda x, _: ptset.inside(x))

    data = dde.data.TimePDE(
        geomtime,
        pde,
        [bc, ic, observe_y],
        num_domain=40,
        num_boundary=20,
        num_initial=10,
        anchors=observe_x,
        solution=func,
        num_test=10000,
    )

    layer_size = [2] + [32] * 3 + [1]
    activation = "tanh"
    initializer = "Glorot uniform"
    net = dde.maps.FNN(layer_size, activation, initializer)

    model = dde.Model(data, net)

    model.compile("adam", lr=0.001, metrics=["l2 relative error"])
    variable = dde.callbacks.VariableValue(C, period=1000)
    losshistory, train_state = model.train(epochs=50000, callbacks=[variable])

    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
Example #7
def pinn(data_t, data_y, meal_t=None, meal_q=None):
    if meal_t is None:
        # meal_t = tf.convert_to_tensor([300, 650, 1100], dtype=tf.float32)
        t1 = (tf.tanh(tf.Variable(0, trainable=True, dtype=tf.float32)) +
              1) * 10 + 290
        t2 = (tf.tanh(tf.Variable(0, trainable=True, dtype=tf.float32)) +
              1) * 10 + 640
        t3 = (tf.tanh(tf.Variable(0, trainable=True, dtype=tf.float32)) +
              1) * 10 + 1090
        meal_t = tf.convert_to_tensor([t1, t2, t3])

        meal_q = (
            tf.tanh(tf.Variable([0, 0, 0], trainable=True, dtype=tf.float32)) +
            1) / 2 * 900 + 100

    Vp = 3
    # Vp = tf.tanh(tf.Variable(0, trainable=True, dtype=tf.float32)) + 3
    Vi = 11
    # Vi = (tf.tanh(tf.Variable(0, trainable=True, dtype=tf.float32)) + 1) * 4 + 7
    Vg = 10
    # Vg = (tf.tanh(tf.Variable(0, trainable=True, dtype=tf.float32)) + 1) * 3 + 7
    # E = 0.2
    E = (tf.tanh(tf.Variable(0, trainable=True, dtype=tf.float32)) +
         1) * 0.1 + 0.1
    # tp = 6
    tp = (tf.tanh(tf.Variable(0, trainable=True, dtype=tf.float32)) +
          1) * 2 + 4
    # ti = 100
    ti = (tf.tanh(tf.Variable(0, trainable=True, dtype=tf.float32)) +
          1) * 40 + 60
    # td = 12
    td = (tf.tanh(tf.Variable(0, trainable=True, dtype=tf.float32)) +
          1) * 25 / 6 + 25 / 3
    # k = 1 / 120
    k = get_variable(0.0083)
    # Rm = 209 / 100  # scaled
    Rm = get_variable(2.09)
    # a1 = 6.6
    a1 = get_variable(6.6)
    # C1 = 300 / 100  # scaled
    C1 = get_variable(3)
    # C2 = 144 / 100  # scaled
    C2 = get_variable(1.44)
    C3 = 100 / 100  # scaled
    # C4 = 80 / 100  # scaled
    C4 = get_variable(0.8)
    # C5 = 26 / 100  # scaled
    C5 = get_variable(0.26)
    # Ub = 72 / 100  # scaled
    Ub = get_variable(0.72)
    # U0 = 4 / 100  # scaled
    U0 = get_variable(0.04)
    # Um = 90 / 100  # scaled
    Um = get_variable(0.9)
    # Rg = 180 / 100  # scaled
    Rg = get_variable(1.8)
    # alpha = 7.5
    alpha = get_variable(7.5)
    # beta = 1.772
    beta = get_variable(1.772)

    var_list = [
        Vp, Vi, Vg, E, tp, ti, td, k, Rm, a1, C1, C2, C3, C4, C5, Ub, U0, Um,
        Rg, alpha, beta
    ]

    def ODE(t, y):
        Ip = y[:, 0:1]
        Ii = y[:, 1:2]
        G = y[:, 2:3]
        h1 = y[:, 3:4]
        h2 = y[:, 4:5]
        h3 = y[:, 5:6]

        f1 = Rm * tf.math.sigmoid(G / (Vg * C1) - a1)
        f2 = Ub * (1 - tf.math.exp(-G / (Vg * C2)))
        kappa = (1 / Vi + 1 / (E * ti)) / C4
        f3 = (U0 + Um /
              (1 + tf.pow(tf.maximum(kappa * Ii, 1e-3), -beta))) / (Vg * C3)
        f4 = Rg * tf.sigmoid(alpha * (1 - h3 / (Vp * C5)))
        dt = t - meal_t
        IG = tf.math.reduce_sum(
            0.5 * meal_q * k * tf.math.exp(-k * dt) * (tf.math.sign(dt) + 1),
            axis=1,
            keepdims=True,
        )
        tmp = E * (Ip / Vp - Ii / Vi)
        return [
            tf.gradients(Ip, t)[0] - (f1 - tmp - Ip / tp),
            tf.gradients(Ii, t)[0] - (tmp - Ii / ti),
            tf.gradients(G, t)[0] - (f4 + IG - f2 - f3 * G),
            tf.gradients(h1, t)[0] - (Ip - h1) / td,
            tf.gradients(h2, t)[0] - (h1 - h2) / td,
            tf.gradients(h3, t)[0] - (h2 - h3) / td,
        ]

    geom = dde.geometry.TimeDomain(data_t[0, 0], data_t[-1, 0])

    # Observation points
    n = len(data_t)
    idx = np.append(
        np.random.choice(np.arange(1, n - 1), size=n // 5, replace=False),
        [0, n - 1])
    ptset = dde.bc.PointSet(data_t[idx])
    inside = lambda x, _: ptset.inside(x)
    observe_y2 = dde.DirichletBC(geom,
                                 ptset.values_to_func(data_y[idx, 2:3]),
                                 inside,
                                 component=2)
    np.savetxt("glucose_input.dat", np.hstack((data_t[idx], data_y[idx, 2:3])))

    data = dde.data.PDE(geom, ODE, [observe_y2], anchors=data_t)

    net = dde.maps.FNN([1] + [128] * 3 + [6], "swish", "Glorot normal")

    def feature_transform(t):
        t = 0.01 * t
        return tf.concat(
            (t, tf.sin(t), tf.sin(2 * t), tf.sin(3 * t), tf.sin(
                4 * t), tf.sin(5 * t)),
            axis=1,
        )

    net.apply_feature_transform(feature_transform)

    def output_transform(t, y):
        idx = 1799
        k = (data_y[idx] - data_y[0]) / (data_t[idx] - data_t[0])
        b = (data_t[idx] * data_y[0] -
             data_t[0] * data_y[idx]) / (data_t[idx] - data_t[0])
        linear = k * t + b
        factor = tf.math.tanh(t) * tf.math.tanh(idx - t)
        return linear + factor * tf.constant([1, 1, 1e2, 1, 1, 1]) * y

    net.apply_output_transform(output_transform)

    model = dde.Model(data, net)

    checkpointer = dde.callbacks.ModelCheckpoint("./model/model.ckpt",
                                                 verbose=1,
                                                 save_better_only=True,
                                                 period=1000)
    variable = dde.callbacks.VariableValue(
        [v for v in var_list if isinstance(v, tf.Tensor)],
        period=1000,
        filename="variables.dat",
    )
    callbacks = [checkpointer, variable]
    if isinstance(meal_t, tf.Tensor):
        variable_meal = dde.callbacks.VariableValue(
            [meal_t[0], meal_t[1], meal_t[2], meal_q[0], meal_q[1], meal_q[2]],
            period=1000,
            filename="variables_meal.dat",
            precision=3,
        )
        callbacks.append(variable_meal)

    model.compile("adam", lr=1e-3, loss_weights=[0, 0, 0, 0, 0, 0, 1e-2])
    model.train(epochs=2000, display_every=1000)
    model.compile("adam", lr=1e-3, loss_weights=[1, 1, 1e-2, 1, 1, 1, 1e-2])
    losshistory, train_state = model.train(
        epochs=600000 if not isinstance(meal_t, tf.Tensor) else 1500000,
        display_every=1000,
        callbacks=callbacks,
        # model_restore_path="./model/model.ckpt-"
    )
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
    var_list = [
        model.sess.run(v) if isinstance(v, tf.Tensor) else v for v in var_list
    ]
    if not isinstance(meal_t, tf.Tensor):
        return var_list
    return var_list, model.sess.run(meal_t), model.sess.run(meal_q)
Example #8
def get_variable(v):
    low, up = v * 0.2, v * 1.8
    l = (up - low) / 2
    return l * tf.tanh(tf.Variable(0, trainable=True,
                                   dtype=tf.float32)) + l + low
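A quick sanity check of get_variable (a sketch assuming the TF1-style graph mode used throughout these examples, with get_variable and tf as defined above): the parameter starts at v because tanh(0) == 0, and it is confined to (0.2 * v, 1.8 * v) no matter how the underlying variable moves.

p = get_variable(2.09)  # e.g. the scaled Rm above; confined to (0.418, 3.762)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(p))  # prints 2.09 at initialization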
"""Backend supported: tensorflow.compat.v1"""
import deepxde as dde
import numpy as np
from deepxde.backend import tf
from scipy.special import gamma

alpha0 = 1.8
alpha = tf.Variable(1.5)


def fpde(x, y, int_mat):
    """(D_{0+}^alpha + D_{1-}^alpha) u(x)"""
    if isinstance(int_mat, (list, tuple)) and len(int_mat) == 3:
        int_mat = tf.SparseTensor(*int_mat)
        lhs = tf.sparse_tensor_dense_matmul(int_mat, y)
    else:
        lhs = tf.matmul(int_mat, y)
    lhs /= 2 * tf.cos(alpha * np.pi / 2)
    rhs = gamma(alpha0 + 2) * x
    return lhs - rhs[:tf.size(lhs)]


def func(x):
    return x * (np.abs(1 - x**2))**(alpha0 / 2)


def main():
    geom = dde.geometry.Interval(-1, 1)

    observe_x = np.linspace(-1, 1, num=20)[:, None]
    observe_y = dde.PointSetBC(observe_x, func(observe_x))
Example #10
def pinn(data_t, data_y):
    k1 = tf.math.softplus(tf.Variable(1, trainable=True, dtype=tf.float32))
    kd1 = tf.math.softplus(tf.Variable(1, trainable=True, dtype=tf.float32)) * 10
    kd2 = tf.math.softplus(tf.Variable(1, trainable=True, dtype=tf.float32)) * 10
    k3 = tf.math.softplus(tf.Variable(1, trainable=True, dtype=tf.float32)) * 10
    kd3 = tf.math.softplus(tf.Variable(1, trainable=True, dtype=tf.float32)) * 100
    kd4 = tf.math.softplus(tf.Variable(1, trainable=True, dtype=tf.float32))
    k5 = tf.math.softplus(tf.Variable(1, trainable=True, dtype=tf.float32)) * 1e4
    kd5 = tf.math.softplus(tf.Variable(1, trainable=True, dtype=tf.float32)) * 0.01
    kd6 = tf.math.softplus(tf.Variable(1, trainable=True, dtype=tf.float32)) * 0.1

    var_list = [k1, kd1, kd2, k3, kd3, kd4, k5, kd5, kd6]

    def ODE(t, y):
        v4_1 = kd1 * y[:, 4:5]
        v4_2 = kd2 * y[:, 4:5]
        v5_3 = kd3 * y[:, 5:6]
        v5_4 = kd4 * y[:, 5:6]
        v7_5 = kd5 * y[:, 7:8]
        v7_6 = kd6 * y[:, 7:8]
        v03 = k1 * y[:, 3:4] * y[:, 0:1]
        v12 = k3 * y[:, 1:2] * y[:, 2:3]
        v36 = k5 * y[:, 6:7] * y[:, 3:4]

        return [
            tf.gradients(y[:, 0:1], t)[0] - (-v03 + v4_1),
            tf.gradients(y[:, 1:2], t)[0] - (v4_2 - v12 + v5_3 + v5_4),
            tf.gradients(y[:, 2:3], t)[0] - (-v12 + v5_3),
            tf.gradients(y[:, 3:4], t)[0] - (v5_4 - v03 + v4_1 - v36 + v7_5 + v4_2),
            tf.gradients(y[:, 4:5], t)[0] - (-v4_2 + v03 - v4_1),
            tf.gradients(y[:, 5:6], t)[0] - (-v5_4 + v12 - v5_3),
            tf.gradients(y[:, 6:7], t)[0] - (-v36 + v7_5 + v7_6),
            tf.gradients(y[:, 7:8], t)[0] - (v36 - v7_5 - v7_6),
        ]

    geom = dde.geometry.TimeDomain(data_t[0, 0], data_t[-1, 0])

    # Anchor the solution at the midpoint of the time series
    def boundary(x, _):
        return np.isclose(x[0], data_t[len(data_t) // 2, 0])

    y1 = data_y[len(data_t) // 2]
    bc0 = dde.DirichletBC(geom, lambda X: y1[0], boundary, component=0)
    bc1 = dde.DirichletBC(geom, lambda X: y1[1], boundary, component=1)
    bc2 = dde.DirichletBC(geom, lambda X: y1[2], boundary, component=2)
    bc3 = dde.DirichletBC(geom, lambda X: y1[3], boundary, component=3)
    bc4 = dde.DirichletBC(geom, lambda X: y1[4], boundary, component=4)
    bc5 = dde.DirichletBC(geom, lambda X: y1[5], boundary, component=5)
    bc6 = dde.DirichletBC(geom, lambda X: y1[6], boundary, component=6)
    bc7 = dde.DirichletBC(geom, lambda X: y1[7], boundary, component=7)

    # Observation points
    n = len(data_t)
    idx = np.append(
        np.random.choice(np.arange(1, n - 1), size=n // 5, replace=False), [0, n - 1]
    )
    ptset = dde.bc.PointSet(data_t[idx])
    inside = lambda x, _: ptset.inside(x)
    observe_y3 = dde.DirichletBC(
        geom, ptset.values_to_func(data_y[idx, 3:4]), inside, component=3
    )
    np.savetxt("apoptosis_input.dat", np.hstack((data_t[idx], data_y[idx, 3:4])))

    data = dde.data.PDE(
        geom, ODE, [bc0, bc1, bc2, bc3, bc4, bc5, bc6, bc7, observe_y3], anchors=data_t,
    )

    net = dde.maps.FNN([1] + [256] * 4 + [8], "swish", "Glorot normal")

    def feature_transform(t):
        t = 0.1 * t
        return tf.concat((t, tf.exp(-t)), axis=1,)

    net.apply_feature_transform(feature_transform)

    def output_transform(t, y):
        return (
            data_y[0]
            + tf.math.tanh(t) * tf.constant([1, 1, 1, 1, 0.01, 0.1, 0.01, 0.01]) * y
        )

    net.apply_output_transform(output_transform)

    model = dde.Model(data, net)

    checkpointer = dde.callbacks.ModelCheckpoint(
        "./model/model.ckpt", verbose=1, save_better_only=True, period=1000
    )
    variable = dde.callbacks.VariableValue(
        var_list, period=1000, filename="variables.dat", precision=3,
    )
    callbacks = [checkpointer, variable]

    model.compile(
        "adam",
        lr=1e-3,
        # loss_weights=[1, 1, 1, 1, 1e3, 1, 1, 1] + [1, 1, 1, 1, 100, 10, 100, 100, 1e2],  # noiseless
        loss_weights=[1, 1, 1, 1, 1e3, 1, 1, 1] + [1, 1, 1, 1, 100, 10, 100, 100, 10],  # death noise
        # loss_weights=[1, 1, 1, 1, 1e3, 1, 1, 1] + [1, 1, 1, 1, 100, 10, 100, 100, 1],  # survival noise
    )
    losshistory, train_state = model.train(
        # epochs=700000,  # death noiseless
        epochs=1500000,  # death noise
        # epochs=1500000,  # survival
        display_every=1000,
        callbacks=callbacks,
    )
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
    var_list = [model.sess.run(v) for v in var_list]
    return var_list
Example #11
"""Backend supported: tensorflow.compat.v1, tensorflow

Documentation: https://deepxde.readthedocs.io/en/latest/demos/lorenz.inverse.html
"""
import deepxde as dde
import numpy as np
from deepxde.backend import tf


def gen_traindata():
    data = np.load("dataset/Lorenz.npz")
    return data["t"], data["y"]


C1 = tf.Variable(1.0)
C2 = tf.Variable(1.0)
C3 = tf.Variable(1.0)


def Lorenz_system(x, y):
    """Lorenz system.
    dy1/dx = 10 * (y2 - y1)
    dy2/dx = y1 * (28 - y3) - y2
    dy3/dx = y1 * y2 - 8/3 * y3
    """
    y1, y2, y3 = y[:, 0:1], y[:, 1:2], y[:, 2:]
    dy1_x = dde.grad.jacobian(y, x, i=0)
    dy2_x = dde.grad.jacobian(y, x, i=1)
    dy3_x = dde.grad.jacobian(y, x, i=2)
    return [
        dy1_x - C1 * (y2 - y1),
        dy2_x - y1 * (C2 - y3) + y2,
        dy3_x - y1 * y2 + C3 * y3,
    ]
Example #12
import deepxde as dde
import numpy as np
from deepxde.backend import tf


def gen_traindata():
    data = np.load("dataset/reaction.npz")
    t, x, ca, cb = data["t"], data["x"], data["Ca"], data["Cb"]
    X, T = np.meshgrid(x, t)
    X = np.reshape(X, (-1, 1))
    T = np.reshape(T, (-1, 1))
    Ca = np.reshape(ca, (-1, 1))
    Cb = np.reshape(cb, (-1, 1))
    return np.hstack((X, T)), Ca, Cb


kf = tf.Variable(0.05)
D = tf.Variable(1.0)


def pde(x, y):
    ca, cb = y[:, 0:1], y[:, 1:2]
    dca_t = dde.grad.jacobian(y, x, i=0, j=1)
    dca_xx = dde.grad.hessian(y, x, component=0, i=0, j=0)
    dcb_t = dde.grad.jacobian(y, x, i=1, j=1)
    dcb_xx = dde.grad.hessian(y, x, component=1, i=0, j=0)
    eq_a = dca_t - 1e-3 * D * dca_xx + kf * ca * cb**2
    eq_b = dcb_t - 1e-3 * D * dcb_xx + 2 * kf * ca * cb**2
    return [eq_a, eq_b]


def fun_bc(x):
    return 1 - x[:, 0:1]
Example #13
def pinn(data_t, data_y, noise):
    J0 = tf.math.softplus(tf.Variable(0, trainable=True, dtype=tf.float32))
    k1 = tf.math.softplus(tf.Variable(0, trainable=True,
                                      dtype=tf.float32)) * 100
    k2 = tf.math.softplus(tf.Variable(0, trainable=True, dtype=tf.float32))
    k3 = tf.math.softplus(tf.Variable(0, trainable=True,
                                      dtype=tf.float32)) * 10
    k4 = tf.math.softplus(tf.Variable(0, trainable=True,
                                      dtype=tf.float32)) * 100
    k5 = tf.math.softplus(tf.Variable(0, trainable=True, dtype=tf.float32))
    k6 = tf.math.softplus(tf.Variable(0, trainable=True,
                                      dtype=tf.float32)) * 10
    k = tf.math.softplus(tf.Variable(0, trainable=True, dtype=tf.float32))
    kappa = tf.math.softplus(tf.Variable(0, trainable=True,
                                         dtype=tf.float32)) * 10
    q = tf.math.softplus(tf.Variable(0, trainable=True, dtype=tf.float32))
    K1 = tf.math.softplus(tf.Variable(0, trainable=True, dtype=tf.float32))
    psi = tf.math.softplus(tf.Variable(0, trainable=True,
                                       dtype=tf.float32)) * 0.1
    N = tf.math.softplus(tf.Variable(0, trainable=True, dtype=tf.float32))
    A = tf.math.softplus(tf.Variable(0, trainable=True, dtype=tf.float32))

    var_list = [J0, k1, k2, k3, k4, k5, k6, k, kappa, q, K1, psi, N, A]

    def ODE(t, y):
        v1 = k1 * y[:, 0:1] * y[:, 5:6] / (1 +
                                           tf.maximum(y[:, 5:6] / K1, 1e-3)**q)
        v2 = k2 * y[:, 1:2] * (N - y[:, 4:5])
        v3 = k3 * y[:, 2:3] * (A - y[:, 5:6])
        v4 = k4 * y[:, 3:4] * y[:, 4:5]
        v5 = k5 * y[:, 5:6]
        v6 = k6 * y[:, 1:2] * y[:, 4:5]
        v7 = k * y[:, 6:7]
        J = kappa * (y[:, 3:4] - y[:, 6:7])
        return [
            tf.gradients(y[:, 0:1], t)[0] - (J0 - v1),
            tf.gradients(y[:, 1:2], t)[0] - (2 * v1 - v2 - v6),
            tf.gradients(y[:, 2:3], t)[0] - (v2 - v3),
            tf.gradients(y[:, 3:4], t)[0] - (v3 - v4 - J),
            tf.gradients(y[:, 4:5], t)[0] - (v2 - v4 - v6),
            tf.gradients(y[:, 5:6], t)[0] - (-2 * v1 + 2 * v3 - v5),
            tf.gradients(y[:, 6:7], t)[0] - (psi * J - v7),
        ]

    geom = dde.geometry.TimeDomain(data_t[0, 0], data_t[-1, 0])

    # Right point
    def boundary(x, _):
        return np.isclose(x[0], data_t[-1, 0])

    y1 = data_y[-1]
    bc0 = dde.DirichletBC(geom, lambda X: y1[0], boundary, component=0)
    bc1 = dde.DirichletBC(geom, lambda X: y1[1], boundary, component=1)
    bc2 = dde.DirichletBC(geom, lambda X: y1[2], boundary, component=2)
    bc3 = dde.DirichletBC(geom, lambda X: y1[3], boundary, component=3)
    bc4 = dde.DirichletBC(geom, lambda X: y1[4], boundary, component=4)
    bc5 = dde.DirichletBC(geom, lambda X: y1[5], boundary, component=5)
    bc6 = dde.DirichletBC(geom, lambda X: y1[6], boundary, component=6)

    # Observation points
    n = len(data_t)
    idx = np.append(
        np.random.choice(np.arange(1, n - 1), size=n // 4, replace=False),
        [0, n - 1])
    ptset = dde.bc.PointSet(data_t[idx])
    inside = lambda x, _: ptset.inside(x)
    observe_y4 = dde.DirichletBC(geom,
                                 ptset.values_to_func(data_y[idx, 4:5]),
                                 inside,
                                 component=4)
    observe_y5 = dde.DirichletBC(geom,
                                 ptset.values_to_func(data_y[idx, 5:6]),
                                 inside,
                                 component=5)
    np.savetxt("glycolysis_input.dat",
               np.hstack((data_t[idx], data_y[idx, 4:5], data_y[idx, 5:6])))

    data = dde.data.PDE(
        geom,
        ODE,
        [bc0, bc1, bc2, bc3, bc4, bc5, bc6, observe_y4, observe_y5],
        anchors=data_t,
    )

    net = dde.maps.FNN([1] + [128] * 3 + [7], "swish", "Glorot normal")

    def feature_transform(t):
        return tf.concat(
            (
                t,
                tf.sin(t),
                tf.sin(2 * t),
                tf.sin(3 * t),
                tf.sin(4 * t),
                tf.sin(5 * t),
                tf.sin(6 * t),
            ),
            axis=1,
        )

    net.apply_feature_transform(feature_transform)

    def output_transform(t, y):
        return (
            data_y[0] +
            tf.math.tanh(t) * tf.constant([1, 1, 0.1, 0.1, 0.1, 1, 0.1]) * y)

    net.apply_output_transform(output_transform)

    model = dde.Model(data, net)

    checkpointer = dde.callbacks.ModelCheckpoint("./model/model.ckpt",
                                                 verbose=1,
                                                 save_better_only=True,
                                                 period=1000)
    variable = dde.callbacks.VariableValue(
        var_list,
        period=1000,
        filename="variables.dat",
        precision=3,
    )
    callbacks = [checkpointer, variable]

    bc_weights = [1, 1, 10, 10, 10, 1, 10]
    if noise >= 0.1:
        bc_weights = [w * 10 for w in bc_weights]
    data_weights = [1e3, 1]
    # Large noise requires small data_weights
    if noise >= 0.1:
        data_weights = [w / 10 for w in data_weights]
    model.compile("adam",
                  lr=1e-3,
                  loss_weights=[0] * 7 + bc_weights + data_weights)
    model.train(epochs=1000, display_every=1000)
    ode_weights = [1e-3, 1e-3, 1e-2, 1e-2, 1e-2, 1e-3, 1]
    # Large noise requires large ode_weights
    if noise > 0:
        ode_weights = [10 * w for w in ode_weights]
    model.compile("adam",
                  lr=1e-3,
                  loss_weights=ode_weights + bc_weights + data_weights)
    losshistory, train_state = model.train(
        epochs=900000 if noise == 0 else 2000000,
        display_every=1000,
        callbacks=callbacks,
        disregard_previous_best=True,
        # model_restore_path="./model/model.ckpt-"
    )
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
    var_list = [model.sess.run(v) for v in var_list]
    return var_list
Example #14
"""Backend supported: tensorflow.compat.v1, tensorflow"""
import deepxde as dde
import numpy as np
from deepxde.backend import tf

C = tf.Variable(2.0)


def pde(x, y):
    dy_t = dde.grad.jacobian(y, x, i=0, j=1)
    dy_xx = dde.grad.hessian(y, x, i=0, j=0)
    return (dy_t - C * dy_xx + tf.exp(-x[:, 1:]) *
            (tf.sin(np.pi * x[:, 0:1]) - np.pi**2 * tf.sin(np.pi * x[:, 0:1])))


def func(x):
    return np.sin(np.pi * x[:, 0:1]) * np.exp(-x[:, 1:])


geom = dde.geometry.Interval(-1, 1)
timedomain = dde.geometry.TimeDomain(0, 1)
geomtime = dde.geometry.GeometryXTime(geom, timedomain)

bc = dde.DirichletBC(geomtime, func, lambda _, on_boundary: on_boundary)
ic = dde.IC(geomtime, func, lambda _, on_initial: on_initial)

observe_x = np.vstack((np.linspace(-1, 1, num=10), np.full((10), 1))).T
observe_y = dde.PointSetBC(observe_x, func(observe_x), component=0)

data = dde.data.TimePDE(
    geomtime,