コード例 #1
0
def main(args):
    """Train a PINN for the 1D viscous Burgers equation on x in [-1, 1], t in [0, 1].

    The network learns u(x, t) satisfying u_t + u*u_x = (0.01/pi)*u_xx with
    u(x, 0) = -sin(pi*x) and zero Dirichlet conditions at both x boundaries.

    Args:
        args: Optional dict of hyperparameters. When ``None`` a default
            configuration is used. Expected keys: 'layer_sizes',
            'run_functions_eagerly', 'epoch_adam', 'epoch_lbfgs',
            'lbfgs_eager', 'isAdaptive', 'dist_training', 'dict_adaptive',
            'N_x', 'N_t', 'N_f', 'batch_sz'.
    """
    if args is None:
        args = {
            'layer_sizes': [2, 21, 21, 21, 21, 1],
            'run_functions_eagerly': True,
            'epoch_adam': 20,
            'epoch_lbfgs': 20,
            'lbfgs_eager': False,
            'isAdaptive': True,
            'dist_training': False,
            'dict_adaptive': {
                "residual": [True],
                "BCs": [True, False, False]
            },
            'N_x': 100,
            'N_t': 50,
            'N_f': 5000,
            'batch_sz': 200,
        }

    layer_sizes = args['layer_sizes']
    run_functions_eagerly = args['run_functions_eagerly']
    epoch_adam = args['epoch_adam']
    epoch_lbfgs = args['epoch_lbfgs']
    lbfgs_eager = args['lbfgs_eager']
    isAdaptive = args['isAdaptive']
    dist_training = args['dist_training']
    dict_adaptive = args['dict_adaptive']
    N_x = args['N_x']
    N_t = args['N_t']
    N_f = args['N_f']
    batch_sz = args['batch_sz']

    tf.config.run_functions_eagerly(run_functions_eagerly)

    # Space-time domain with N_f randomly sampled collocation points.
    Domain = DomainND(["x", "t"], time_var='t')
    Domain.add("x", [-1.0, 1.0], N_x)
    Domain.add("t", [0.0, 1.0], N_t)
    Domain.generate_collocation_points(N_f)

    def func_ic(x):
        # Initial condition u(x, 0) = -sin(pi * x).
        return -np.sin(x * math.pi)

    init = IC(Domain, [func_ic], var=[['x']])
    upper_x = dirichletBC(Domain, val=0.0, var='x', target="upper")
    lower_x = dirichletBC(Domain, val=0.0, var='x', target="lower")

    BCs = [init, upper_x, lower_x]

    def f_model(u_model, x, t):
        # PDE residual: u_t + u * u_x - (0.01 / pi) * u_xx.
        with tf.GradientTape(persistent=True) as tape:
            tape.watch(x)
            tape.watch(t)
            u = u_model(tf.concat([x, t], 1))
            # First derivative is taken inside the tape so it is itself
            # recorded, enabling the second derivative below.
            u_x = tape.gradient(u, x)

        u_xx = tape.gradient(u_x, x)
        u_t = tape.gradient(u, t)
        # Persistent tapes hold resources until deleted explicitly.
        del tape

        f_u = u_t + u * u_x - 0.01 / tf.constant(math.pi) * u_xx

        return f_u

    ## Which loss functions will have adaptive weights
    # "residual" should be a tuple for the case of multiple residual equations.
    # "BCs" entries must follow the same order as the BCs list above.

    ## Weights initialization
    # Each enabled loss term gets a per-point weight tensor; disabled terms
    # pass None so the solver falls back to fixed unit weights.
    init_residual = tf.ones([N_f, 1]) if dict_adaptive["residual"][0] else None
    init_IC = tf.ones([N_x, 1]) if dict_adaptive["BCs"][0] else None
    init_BC1 = tf.ones([N_t, 1]) if dict_adaptive["BCs"][1] else None
    init_BC2 = tf.ones([N_t, 1]) if dict_adaptive["BCs"][2] else None

    init_weights = {
        "residual": [init_residual],
        "BCs": [init_IC, init_BC1, init_BC2]
    }

    model = CollocationSolverND()
    model.compile(layer_sizes,
                  f_model,
                  Domain,
                  BCs,
                  isAdaptive=isAdaptive,
                  dict_adaptive=dict_adaptive,
                  init_weights=init_weights,
                  dist=dist_training)

    # Adam iterations followed by L-BFGS (Newton) refinement.
    model.fit(tf_iter=epoch_adam,
              newton_iter=epoch_lbfgs,
              newton_eager=lbfgs_eager,
              batch_sz=batch_sz)

    return
コード例 #2
0

def func_upper_y(x):
    """Dirichlet boundary value along the upper-y edge: -sin(pi*x) * sin(pi)."""
    pi_const = constant(math.pi)
    return -sin(pi_const * x) * sin(pi_const)


# Dirichlet boundary conditions on the four edges: constant zero on the lower
# edges, function-valued on the upper edges.
# Fixed: lower_x previously used target="upper", which would have stacked two
# conditions on the upper-x edge and left the lower-x edge unconstrained.
lower_x = dirichletBC(Domain, val=0.0, var='x', target="lower")
upper_x = FunctionDirichletBC(Domain, fun=[func_upper_x], var='x', target="upper", func_inputs=["y"], n_values=10)
upper_y = FunctionDirichletBC(Domain, fun=[func_upper_y], var='y', target="upper", func_inputs=["x"], n_values=10)
lower_y = dirichletBC(Domain, val=0.0, var='y', target="lower")

BCs = [upper_x, lower_x, upper_y, lower_y]

layer_sizes = [2, 16, 16, 1]

model = CollocationSolverND()
model.compile(layer_sizes, f_model, Domain, BCs)
# 'learning_rate' replaces the deprecated 'lr' alias (removed in TF >= 2.11).
model.tf_optimizer = tf.keras.optimizers.Adam(learning_rate=.005)
model.fit(tf_iter=4000)

# get exact solution
nx, ny = (11, 11)
x = np.linspace(0, 1, nx)
y = np.linspace(0, 1, ny)

xv, yv = np.meshgrid(x, y)

# Column vectors for evaluating the trained model pointwise.
x = np.reshape(x, (-1, 1))
y = np.reshape(y, (-1, 1))

# Exact analytical soln is available:
コード例 #3
0
# Boundary losses: initial condition plus periodic BC in x (both presumably
# defined earlier in the original file — not visible in this excerpt).
BCs = [init, x_periodic]


def f_model(u_model, x, t):
    """Residual of u_t = c1 * u_xx - c2 * (u^3 - u), via TF1-style tf.gradients."""
    network_input = tf.concat([x, t], 1)
    u = u_model(network_input)
    u_x = tf.gradients(u, x)
    u_xx = tf.gradients(u_x, x)
    u_t = tf.gradients(u, t)
    c1 = tdq.utils.constant(.0001)
    c2 = tdq.utils.constant(5.0)
    return u_t - c1 * u_xx + c2 * u * u * u - c2 * u

# Network: (x, t) in, scalar u out, four 128-unit hidden layers.
layer_sizes = [2, 128, 128, 128, 128, 1]

model = CollocationSolverND()
model.compile(layer_sizes, f_model, Domain, BCs)
# Adam iterations followed by L-BFGS (Newton) refinement.
model.fit(tf_iter=10000, newton_iter=10000)

# Load high-fidelity data for error calculation
data = scipy.io.loadmat('AC.mat')

Exact = data['uu']
Exact_u = np.real(Exact)

# t = data['tt'].flatten()[:,None]
# x = data['x'].flatten()[:,None]

# Evaluation grids taken from the training domain rather than the .mat file.
x = Domain.domaindict[0]['xlinspace']
t = Domain.domaindict[1]["tlinspace"]
コード例 #4
0
ファイル: testing1D.py プロジェクト: udemirezen/TensorDiffEq
    # NOTE(review): tail of an f_model(u_model, x, t) definition whose `def`
    # line was lost in extraction; uses TF1-style tf.gradients (graph mode).
    u = u_model(tf.concat([x, t], 1))
    u_x = tf.gradients(u, x)
    u_xx = tf.gradients(u_x, x)
    u_t = tf.gradients(u, t)
    c1 = tdq.utils.constant(.0001)
    c2 = tdq.utils.constant(5.0)
    # Residual: u_t - c1 * u_xx + c2 * (u^3 - u).
    f_u = u_t - c1 * u_xx + c2 * u * u * u - c2 * u
    return f_u


# Self-adaptive weights: one trainable weight per collocation point and one
# per data point (512 here); trained jointly with the network.
# NOTE(review): the u_weights size 512 presumably matches the IC sample count
# defined earlier in the original file — confirm against that setup.
col_weights = tf.Variable(tf.random.uniform([N_f, 1]), trainable=True, dtype=tf.float32)
u_weights = tf.Variable(100 * tf.random.uniform([512, 1]), trainable=True, dtype=tf.float32)

# Network: (x, t) in, scalar u out, four 128-unit hidden layers.
layer_sizes = [2, 128, 128, 128, 128, 1]

model = CollocationSolverND()
model.compile(layer_sizes, f_model, Domain, BCs, isAdaptive=True, col_weights=col_weights, u_weights=u_weights)
# Adam iterations followed by L-BFGS (Newton) refinement.
model.fit(tf_iter=10000, newton_iter=10000)

# Load high-fidelity data for error calculation
data = scipy.io.loadmat('AC.mat')

Exact = data['uu']
Exact_u = np.real(Exact)

# t = data['tt'].flatten()[:,None]
# x = data['x'].flatten()[:,None]

# Evaluation grids taken from the training domain rather than the .mat file.
x = Domain.domaindict[0]['xlinspace']
t = Domain.domaindict[1]["tlinspace"]
コード例 #5
0
def main(args):
    """Train a PINN for the 1D Allen-Cahn equation on x in [-1, 1], t in [0, 1].

    The network learns u(x, t) with initial condition u(x, 0) = x^2 * cos(pi*x)
    and periodic boundary conditions (matching u and u_x) at the x boundaries.

    Args:
        args: Optional dict of hyperparameters. When ``None`` a default
            configuration is used. Expected keys: 'layer_sizes',
            'run_functions_eagerly', 'epoch_adam', 'epoch_lbfgs',
            'lbfgs_eager', 'isAdaptive', 'dist_training', 'dict_adaptive',
            'N_x', 'N_t', 'N_f', 'batch_sz'.
    """
    if args is None:
        args = {
            'layer_sizes': [2, 21, 21, 21, 21, 1],
            'run_functions_eagerly': False,
            'epoch_adam': 20,
            'epoch_lbfgs': 20,
            'lbfgs_eager': False,
            'isAdaptive': True,
            'dist_training': False,
            'dict_adaptive': {
                "residual": [True],
                "BCs": [False, False]
            },
            'N_x': 100,
            'N_t': 50,
            'N_f': 5000,
            'batch_sz': 200,
        }

    layer_sizes = args['layer_sizes']
    run_functions_eagerly = args['run_functions_eagerly']
    epoch_adam = args['epoch_adam']
    epoch_lbfgs = args['epoch_lbfgs']
    lbfgs_eager = args['lbfgs_eager']
    isAdaptive = args['isAdaptive']
    dist_training = args['dist_training']
    dict_adaptive = args['dict_adaptive']
    N_x = args['N_x']
    N_t = args['N_t']
    N_f = args['N_f']
    batch_sz = args['batch_sz']

    tf.config.run_functions_eagerly(run_functions_eagerly)

    # Space-time domain with N_f randomly sampled collocation points.
    Domain = DomainND(["x", "t"], time_var='t')

    Domain.add("x", [-1.0, 1.0], N_x)
    Domain.add("t", [0.0, 1.0], N_t)

    Domain.generate_collocation_points(N_f)

    def func_ic(x):
        # Initial condition u(x, 0) = x^2 * cos(pi * x).
        return x**2 * np.cos(math.pi * x)

    # Quantities matched at the two x boundaries for the periodic BC:
    # the network output and its first spatial derivative.
    def deriv_model(u_model, x, t):
        u = u_model(tf.concat([x, t], 1))
        u_x = tf.gradients(u, x)[0]

        return u, u_x

    init = IC(Domain, [func_ic], var=[['x']])
    x_periodic = periodicBC(Domain, ['x'], [deriv_model])

    BCs = [init, x_periodic]

    def f_model(u_model, x, t):
        # Allen-Cahn residual: u_t - c1 * u_xx + c2 * (u^3 - u).
        # tf.gradients is TF1-style and presumably relies on graph mode
        # (run_functions_eagerly defaults to False here).
        u = u_model(tf.concat([x, t], 1))
        u_x = tf.gradients(u, x)
        u_xx = tf.gradients(u_x, x)
        u_t = tf.gradients(u, t)
        c1 = tdq.utils.constant(.0001)
        c2 = tdq.utils.constant(5.0)
        f_u = u_t - c1 * u_xx + c2 * u * u * u - c2 * u
        return f_u

    ## Which loss functions will have adaptive weights
    # "residual" should be a tuple for the case of multiple residual equations.
    # "BCs" entries must follow the same order as the BCs list above.

    ## Weights initialization
    # Each enabled loss term gets a per-point weight tensor; disabled terms
    # pass None so the solver falls back to fixed unit weights.
    init_residual = tf.random.uniform([N_f, 1]) if dict_adaptive["residual"][0] else None
    init_IC = 100 * tf.random.uniform([N_x, 1]) if dict_adaptive["BCs"][0] else None
    init_BC = tf.random.uniform([N_t, 1]) if dict_adaptive["BCs"][1] else None

    init_weights = {"residual": [init_residual], "BCs": [init_IC, init_BC]}

    model = CollocationSolverND()
    model.compile(layer_sizes,
                  f_model,
                  Domain,
                  BCs,
                  isAdaptive=isAdaptive,
                  dict_adaptive=dict_adaptive,
                  init_weights=init_weights,
                  dist=dist_training)

    # Adam iterations followed by L-BFGS (Newton) refinement.
    model.fit(tf_iter=epoch_adam,
              newton_iter=epoch_lbfgs,
              newton_eager=lbfgs_eager,
              batch_sz=batch_sz)

    return
コード例 #6
0

def f_model(u_model, x, t):
    """Residual of u_t = c1 * u_xx - c2 * (u^3 - u), via TF1-style tf.gradients."""
    network_input = tf.concat([x, t], 1)
    u = u_model(network_input)
    u_x = tf.gradients(u, x)
    u_xx = tf.gradients(u_x, x)
    u_t = tf.gradients(u, t)
    c1 = tdq.utils.constant(.0001)
    c2 = tdq.utils.constant(5.0)
    return u_t - c1 * u_xx + c2 * u * u * u - c2 * u


# Network: (x, t) in, scalar u out, four 128-unit hidden layers.
layer_sizes = [2, 128, 128, 128, 128, 1]

model = CollocationSolverND()
# dist=True enables distributed (multi-device) training in the solver.
model.compile(layer_sizes, f_model, Domain, BCs, dist=True)
model.fit(tf_iter=1001)
print("training pass 1 completed")
# Second pass continues training from the current weights.
model.fit(tf_iter=1001)

# Load high-fidelity data for error calculation
data = scipy.io.loadmat('AC.mat')

Exact = data['uu']
Exact_u = np.real(Exact)

# t = data['tt'].flatten()[:,None]
# x = data['x'].flatten()[:,None]

# Evaluation grid taken from the training domain rather than the .mat file.
x = Domain.domaindict[0]['xlinspace']