def test_functional_exceptions(variable_x):
    x = variable_x
    with pytest.raises(TypeError):
        f = sn.Functional(x)
    with pytest.raises(TypeError):
        ft = sn.Functional('ft', 2 * [10])
    with pytest.raises(TypeError):
        ft = sn.Functional('ft', x, 'tanh')
    with pytest.raises(TypeError):
        ft = sn.Functional('ft', x, 2 * [10], 12)
def functional_hxy(variable_x, variable_y):
    return sn.Functional('hxy', [variable_x, variable_y], 2 * [10], 'tanh',
                         kernel_initializer=1., bias_initializer=0.)


def functional_gxy(variable_x, variable_y):
    return sn.Functional('gxy', [variable_x, variable_y], 2 * [10], 'tanh')


def functional_gx(variable_x):
    return sn.Functional('gx', variable_x, 2 * [10], 'tanh')
def test_activation_function_exceptions(variable_x):
    x = variable_x
    assert sn.is_functional(sn.Functional('ft', x, 2 * [10], 'l-tanh'))
    assert sn.is_functional(sn.Functional('gt', x, 2 * [10], 'g-tanh'))
    assert sn.is_functional(
        sn.Functional('rt', x, 2 * [10], ['tanh', 'sin', 'l-tanh']))
def test_functional(variable_x):
    x = variable_x
    ft = sn.Functional('ft', x, 2 * [10], 'tanh')
    assert sn.is_functional(ft)
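# The tests and helpers above all take `variable_x` / `variable_y` arguments,
# which pytest resolves from fixtures defined elsewhere in the test suite.
# The following is only a hedged sketch of what such fixtures could look like,
# not the suite's actual conftest.
import pytest
import sciann as sn


@pytest.fixture
def variable_x():
    return sn.Variable('x')


@pytest.fixture
def variable_y():
    return sn.Variable('y')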
T_Final = 0.1
NX = 20
NY = 20
NT = 40
NTOT = NX * NY * NT
EPOCHS = 20000
BATCH = 1000

# D, rho, Lx, Ly, and gen_grid are assumed to be defined elsewhere in the script.
data = gen_grid(NX, NY, NT, Lx, Ly, T_Final)

x = sn.Variable('x', dtype='float64')
y = sn.Variable('y', dtype='float64')
t = sn.Variable('t', dtype='float64')
u = sn.Functional('u', [x, y, t], 4 * [40], 'l-tanh')

# Plate-bending residual: D * (u_xxxx + u_yyyy + 2 * u_xxyy) + rho * u_tt
L1 = (D * (diff(u, x, order=4) + diff(u, y, order=4)
           + 2 * diff(diff(u, x, order=2), y, order=2))
      + rho * diff(u, t, order=2))

TOL = 0.001
# Initial conditions near t = 0: prescribed displacement and zero velocity.
C1 = (1 - sign(t - TOL)) * (u - sin(np.pi * x) * sin(np.pi * y))
C2 = (1 - sign(t - TOL)) * (diff(u, t))
# Zero displacement on the domain edges.
C3 = (1 - sign(x - TOL)) * u
C4 = (1 - sign(y - TOL)) * u
C5 = (1 + sign(x - (1 - TOL))) * u
C6 = (1 + sign(y - (1 - TOL))) * u
# Zero second derivatives (moment conditions) on the x = 0 and y = 0 edges.
C7 = (1 - sign(x - TOL)) * (diff(u, x, order=2))
C8 = (1 - sign(y - TOL)) * (diff(u, y, order=2))
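# A minimal sketch of how the residual and constraints above could be
# assembled into a SciModel and trained; the fragment does not show the
# original model setup, so the target handling and training arguments
# below are assumptions.
m = sn.SciModel(
    [x, y, t],
    [sn.PDE(L1), C1, C2, C3, C4, C5, C6, C7, C8],
)
h = m.train(
    data,              # assuming gen_grid returns the [x, y, t] training grid
    9 * ['zeros'],     # every target is driven to zero
    epochs=EPOCHS,
    batch_size=BATCH,
)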
T_Final = 0.5
NX = 40
NY = 40
NT = 20
EPOCHS = 2000
BATCH = 1000

data = gen_grid(NX, NY, NT, Lx, Ly, T_Final)
# x_data, y_data, t_data = np.meshgrid(np.linspace(0, Lx, NX),
#                                      np.linspace(0, Ly, NY),
#                                      np.linspace(0, T_Final, NT))

x = sn.Variable('x', dtype='float64')
y = sn.Variable('y', dtype='float64')
t = sn.Variable('t', dtype='float64')
u = sn.Functional('u', [x, y, t], 4 * [20], 'sin')

# Wave-equation residual: c * (u_xx + u_yy) - u_tt
c = 1.0
L1 = c * (diff(u, x, order=2) + diff(u, y, order=2)) - diff(u, t, order=2)

TOL = 0.001
# Initial conditions near t = 0: prescribed displacement and zero velocity.
C1 = (1 - sign(t - TOL)) * (u - sin(np.pi * x) * sin(np.pi * y))
C2 = (1 - sign(t - TOL)) * (diff(u, t))
# Zero displacement on the domain edges.
C3 = (1 - sign(x - TOL)) * u
C4 = (1 - sign(y - TOL)) * u
C5 = (1 + sign(x - (1 - TOL))) * u
C6 = (1 + sign(y - (1 - TOL))) * u

m = sn.SciModel(
    [x, y, t],
    [sn.PDE(L1), C1, C2, C3, C4, C5, C6],
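# The SciModel call above is cut off in the original fragment; a hedged
# sketch of a completion and training step follows. The loss function,
# optimizer, and target handling are assumptions, not taken from the source.
m = sn.SciModel(
    [x, y, t],
    [sn.PDE(L1), C1, C2, C3, C4, C5, C6],
    loss_func='mse',
    optimizer='adam',
)
h = m.train(
    data,              # assuming gen_grid returns the [x, y, t] training grid
    7 * ['zeros'],     # all targets are driven to zero
    epochs=EPOCHS,
    batch_size=BATCH,
)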
import numpy as np

import sciann as sn
from sciann import Variable, Parameter, SciModel
from sciann.constraints import Data
from sciann.utils.math import diff

sn.set_random_seed(1234)

# Synthetic data generated from the sin function over [0, 2*pi].
x_true = np.linspace(0, np.pi * 2, 10000)
y_true = np.sin(x_true)

# The network inputs should be defined with Variable.
x = Variable('x', dtype='float64')
xf = sn.fourier(x, 10)

# Each network is defined by Functional.
y1 = sn.Field('y1', 10)
y2 = sn.Field('y2', 10)
y1, y2 = sn.Functional([y1, y2], xf, [10, 10, 10], 'l-tanh', output_activation='tanh')
y = sn.Functional('y', [xf * y1, xf * y2])

d = Parameter(10.0, inputs=x, name='d')

# Define the targets (outputs) of your model: a data fit on y and a PDE residual.
c1 = Data(y)
L = d * diff(y, x, order=2) + y

# The model is formed with input `x` and conditions `c1` and `sn.PDE(L)`.
model = SciModel(x, [c1, sn.PDE(L)])

# Training: .train runs the optimization and finds the parameters.
history = model.train(
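# The train call above is truncated in the original; a hedged sketch of how
# it could be completed, with the targets following the usual SciANN
# convention (data for c1, zeros for the PDE residual). The epoch and batch
# values are assumptions rather than values from the source.
history = model.train(
    x_true,
    [y_true, 'zeros'],
    epochs=500,
    batch_size=256,
)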