def train():
    # Define the output folder.
    if not os.path.isdir(args.outputpath[0]):
        os.mkdir(args.outputpath[0])
    output_file_name = os.path.join(args.outputpath[0], args.outputprefix[0])
    fname = (output_file_name + "_{}_".format(args.actf[0])
             + "x".join([str(x) for x in args.layers]))

    # Neural-network setup.
    x = Variable("x", dtype=args.dtype[0])
    y = Variable("y", dtype=args.dtype[0])

    if args.independent_networks[0]:
        # One independent network per field.
        Uxy = Functional("Uxy", [x, y], args.layers, args.actf[0])
        Vxy = Functional("Vxy", [x, y], args.layers, args.actf[0])
        Sxx = Functional("Sxx", [x, y], args.layers, args.actf[0])
        Syy = Functional("Syy", [x, y], args.layers, args.actf[0])
        Sxy = Functional("Sxy", [x, y], args.layers, args.actf[0])
    else:
        # A single network with five outputs, split into separate fields.
        Uxy, Vxy, Sxx, Syy, Sxy = Functional(
            ["Uxy", "Vxy", "Sxx", "Syy", "Sxy"],
            [x, y], args.layers, args.actf[0]).split()

    # Trainable material parameters (Lame constants) and stiffness terms.
    lame1 = Parameter(2.0, inputs=[x, y], name="lame1")
    lame2 = Parameter(2.0, inputs=[x, y], name="lame2")
    C11 = (2 * lame2 + lame1)
    C12 = lame1
    C33 = 2 * lame2

    # Strains from the displacement networks.
    Exx = diff(Uxy, x)
    Eyy = diff(Vxy, y)
    Exy = (diff(Uxy, y) + diff(Vxy, x)) * 0.5

    # Define constraints: data on displacements and stresses.
    d1 = Data(Uxy)
    d2 = Data(Vxy)
    d3 = Data(Sxx)
    d4 = Data(Syy)
    d5 = Data(Sxy)

    # Constitutive relations tying stresses to strains.
    c1 = Tie(Sxx, Exx * C11 + Eyy * C12)
    c2 = Tie(Syy, Eyy * C11 + Exx * C12)
    c3 = Tie(Sxy, Exy * C33)

    # Momentum-balance residuals.
    Lx = diff(Sxx, x) + diff(Sxy, y)
    Ly = diff(Sxy, x) + diff(Syy, y)

    # Define the optimization model (set of inputs and constraints).
    model = SciModel(
        inputs=[x, y],
        targets=[d1, d2, d3, d4, d5, c1, c2, c3, Lx, Ly],
        loss_func="mse")

    with open("{}_summary".format(fname), "w") as fobj:
        model.summary(print_fn=lambda s: fobj.write(s + '\n'))

    # Prepare training data.
    ## Training grid.
    XMIN, XMAX = 0.0, 1.0
    YMIN, YMAX = 0.0, 1.0
    Xmesh = np.linspace(XMIN, XMAX, args.numx[0]).reshape((-1, 1))
    Ymesh = np.linspace(YMIN, YMAX, args.numy[0]).reshape((-1, 1))
    X, Y = np.meshgrid(Xmesh, Ymesh)
    input_data = [X.reshape(-1, 1), Y.reshape(-1, 1)]

    ## Data associated with the constraints defined earlier.
    data_d1 = dispx(input_data)
    data_d2 = dispy(input_data)
    data_d3 = stressxx(input_data)
    data_d4 = stressyy(input_data)
    data_d5 = stressxy(input_data)
    data_c1 = 'zeros'
    data_c2 = 'zeros'
    data_c3 = 'zeros'
    data_Lx = bodyfx(input_data)
    data_Ly = bodyfy(input_data)
    target_data = [data_d1, data_d2, data_d3, data_d4, data_d5,
                   data_c1, data_c2, data_c3, data_Lx, data_Ly]

    # Train the model.
    training_time = time.time()
    history = model.train(
        x_true=input_data,
        y_true=target_data,
        epochs=args.epochs[0],
        batch_size=args.batchsize[0],
        shuffle=args.shuffle[0],
        learning_rate=args.learningrate[0],
        stop_after=args.stopafter[0],
        verbose=args.verbose[0],
        save_weights_to="{}_WEIGHTS".format(fname),
        save_weights_freq=args.savefreq[0])
    training_time = time.time() - training_time

    # Save loss histories and a wall-clock time axis.
    for loss in history.history:
        np.savetxt(fname + "_{}".format("_".join(loss.split("/"))),
                   np.array(history.history[loss]).reshape(-1, 1))
    time_steps = np.linspace(0, training_time, len(history.history["loss"]))
    np.savetxt(fname + "_Time", time_steps.reshape(-1, 1))

    # Post-process the trained model.
    Xmesh_plot = np.linspace(XMIN, XMAX, args.numxplot[0]).reshape((-1, 1))
    Ymesh_plot = np.linspace(YMIN, YMAX, args.numyplot[0]).reshape((-1, 1))
    X_plot, Y_plot = np.meshgrid(Xmesh_plot, Ymesh_plot)
    input_plot = [X_plot.reshape(-1, 1), Y_plot.reshape(-1, 1)]

    lame1_pred = lame1.eval(model, input_plot)
    lame2_pred = lame2.eval(model, input_plot)
    Uxy_pred = Uxy.eval(model, input_plot)
    Vxy_pred = Vxy.eval(model, input_plot)
    Exx_pred = Exx.eval(model, input_plot)
    Eyy_pred = Eyy.eval(model, input_plot)
    Exy_pred = Exy.eval(model, input_plot)
    Sxx_pred = Sxx.eval(model, input_plot)
    Syy_pred = Syy.eval(model, input_plot)
    Sxy_pred = Sxy.eval(model, input_plot)

    np.savetxt(fname + "_Xmesh", X_plot, delimiter=', ')
    np.savetxt(fname + "_Ymesh", Y_plot, delimiter=', ')
    np.savetxt(fname + "_lame1", lame1_pred.reshape(X_plot.shape), delimiter=', ')
    np.savetxt(fname + "_lame2", lame2_pred.reshape(X_plot.shape), delimiter=', ')
    np.savetxt(fname + "_Uxy", Uxy_pred.reshape(X_plot.shape), delimiter=', ')
    np.savetxt(fname + "_Vxy", Vxy_pred.reshape(X_plot.shape), delimiter=', ')
    np.savetxt(fname + "_Exx", Exx_pred.reshape(X_plot.shape), delimiter=', ')
    np.savetxt(fname + "_Eyy", Eyy_pred.reshape(X_plot.shape), delimiter=', ')
    np.savetxt(fname + "_Exy", Exy_pred.reshape(X_plot.shape), delimiter=', ')
    np.savetxt(fname + "_Sxx", Sxx_pred.reshape(X_plot.shape), delimiter=', ')
    np.savetxt(fname + "_Syy", Syy_pred.reshape(X_plot.shape), delimiter=', ')
    np.savetxt(fname + "_Sxy", Sxy_pred.reshape(X_plot.shape), delimiter=', ')
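
# NOTE (assumption): `train()` relies on module-level imports, on data
# functions (dispx, dispy, stressxx, stressyy, stressxy, bodyfx, bodyfy),
# and on a parsed `args` namespace, none of which appear in this listing.
# A minimal sketch of that preamble follows; the import layout and the
# default values are placeholders, and the argument names simply mirror
# the attributes accessed inside train().
import os
import time
import argparse

import numpy as np
from sciann import Variable, Functional, Parameter, SciModel, Data, Tie  # layout assumed
from sciann.utils.math import diff  # adjust to the installed SciANN version

parser = argparse.ArgumentParser(description="Elasticity-inversion PINN (sketch)")
parser.add_argument("--outputpath", nargs=1, default=["output"])
parser.add_argument("--outputprefix", nargs=1, default=["res"])
parser.add_argument("--actf", nargs=1, default=["tanh"])
parser.add_argument("--layers", nargs="*", type=int, default=[40, 40, 40])
parser.add_argument("--dtype", nargs=1, default=["float64"])
parser.add_argument("--independent_networks", nargs=1, type=int, default=[1])  # 0/1 flag
parser.add_argument("--numx", nargs=1, type=int, default=[40])
parser.add_argument("--numy", nargs=1, type=int, default=[40])
parser.add_argument("--numxplot", nargs=1, type=int, default=[200])
parser.add_argument("--numyplot", nargs=1, type=int, default=[200])
parser.add_argument("--epochs", nargs=1, type=int, default=[5000])
parser.add_argument("--batchsize", nargs=1, type=int, default=[32])
parser.add_argument("--shuffle", nargs=1, type=int, default=[1])  # 0/1 flag
parser.add_argument("--learningrate", nargs=1, type=float, default=[1e-3])
parser.add_argument("--stopafter", nargs=1, type=int, default=[500])
parser.add_argument("--verbose", nargs=1, type=int, default=[2])
parser.add_argument("--savefreq", nargs=1, type=int, default=[1000])
args = parser.parse_args()

if __name__ == "__main__":
    train()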
NX = 20
NY = 20
NT = 40
NTOT = NX * NY * NT
EPOCHS = 20000
BATCH = 1000

# D, rho, Lx, Ly, T_Final, and gen_grid are assumed to be defined earlier.
data = gen_grid(NX, NY, NT, Lx, Ly, T_Final)

x = sn.Variable('x', dtype='float64')
y = sn.Variable('y', dtype='float64')
t = sn.Variable('t', dtype='float64')
u = sn.Functional('u', [x, y, t], 4 * [40], 'l-tanh')

# Kirchhoff-plate residual: D * biharmonic(u) + rho * u_tt.
L1 = (D * (diff(u, x, order=4) + diff(u, y, order=4)
           + 2 * diff(diff(u, x, order=2), y, order=2))
      + rho * diff(u, t, order=2))

# Initial and boundary conditions imposed through sign-based masks.
TOL = 0.001
C1 = (1 - sign(t - TOL)) * (u - sin(np.pi * x) * sin(np.pi * y))  # initial displacement
C2 = (1 - sign(t - TOL)) * (diff(u, t))                           # zero initial velocity
C3 = (1 - sign(x - TOL)) * u                                      # u = 0 on x = 0
C4 = (1 - sign(y - TOL)) * u                                      # u = 0 on y = 0
C5 = (1 + sign(x - (1 - TOL))) * u                                # u = 0 on x = 1
C6 = (1 + sign(y - (1 - TOL))) * u                                # u = 0 on y = 1
C7 = (1 - sign(x - TOL)) * (diff(u, x, order=2))                  # u_xx = 0 on x = 0
C8 = (1 - sign(y - TOL)) * (diff(u, y, order=2))                  # u_yy = 0 on y = 0
C9 = (1 + sign(x - (1 - TOL))) * (diff(u, x, order=2))            # u_xx = 0 on x = 1
C10 = (1 + sign(y - (1 - TOL))) * (diff(u, y, order=2))           # u_yy = 0 on y = 1
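
# NOTE (assumption): this listing stops at the constraint definitions. The
# model assembly and training are not shown; a minimal sketch, following the
# same pattern as the membrane examples below, could look like this (it
# assumes gen_grid returns the flattened x, y, t sample arrays):
x_data, y_data, t_data = data
m = sn.SciModel(
    [x, y, t],
    [sn.PDE(L1), C1, C2, C3, C4, C5, C6, C7, C8, C9, C10],
)
h = m.train(
    [x_data.reshape(-1, 1), y_data.reshape(-1, 1), t_data.reshape(-1, 1)],
    11 * ['zeros'],
    batch_size=BATCH,
    epochs=EPOCHS,
    learning_rate=0.001,
    verbose=2,
)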
# x_data, y_data, t_data come from the training grid generated earlier in
# the script (not shown in this listing).
x_data = x_data.reshape(-1, 1)
y_data = y_data.reshape(-1, 1)
t_data = t_data.reshape(-1, 1)

# Analytical (1, 1) membrane mode used as synthetic observation data.
Lambd11 = np.pi * np.sqrt(2)
u_data = np.sin(np.pi * x_data) * np.sin(np.pi * y_data) * np.cos(Lambd11 * t_data)

x = sn.Variable('x', dtype='float64')
y = sn.Variable('y', dtype='float64')
t = sn.Variable('t', dtype='float64')
u = sn.Functional('u', [x, y, t], 4 * [20], 'sin')

# Unknown wave-speed coefficient to be identified from the data.
c = sn.Parameter(np.random.rand(), inputs=[x, y, t], name='c')

# Wave-equation residual.
L1 = c * (diff(u, x, order=2) + diff(u, y, order=2)) - diff(u, t, order=2)

m = sn.SciModel(
    [x, y, t],
    [sn.PDE(L1), sn.Data(u)],
    # load_weights_from='membrane_inv-weights.hdf5'
)

inputs = [x_data, y_data, t_data]
targets = ['zeros', u_data]

# The remaining training arguments are truncated in the source; the closing
# epochs/verbose values below are assumed, mirroring the other examples.
# BATCH and EPOCHS are likewise assumed to be defined earlier in the script.
h = m.train(inputs,
            targets,
            batch_size=BATCH,
            learning_rate=0.001,
            reduce_lr_after=50,
            epochs=EPOCHS,
            verbose=2)
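
# NOTE (assumption): the identified wave speed is not reported in this
# listing; it can be read back by evaluating the Parameter on the training
# points, following the same .eval pattern used in the elasticity example.
c_pred = c.eval(m, inputs)
print("identified c:", float(np.mean(c_pred)))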
NY = 40
NT = 20
EPOCHS = 2000
BATCH = 1000

# NX, Lx, Ly, T_Final, and gen_grid are assumed to be defined earlier.
data = gen_grid(NX, NY, NT, Lx, Ly, T_Final)
# x_data, y_data, t_data = np.meshgrid(np.linspace(0, Lx, NX),
#                                      np.linspace(0, Ly, NY),
#                                      np.linspace(0, T_Final, NT))

x = sn.Variable('x', dtype='float64')
y = sn.Variable('y', dtype='float64')
t = sn.Variable('t', dtype='float64')
u = sn.Functional('u', [x, y, t], 4 * [20], 'sin')

# Known wave speed for the forward problem.
c = 1.0

# Wave-equation residual.
L1 = c * (diff(u, x, order=2) + diff(u, y, order=2)) - diff(u, t, order=2)

# Initial and boundary conditions imposed through sign-based masks.
TOL = 0.001
C1 = (1 - sign(t - TOL)) * (u - sin(np.pi * x) * sin(np.pi * y))  # initial displacement
C2 = (1 - sign(t - TOL)) * (diff(u, t))                           # zero initial velocity
C3 = (1 - sign(x - TOL)) * u                                      # u = 0 on x = 0
C4 = (1 - sign(y - TOL)) * u                                      # u = 0 on y = 0
C5 = (1 + sign(x - (1 - TOL))) * u                                # u = 0 on x = 1
C6 = (1 + sign(y - (1 - TOL))) * u                                # u = 0 on y = 1

m = sn.SciModel(
    [x, y, t],
    [sn.PDE(L1), C1, C2, C3, C4, C5, C6],
    # load_weights_from='membrane-weights.hdf5'
)
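
# NOTE (assumption): the training call is not shown in this listing; it
# would follow the inverse-problem example above. A minimal sketch (again
# assuming gen_grid returns the flattened x, y, t sample arrays):
x_data, y_data, t_data = data
h = m.train(
    [x_data.reshape(-1, 1), y_data.reshape(-1, 1), t_data.reshape(-1, 1)],
    7 * ['zeros'],
    batch_size=BATCH,
    epochs=EPOCHS,
    learning_rate=0.001,
    verbose=2,
)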
x = Variable('x', dtype='float64')

# Fourier feature expansion of the input.
xf = sn.fourier(x, 10)

# Each network is defined by a Functional.
y1 = sn.Field('y1', 10)
y2 = sn.Field('y2', 10)
y1, y2 = sn.Functional([y1, y2], xf, [10, 10, 10], 'l-tanh',
                       output_activation='tanh')
y = sn.Functional('y', [xf * y1, xf * y2])

d = Parameter(10.0, inputs=x, name='d')

# Define the targets (outputs) of the model: the data condition and the ODE residual.
c1 = Data(y)
L = d * diff(y, x, order=2) + y

# The model is formed with input `x` and conditions `c1` and `L`.
model = SciModel(x, [c1, sn.PDE(L)])

# `.train` runs the optimization and finds the parameters.
history = model.train(
    x_true, [y_true, 'zeros'],
    batch_size=32,
    epochs=100,
    adaptive_weights={"method": "NTK", "freq": 10},
    log_parameters=[d]
)

# Evaluate the model after training.
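
# NOTE (assumption): the evaluation itself is not shown above; a minimal
# sketch using the same .eval pattern as the earlier examples:
y_pred = y.eval(model, x_true)
d_pred = d.eval(model, x_true)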