Example #1 (0 votes)
    def __init__(self, data, c_data, eqns, layers, Pec, Rey):
        """Restore a previously trained physics-uninformed network.

        data / c_data / eqns: training data, concentration data and
            equation (collocation) points, stored as given.
        layers: layer sizes forwarded to ``neural_net``.
        Pec / Rey: Peclet and Reynolds numbers of the flow.
        """
        # flow properties
        self.Pec = Pec
        self.Rey = Rey

        # data, stored untouched
        self.data = data
        self.c_data = c_data
        self.eqns = eqns

        # physics "uninformed" neural network
        self.net = neural_net(layers, data, USE_CUDA, device)
        # NOTE(review): ``name`` is not defined in this scope -- presumably a
        # module-level checkpoint path; confirm it is set before construction.
        self.net.load_state_dict(torch.load(name, map_location=device))
        print("Previous state loaded.")
        if USE_CUDA:
            self.net.to(device)
Example #2 (0 votes)
 def __init__(self, data, c_data, eqns, layers, Pec, Rey):
     """Set up the data manager and the physics-uninformed network.

     data / c_data / eqns are stored untouched; layers defines the
     network architecture; Pec and Rey are the flow's Peclet and
     Reynolds numbers.
     """
     # network spec and flow properties
     self.layers = layers
     self.Pec = Pec
     self.Rey = Rey

     # raw data, kept exactly as provided
     self.data, self.c_data, self.eqns = data, c_data, eqns

     # data manager utility (visdom-backed)
     self.dm = dataManager(using_visdom, version)

     # physics "uninformed" neural network, moved to the target device
     self.net = neural_net(self.layers, data, USE_CUDA, device)
     self.net.to(device)
Example #3 (0 votes)
    def __init__(self, t_data, x_data, y_data, c_data, t_eqns, x_eqns, y_eqns,
                 t_inlet, x_inlet, y_inlet, u_inlet, v_inlet, t_cyl, x_cyl,
                 y_cyl, layers, batch_size, Pec, Rey):
        """Build the TF1 training graph for 2D flow past a cylinder.

        t/x/y_data, c_data: points where concentration data is available.
        t/x/y_eqns: collocation points for the Navier-Stokes residuals.
        t/x/y_inlet, u/v_inlet: inlet points with prescribed velocities.
        t/x/y_cyl: points on the cylinder surface, where the predicted
            velocity is driven to zero in the loss.
        layers, batch_size: network architecture and minibatch size.
        Pec, Rey: Peclet and Reynolds numbers (fixed constants here).
        """

        # specs
        self.layers = layers
        self.batch_size = batch_size

        # flow properties
        self.Pec = Pec
        self.Rey = Rey

        # data (stored as given; fed through the *_tf placeholders below)
        [self.t_data, self.x_data, self.y_data,
         self.c_data] = [t_data, x_data, y_data, c_data]
        [self.t_eqns, self.x_eqns, self.y_eqns] = [t_eqns, x_eqns, y_eqns]
        [self.t_inlet, self.x_inlet, self.y_inlet, self.u_inlet,
         self.v_inlet] = [t_inlet, x_inlet, y_inlet, u_inlet, v_inlet]
        [self.t_cyl, self.x_cyl, self.y_cyl] = [t_cyl, x_cyl, y_cyl]

        # placeholders (each a [None, 1] float32 column)
        [self.t_data_tf, self.x_data_tf, self.y_data_tf, self.c_data_tf
         ] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(4)]
        [self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf
         ] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(3)]
        [
            self.t_inlet_tf, self.x_inlet_tf, self.y_inlet_tf, self.u_inlet_tf,
            self.v_inlet_tf
        ] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(5)]
        [self.t_cyl_tf, self.x_cyl_tf, self.y_cyl_tf
         ] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(3)]

        # physics "uninformed" neural networks: (t, x, y) -> (c, u, v, p)
        self.net_cuvp = neural_net(self.t_data,
                                   self.x_data,
                                   self.y_data,
                                   layers=self.layers)

        [
            self.c_data_pred, self.u_data_pred, self.v_data_pred,
            self.p_data_pred
        ] = self.net_cuvp(self.t_data_tf, self.x_data_tf, self.y_data_tf)

        # physics "uninformed" neural networks (data at the inlet)
        [_, self.u_inlet_pred, self.v_inlet_pred,
         _] = self.net_cuvp(self.t_inlet_tf, self.x_inlet_tf, self.y_inlet_tf)

        # physics "uninformed" neural networks (data on the cylinder)
        [_, self.u_cyl_pred, self.v_cyl_pred,
         _] = self.net_cuvp(self.t_cyl_tf, self.x_cyl_tf, self.y_cyl_tf)

        # physics "informed" neural networks
        [
            self.c_eqns_pred, self.u_eqns_pred, self.v_eqns_pred,
            self.p_eqns_pred
        ] = self.net_cuvp(self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf)

        # e1..e4: Navier-Stokes residuals, all driven to zero in the loss
        [
            self.e1_eqns_pred, self.e2_eqns_pred, self.e3_eqns_pred,
            self.e4_eqns_pred
        ] = Navier_Stokes_2D(self.c_eqns_pred, self.u_eqns_pred,
                             self.v_eqns_pred, self.p_eqns_pred,
                             self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf,
                             self.Pec, self.Rey)

        # gradients required for the lift and drag forces
        [
            self.u_x_eqns_pred, self.v_x_eqns_pred, self.u_y_eqns_pred,
            self.v_y_eqns_pred
        ] = Gradient_Velocity_2D(self.u_eqns_pred, self.v_eqns_pred,
                                 self.x_eqns_tf, self.y_eqns_tf)

        # loss: concentration data + inlet velocities + zero velocity on the
        # cylinder + the four PDE residuals, all equally weighted
        self.loss = mean_squared_error(self.c_data_pred, self.c_data_tf) + \
                    mean_squared_error(self.u_inlet_pred, self.u_inlet_tf) + \
                    mean_squared_error(self.v_inlet_pred, self.v_inlet_tf) + \
                    mean_squared_error(self.u_cyl_pred, 0.0) + \
                    mean_squared_error(self.v_cyl_pred, 0.0) + \
                    mean_squared_error(self.e1_eqns_pred, 0.0) + \
                    mean_squared_error(self.e2_eqns_pred, 0.0) + \
                    mean_squared_error(self.e3_eqns_pred, 0.0) + \
                    mean_squared_error(self.e4_eqns_pred, 0.0)

        # optimizers (Adam with a feedable learning rate)
        self.learning_rate = tf.placeholder(tf.float32, shape=[])
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate)
        self.train_op = self.optimizer.minimize(self.loss)

        self.sess = tf_session()
Example #4 (0 votes)
    def __init__(self, t_data, x_data, y_data, c_data, t_eqns, x_eqns, y_eqns,
                 layers, batch_size, Pec, Rey):
        """Assemble the TF1 training graph for a 2D hidden-fluid-mechanics model.

        t/x/y_data, c_data: observation points and measured concentration.
        t/x/y_eqns: collocation points for the Navier-Stokes residuals.
        layers, batch_size: network architecture and minibatch size.
        Pec, Rey: Peclet and Reynolds numbers (held fixed).
        """

        # specs
        self.layers = layers
        self.batch_size = batch_size

        # flow properties (constants)
        self.Pec = Pec
        self.Rey = Rey

        # raw data arrays, kept as provided
        self.t_data, self.x_data, self.y_data, self.c_data = \
            t_data, x_data, y_data, c_data
        self.t_eqns, self.x_eqns, self.y_eqns = t_eqns, x_eqns, y_eqns

        # feed placeholders, one [None, 1] float32 column each
        def make_placeholder():
            return tf.placeholder(tf.float32, shape=[None, 1])

        self.t_data_tf = make_placeholder()
        self.x_data_tf = make_placeholder()
        self.y_data_tf = make_placeholder()
        self.c_data_tf = make_placeholder()
        self.t_eqns_tf = make_placeholder()
        self.x_eqns_tf = make_placeholder()
        self.y_eqns_tf = make_placeholder()

        # physics "uninformed" network: (t, x, y) -> (c, u, v, p)
        self.net_cuvp = neural_net(self.t_data,
                                   self.x_data,
                                   self.y_data,
                                   layers=self.layers)

        (self.c_data_pred, self.u_data_pred,
         self.v_data_pred, self.p_data_pred) = self.net_cuvp(
            self.t_data_tf, self.x_data_tf, self.y_data_tf)

        # physics "informed" predictions at the collocation points
        (self.c_eqns_pred, self.u_eqns_pred,
         self.v_eqns_pred, self.p_eqns_pred) = self.net_cuvp(
            self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf)

        # e1..e4: Navier-Stokes residuals, driven to zero in the loss
        (self.e1_eqns_pred, self.e2_eqns_pred,
         self.e3_eqns_pred, self.e4_eqns_pred) = Navier_Stokes_2D(
            self.c_eqns_pred, self.u_eqns_pred, self.v_eqns_pred,
            self.p_eqns_pred, self.t_eqns_tf, self.x_eqns_tf,
            self.y_eqns_tf, self.Pec, self.Rey)

        # strain-rate components at the collocation points
        (self.eps11dot_eqns_pred, self.eps12dot_eqns_pred,
         self.eps22dot_eqns_pred) = Strain_Rate_2D(
            self.u_eqns_pred, self.v_eqns_pred,
            self.x_eqns_tf, self.y_eqns_tf)

        # loss: data misfit plus the four PDE residuals
        self.loss = mean_squared_error(self.c_data_pred, self.c_data_tf) + \
                    mean_squared_error(self.e1_eqns_pred, 0.0) + \
                    mean_squared_error(self.e2_eqns_pred, 0.0) + \
                    mean_squared_error(self.e3_eqns_pred, 0.0) + \
                    mean_squared_error(self.e4_eqns_pred, 0.0)

        # Adam optimizer with a feedable learning rate
        self.learning_rate = tf.placeholder(tf.float32, shape=[])
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate)
        self.train_op = self.optimizer.minimize(self.loss)

        self.sess = tf_session()
Example #5 (0 votes)
File: hfm.py  Project: brendancolvert/HFM
    def __init__(self, t_data, x_data, y_data, z_data, c_data, t_eqns, x_eqns,
                 y_eqns, z_eqns, layers, batch_size, Pec, Rey):
        """Build the TF1 training graph for a 3D HFM with learnable Pec/Rey.

        t/x/y/z_data, c_data: points with observed concentration.
        t/x/y/z_eqns: collocation points for the 3D Navier-Stokes residuals.
        layers, batch_size: network architecture and minibatch size.
        Pec, Rey: initial guesses; both are *trainable* variables here, so
            the flow properties are inferred jointly with the network.
        """

        # specs
        self.layers = layers
        self.batch_size = batch_size

        #         # flow properties
        #         self.Pec = Pec
        #         self.Rey = Rey

        # flow properties (trainable: discovered during optimization)
        self.Pec = tf.Variable(Pec, dtype=tf.float32, trainable=True)
        self.Rey = tf.Variable(Rey, dtype=tf.float32, trainable=True)

        # data (stored as given; fed through the *_tf placeholders below)
        [self.t_data, self.x_data, self.y_data, self.z_data,
         self.c_data] = [t_data, x_data, y_data, z_data, c_data]
        [self.t_eqns, self.x_eqns, self.y_eqns,
         self.z_eqns] = [t_eqns, x_eqns, y_eqns, z_eqns]

        # placeholders (each a [None, 1] float32 column)
        [
            self.t_data_tf, self.x_data_tf, self.y_data_tf, self.z_data_tf,
            self.c_data_tf
        ] = [
            tf.compat.v1.placeholder(tf.float32, shape=[None, 1])
            for _ in range(5)
        ]
        [self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf, self.z_eqns_tf] = [
            tf.compat.v1.placeholder(tf.float32, shape=[None, 1])
            for _ in range(4)
        ]

        # physics "uninformed" neural networks: (t, x, y, z) -> (c, u, v, w, p)
        self.net_cuvwp = neural_net(self.t_data,
                                    self.x_data,
                                    self.y_data,
                                    self.z_data,
                                    layers=self.layers)

        [
            self.c_data_pred, self.u_data_pred, self.v_data_pred,
            self.w_data_pred, self.p_data_pred
        ] = self.net_cuvwp(self.t_data_tf, self.x_data_tf, self.y_data_tf,
                           self.z_data_tf)

        # physics "informed" neural networks
        [
            self.c_eqns_pred, self.u_eqns_pred, self.v_eqns_pred,
            self.w_eqns_pred, self.p_eqns_pred
        ] = self.net_cuvwp(self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf,
                           self.z_eqns_tf)

        # e1..e5: 3D Navier-Stokes residuals, driven to zero in the loss
        [
            self.e1_eqns_pred, self.e2_eqns_pred, self.e3_eqns_pred,
            self.e4_eqns_pred, self.e5_eqns_pred
        ] = Navier_Stokes_3D(self.c_eqns_pred, self.u_eqns_pred,
                             self.v_eqns_pred, self.w_eqns_pred,
                             self.p_eqns_pred, self.t_eqns_tf, self.x_eqns_tf,
                             self.y_eqns_tf, self.z_eqns_tf, self.Pec,
                             self.Rey)

        # loss: concentration data misfit + the five PDE residuals
        self.loss = mean_squared_error(self.c_data_pred, self.c_data_tf) + \
                    mean_squared_error(self.e1_eqns_pred, 0.0) + \
                    mean_squared_error(self.e2_eqns_pred, 0.0) + \
                    mean_squared_error(self.e3_eqns_pred, 0.0) + \
                    mean_squared_error(self.e4_eqns_pred, 0.0) + \
                    mean_squared_error(self.e5_eqns_pred, 0.0)

        # optimizers (Adam with a feedable learning rate)
        self.learning_rate = tf.compat.v1.placeholder(tf.float32, shape=[])
        self.optimizer = tf.compat.v1.train.AdamOptimizer(
            learning_rate=self.learning_rate)
        self.train_op = self.optimizer.minimize(self.loss)

        self.sess = tf_session()
Example #6 (0 votes)
    def __init__(self, t_data, S_data, t_eqns, layers):
        """Build the TF1 graph for inferring ODE parameters from species data.

        t_data: observation times, shape (N, 1).
        S_data: observed species concentrations, shape (N, D); only
            columns 4:6 enter the data loss below, the rest are hidden.
        t_eqns: collocation times where the ODE residual is enforced.
        layers: network layer sizes for ``neural_net``.

        Parameter names (J0, k1..k6, kappa, q, K1, psi, N, A) suggest a
        glycolysis-type kinetic model -- confirm against ``self.SysODE``.
        """

        # number of species (columns of S_data)
        self.D = S_data.shape[1]

        # time range, used to normalize t into [-1, 1] below
        self.t_min = t_data.min(0)
        self.t_max = t_data.max(0)

        # per-species scale (std of the data), frozen during training
        self.S_scale = tf.Variable(S_data.std(0),
                                   dtype=tf.float32,
                                   trainable=False)

        # data on all the species (only some are used as input)
        self.t_data, self.S_data = t_data, S_data
        self.t_eqns = t_eqns

        # layers
        self.layers = layers

        #        self.J0 = tf.Variable(2.5, dtype=tf.float32, trainable=False)
        #        self.k1 = tf.Variable(100.0, dtype=tf.float32, trainable=False)
        #        self.k2 = tf.Variable(6.0, dtype=tf.float32, trainable=False)
        #        self.k3 = tf.Variable(16.0, dtype=tf.float32, trainable=False)
        #        self.k4 = tf.Variable(100.0, dtype=tf.float32, trainable=False)
        #        self.k5 = tf.Variable(1.28, dtype=tf.float32, trainable=False)
        #        self.k6 = tf.Variable(12.0, dtype=tf.float32, trainable=False)
        #        self.k = tf.Variable(1.8, dtype=tf.float32, trainable=False)
        #        self.kappa = tf.Variable(13.0, dtype=tf.float32, trainable=False)
        #        self.q = tf.Variable(4.0, dtype=tf.float32, trainable=False)
        #        self.K1 = tf.Variable(0.52, dtype=tf.float32, trainable=False)
        #        self.psi = tf.Variable(0.1, dtype=tf.float32, trainable=False)
        #        self.N = tf.Variable(1.0, dtype=tf.float32, trainable=False)
        #        self.A = tf.Variable(4.0, dtype=tf.float32, trainable=False)

        # trainable log-parameters; exponentiated below so the effective
        # kinetic parameters stay positive.
        # NOTE(review): logk and logK1 start at 1.0 while the rest start at
        # 0.0 -- presumably intentional initial guesses; confirm.
        self.logJ0 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logk1 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logk2 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logk3 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logk4 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logk5 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logk6 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logk = tf.Variable(1.0, dtype=tf.float32, trainable=True)
        self.logkappa = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logq = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logK1 = tf.Variable(1.0, dtype=tf.float32, trainable=True)
        self.logpsi = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logN = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logA = tf.Variable(0.0, dtype=tf.float32, trainable=True)

        # variables optimized by the separate parameter optimizer below
        self.var_list_eqns = [
            self.logJ0, self.logk1, self.logk2, self.logk3, self.logk4,
            self.logk5, self.logk6, self.logk, self.logkappa, self.logq,
            self.logK1, self.logpsi, self.logN, self.logA
        ]

        # effective (positive) parameters used by SysODE
        self.J0 = tf.exp(self.logJ0)
        self.k1 = tf.exp(self.logk1)
        self.k2 = tf.exp(self.logk2)
        self.k3 = tf.exp(self.logk3)
        self.k4 = tf.exp(self.logk4)
        self.k5 = tf.exp(self.logk5)
        self.k6 = tf.exp(self.logk6)
        self.k = tf.exp(self.logk)
        self.kappa = tf.exp(self.logkappa)
        self.q = tf.exp(self.logq)
        self.K1 = tf.exp(self.logK1)
        self.psi = tf.exp(self.logpsi)
        self.N = tf.exp(self.logN)
        self.A = tf.exp(self.logA)

        # placeholders for data
        self.t_data_tf = tf.placeholder(tf.float32, shape=[None, 1])
        self.S_data_tf = tf.placeholder(tf.float32, shape=[None, self.D])
        self.t_eqns_tf = tf.placeholder(tf.float32, shape=[None, 1])
        self.learning_rate = tf.placeholder(tf.float32, shape=[])

        # physics uninformed neural networks
        self.net_sysbio = neural_net(layers=self.layers)

        # normalized time in [-1, 1]; the (H + 1) factor pins the
        # prediction to the initial condition S_data[0, :] at t = t_min
        self.H_data = 2.0 * (self.t_data_tf - self.t_min) / (self.t_max -
                                                             self.t_min) - 1.0
        self.S_data_pred = self.S_data[0, :] + self.S_scale * (
            self.H_data + 1.0) * self.net_sysbio(self.H_data)

        # physics informed neural networks
        self.H_eqns = 2.0 * (self.t_eqns_tf - self.t_min) / (self.t_max -
                                                             self.t_min) - 1.0
        self.S_eqns_pred = self.S_data[0, :] + self.S_scale * (
            self.H_eqns + 1.0) * self.net_sysbio(self.H_eqns)

        # ODE residual at the collocation times
        self.E_eqns_pred = self.SysODE(self.S_eqns_pred, self.t_eqns_tf)

        #        self.S_scale = 0.9*self.S_scale + 0.1*tf.math.reduce_std(self.S_eqns_pred, 0)
        #        scale_list = tf.unstack(self.S_scale)
        #        scale_list[4:6] = self.S_data.std(0)[4:6]
        #        self.S_scale = tf.stack(scale_list)

        # loss: scaled data misfit on observed species 4:6, the ODE
        # residual, and an auxiliary fit of the final time point
        self.loss_data = mean_squared_error(
            self.S_data_tf[:, 4:6] / self.S_scale[4:6],
            self.S_data_pred[:, 4:6] / self.S_scale[4:6])
        self.loss_eqns = mean_squared_error(0.0,
                                            self.E_eqns_pred / self.S_scale)
        self.loss_auxl = mean_squared_error(
            self.S_data_tf[-1, :] / self.S_scale[:],
            self.S_data_pred[-1, :] / self.S_scale[:])
        self.loss = 0.95 * self.loss_data + 0.05 * self.loss_eqns + 0.05 * self.loss_auxl

        # optimizers: one for the network weights (feedable learning rate),
        # one fixed-rate optimizer for the log-parameters
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate)
        self.optimizer_para = tf.train.AdamOptimizer(learning_rate=0.001)

        self.train_op = self.optimizer.minimize(self.loss,
                                                var_list=[
                                                    self.net_sysbio.weights,
                                                    self.net_sysbio.biases,
                                                    self.net_sysbio.gammas
                                                ])
        self.trainpara_op = self.optimizer_para.minimize(
            self.loss, var_list=self.var_list_eqns)
        self.sess = tf_session()
Example #7 (0 votes)
    def __init__(self, t_data, S_data, t_eqns, layers, meal_tq):
        """Build the TF1 graph for inferring ODE parameters with meal forcing.

        t_data: observation times, shape (N, 1).
        S_data: observed species, shape (N, D); only column 2 enters the
            data loss below, the rest are hidden states.
        t_eqns: collocation times where the ODE residual is enforced.
        layers: network layer sizes for ``neural_net``.
        meal_tq: pair (meal times, meal quantities); times are normalized
            to [-1, 1] like t, quantities are used as-is.

        Parameter names (Rm, Vg, Ub, Rg, ...) suggest an ultradian
        glucose-insulin model -- confirm against ``self.SysODE``.
        """

        # number of species (columns of S_data)
        self.D = S_data.shape[1]

        # time range, used to normalize t into [-1, 1] below
        self.t_min = t_data.min(0)
        self.t_max = t_data.max(0)

        #        self.S_scale = tf.Variable(np.array(self.D*[1.0]), dtype=tf.float32, trainable=False)
        # per-species scale: plain numpy std (not a TF variable here)
        self.S_scale = S_data.std(0)

        # data on all the species (only some are used as input)
        self.t_data, self.S_data = t_data, S_data
        self.t_eqns = t_eqns

        # layers
        self.layers = layers

        # meal times mapped to the same normalized [-1, 1] axis as t
        self.mt = 2.0 * (meal_tq[0] - self.t_min) / (self.t_max -
                                                     self.t_min) - 1.0
        self.mq = meal_tq[1]

        # fixed (non-trainable) model parameters
        #        self.k = tf.Variable(1.0/120.0, dtype=tf.float32, trainable=False)
        self.Rm = tf.Variable(209.0 / 100.0, dtype=tf.float32, trainable=False)
        self.Vg = tf.Variable(10.0, dtype=tf.float32, trainable=False)
        self.C1 = tf.Variable(300.0 / 100.0, dtype=tf.float32, trainable=False)
        self.a1 = tf.Variable(6.6, dtype=tf.float32, trainable=False)
        #        self.Ub = tf.Variable(72.0/100.0, dtype=tf.float32, trainable=False)
        #        self.C2 = tf.Variable(144.0/100.0, dtype=tf.float32, trainable=False)
        #        self.U0 = tf.Variable(4.0/100.0, dtype=tf.float32, trainable=False)
        #        self.Um = tf.Variable(90.0/100.0, dtype=tf.float32, trainable=False)
        #        self.C3 = tf.Variable(100.0/100.0, dtype=tf.float32, trainable=False)
        #        self.C4 = tf.Variable(80.0/100.0, dtype=tf.float32, trainable=False)
        self.Vi = tf.Variable(11.0, dtype=tf.float32, trainable=False)
        self.E = tf.Variable(0.2, dtype=tf.float32, trainable=False)
        self.ti = tf.Variable(100.0, dtype=tf.float32, trainable=False)
        #        self.beta = tf.Variable(1.772, dtype=tf.float32, trainable=False)
        #        self.Rg = tf.Variable(180.0/100.0, dtype=tf.float32, trainable=False)
        #        self.alpha = tf.Variable(7.5, dtype=tf.float32, trainable=False)
        self.Vp = tf.Variable(3.0, dtype=tf.float32, trainable=False)
        #        self.C5 = tf.Variable(26.0/100.0, dtype=tf.float32, trainable=False)
        self.tp = tf.Variable(6.0, dtype=tf.float32, trainable=False)
        #        self.td = tf.Variable(12.0, dtype=tf.float32, trainable=False)

        # trainable log-parameters; exponentiated below so the effective
        # parameters stay positive
        self.logk = tf.Variable(-6.0, dtype=tf.float32, trainable=True)
        #        self.logRm = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        #        self.logVg = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        #        self.logC1 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        #        self.loga1 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logUb = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logC2 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logU0 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logUm = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logC3 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logC4 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        #        self.logVi = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        #        self.logE = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        #        self.logti = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logbeta = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logRg = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logalpha = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        #        self.logVp = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logC5 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        #        self.logtp = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logtd = tf.Variable(0.0, dtype=tf.float32, trainable=True)

        # variables optimized by the separate parameter optimizer below
        self.var_list_eqns = [
            self.logk, self.logUb, self.logC2, self.logU0, self.logUm,
            self.logC3, self.logC4, self.logbeta, self.logRg, self.logalpha,
            self.logC5, self.logtd
        ]

        # effective (positive) parameters used by SysODE
        self.k = tf.exp(self.logk)
        #        self.Rm = tf.exp(self.logRm)
        #        self.Vg = tf.exp(self.logVg)
        #        self.C1 = tf.exp(self.logC1)
        #        self.a1 = tf.exp(self.loga1)
        self.Ub = tf.exp(self.logUb)
        self.C2 = tf.exp(self.logC2)
        self.U0 = tf.exp(self.logU0)
        self.Um = tf.exp(self.logUm)
        self.C3 = tf.exp(self.logC3)
        self.C4 = tf.exp(self.logC4)
        #        self.Vi = tf.exp(self.logVi)
        #        self.E = tf.exp(self.logE)
        #        self.ti = tf.exp(self.logti)
        self.beta = tf.exp(self.logbeta)
        self.Rg = tf.exp(self.logRg)
        self.alpha = tf.exp(self.logalpha)
        #        self.Vp = tf.exp(self.logVp)
        self.C5 = tf.exp(self.logC5)
        #        self.tp = tf.exp(self.logtp)
        self.td = tf.exp(self.logtd)

        # placeholders for data
        self.t_data_tf = tf.placeholder(tf.float32, shape=[None, 1])
        self.S_data_tf = tf.placeholder(tf.float32, shape=[None, self.D])
        self.t_eqns_tf = tf.placeholder(tf.float32, shape=[None, 1])
        self.mt_tf = tf.placeholder(tf.float32, shape=[None, self.mt.shape[1]])
        self.mq_tf = tf.placeholder(tf.float32, shape=[None, self.mq.shape[1]])
        self.learning_rate = tf.placeholder(tf.float32, shape=[])

        # physics uninformed neural networks
        self.net_sysbio = neural_net(layers=self.layers)

        # normalized time in [-1, 1]; the (H + 1) factor pins the
        # prediction to the initial condition S_data[0, :] at t = t_min
        self.H_data = 2.0 * (self.t_data_tf - self.t_min) / (self.t_max -
                                                             self.t_min) - 1.0
        self.S_data_pred = self.S_data[0, :] + self.S_scale * (
            self.H_data + 1.0) * self.net_sysbio(self.H_data)

        # physics informed neural networks
        self.H_eqns = 2.0 * (self.t_eqns_tf - self.t_min) / (self.t_max -
                                                             self.t_min) - 1.0
        self.S_eqns_pred = self.S_data[0, :] + self.S_scale * (
            self.H_eqns + 1.0) * self.net_sysbio(self.H_eqns)

        # ODE residual (and intake term IG) at the collocation times
        self.E_eqns_pred, self.IG = self.SysODE(self.S_eqns_pred,
                                                self.t_eqns_tf, self.H_eqns,
                                                self.mt_tf, self.mq_tf)

        # Adaptive S_scale
        #        self.S_scale = 0.9*self.S_scale + 0.1*tf.math.reduce_std(self.S_eqns_pred, 0)
        #        scale_list = tf.unstack(self.S_scale)
        #        scale_list[2] = self.S_data.std(0)[2]
        #        self.S_scale = tf.stack(scale_list)

        # loss: scaled data misfit on observed species 2, the ODE residual,
        # and an auxiliary fit of the final time point
        self.loss_data = mean_squared_error(
            self.S_data_tf[:, 2:3] / self.S_scale[2:3],
            self.S_data_pred[:, 2:3] / self.S_scale[2:3])
        self.loss_eqns = mean_squared_error(0.0,
                                            self.E_eqns_pred / self.S_scale)
        self.loss_auxl = mean_squared_error(
            self.S_data_tf[-1, :] / self.S_scale,
            self.S_data_pred[-1, :] / self.S_scale)
        self.loss = 0.99 * self.loss_data + 0.01 * self.loss_eqns + 0.01 * self.loss_auxl

        # optimizers: one for the network weights (feedable learning rate),
        # one fixed-rate optimizer for the log-parameters
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate)
        self.optimizer_para = tf.train.AdamOptimizer(learning_rate=0.001)

        self.train_op = self.optimizer.minimize(self.loss,
                                                var_list=[
                                                    self.net_sysbio.weights,
                                                    self.net_sysbio.biases,
                                                    self.net_sysbio.gammas
                                                ])
        self.trainpara_op = self.optimizer_para.minimize(
            self.loss, var_list=self.var_list_eqns)
        self.sess = tf_session()
Example #8 (0 votes)
    def __init__(self, t_data, x_data, y_data, c_data, u_data, v_data, p_data,
                 x_ref, y_ref, t_eqns, x_eqns, y_eqns, layers, batch_size, Pec,
                 Rey):
        """Build the TF1 training graph for a 2D HFM with error tracking.

        t/x/y_data, c_data: points with observed concentration (the only
            data term in the loss).
        u/v/p_data: held-out reference fields, used only for the relative
            L2 error diagnostics below, not for training.
        x_ref, y_ref: reference location whose predicted pressure is
            subtracted before comparing p (pressure is defined up to a
            constant in incompressible flow).
        t/x/y_eqns: collocation points for the Navier-Stokes residuals.
        layers, batch_size: network architecture and minibatch size.
        Pec, Rey: Peclet and Reynolds numbers (fixed constants here).
        """

        # specs
        self.layers = layers
        self.batch_size = batch_size

        # flow properties
        self.Pec = Pec
        self.Rey = Rey

        # data (stored as given; fed through the *_tf placeholders below)
        [self.t_data, self.x_data, self.y_data,
         self.c_data] = [t_data, x_data, y_data, c_data]
        [self.u_data, self.v_data, self.p_data] = [u_data, v_data, p_data]
        [self.x_ref, self.y_ref] = [x_ref, y_ref]
        [self.t_eqns, self.x_eqns, self.y_eqns] = [t_eqns, x_eqns, y_eqns]

        # placeholders (each a [None, 1] float32 column)
        [self.t_data_tf, self.x_data_tf, self.y_data_tf, self.c_data_tf
         ] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(4)]
        [self.u_data_tf, self.v_data_tf, self.p_data_tf
         ] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(3)]
        [self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf
         ] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(3)]

        # physics "uninformed" neural networks: (t, x, y) -> (c, u, v, p)
        self.net_cuvp = neural_net(self.t_data,
                                   self.x_data,
                                   self.y_data,
                                   layers=self.layers)

        [
            self.c_data_pred, self.u_data_pred, self.v_data_pred,
            self.p_data_pred
        ] = self.net_cuvp(self.t_data_tf, self.x_data_tf, self.y_data_tf)

        # pressure at the fixed reference point (x_ref, y_ref) for every t;
        # x/y placeholders are zeroed and shifted so only t varies
        [_, _, _,
         self.p_ref_pred] = self.net_cuvp(self.t_data_tf,
                                          self.x_data_tf * 0.0 + self.x_ref,
                                          self.y_data_tf * 0.0 + self.y_ref)

        # physics "informed" neural networks
        [
            self.c_eqns_pred, self.u_eqns_pred, self.v_eqns_pred,
            self.p_eqns_pred
        ] = self.net_cuvp(self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf)

        # e1..e4: Navier-Stokes residuals, driven to zero in the loss
        [
            self.e1_eqns_pred, self.e2_eqns_pred, self.e3_eqns_pred,
            self.e4_eqns_pred
        ] = Navier_Stokes_2D(self.c_eqns_pred, self.u_eqns_pred,
                             self.v_eqns_pred, self.p_eqns_pred,
                             self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf,
                             self.Pec, self.Rey)

        # loss terms kept separate so they can be logged individually
        self.loss_c = mean_squared_error(self.c_data_pred, self.c_data_tf)
        self.loss_e1 = mean_squared_error(self.e1_eqns_pred, 0.0)
        self.loss_e2 = mean_squared_error(self.e2_eqns_pred, 0.0)
        self.loss_e3 = mean_squared_error(self.e3_eqns_pred, 0.0)
        self.loss_e4 = mean_squared_error(self.e4_eqns_pred, 0.0)

        self.loss = self.loss_c + \
                    self.loss_e1 + self.loss_e2 + \
                    self.loss_e3 + self.loss_e4

        # relative L2 errors (diagnostics only; p is compared after
        # subtracting the reference-point pressure)
        self.error_c = relative_error(self.c_data_pred, self.c_data_tf)
        self.error_u = relative_error(self.u_data_pred, self.u_data_tf)
        self.error_v = relative_error(self.v_data_pred, self.v_data_tf)
        self.error_p = relative_error(self.p_data_pred - self.p_ref_pred,
                                      self.p_data_tf)

        # convergence plots (histories appended to elsewhere during training)
        self.loss_history = []
        self.loss_c_history = []
        self.loss_e1_history = []
        self.loss_e2_history = []
        self.loss_e3_history = []
        self.loss_e4_history = []

        self.error_c_history = []
        self.error_u_history = []
        self.error_v_history = []
        self.error_p_history = []

        # optimizers (Adam with a feedable learning rate)
        self.learning_rate = tf.placeholder(tf.float32, shape=[])
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate)
        self.train_op = self.optimizer.minimize(self.loss)

        self.sess = tf_session()