Code Example #1
    def compute_loss(self, batch_size, it):
        # sample a mini-batch of data points and of residual (equation) points
        N_data = self.data.shape[0]
        N_eqns = self.eqns.shape[0]
        idx_data = np.random.choice(N_data, min(batch_size, N_data))
        idx_eqns = np.random.choice(N_eqns, batch_size)

        # convert the mini-batches to float tensors that require gradients
        # (move them to the GPU here if desired)
        data_batch = torch.from_numpy(
            self.data[idx_data, :]).float().requires_grad_()
        c_data_batch = torch.from_numpy(
            self.c_data[idx_data, :]).float().requires_grad_()
        eqns_batch = torch.from_numpy(
            self.eqns[idx_eqns, :]).float().requires_grad_()

        # network outputs, split column-wise into (c, u, v, p)
        [c_data_pred, _, _, _] = torch.split(self.net(data_batch), 1, 1)
        [c_eqns_pred, u_eqns_pred, v_eqns_pred,
         p_eqns_pred] = torch.split(self.net(eqns_batch), 1, 1)
        [e1_eqns_pred, e2_eqns_pred, e3_eqns_pred, e4_eqns_pred] = \
            Navier_Stokes_2D(c_eqns_pred, u_eqns_pred, v_eqns_pred, p_eqns_pred,
                             eqns_batch, self.Pec, self.Rey)

        # data loss and PDE-residual losses (residuals are driven to zero)
        c_loss = mean_squared_error(c_data_pred, c_data_batch)
        e1_loss = mean_squared_error(e1_eqns_pred,
                                     torch.zeros_like(e1_eqns_pred))
        e2_loss = mean_squared_error(e2_eqns_pred,
                                     torch.zeros_like(e2_eqns_pred))
        e3_loss = mean_squared_error(e3_eqns_pred,
                                     torch.zeros_like(e3_eqns_pred))
        e4_loss = mean_squared_error(e4_eqns_pred,
                                     torch.zeros_like(e4_eqns_pred))
        loss = c_loss + (e1_loss + e2_loss + e3_loss + e4_loss)

        # update the data manager and return
        self.dm.update(c_loss, e1_loss, e2_loss, e3_loss, e4_loss, loss, it)
        return loss
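
The helpers referenced above (mean_squared_error, Navier_Stokes_2D, and the self.dm data manager) are defined elsewhere in the project; Navier_Stokes_2D presumably returns the transport, momentum, and continuity residuals, which is why each is fitted against a zero target. As a rough orientation only, a driver loop around compute_loss might look like the sketch below; the instance name pinn, the iteration budget max_iters, and the Adam settings are assumptions, not part of the original file.

# Hypothetical training driver (assumption, not from the original project):
# each call to compute_loss samples a fresh mini-batch, and Adam steps on
# the returned scalar loss.
optimizer = torch.optim.Adam(pinn.net.parameters(), lr=1e-3)
for it in range(max_iters):
    optimizer.zero_grad()
    loss = pinn.compute_loss(batch_size=10000, it=it)
    loss.backward()  # autograd differentiates through the PDE residuals
    optimizer.step()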
Code Example #2
File: Cylinder2D_No_Slip.py  Project: ZetaJ7/CtoP
    def __init__(self, t_data, x_data, y_data, c_data, t_eqns, x_eqns, y_eqns,
                 t_inlet, x_inlet, y_inlet, u_inlet, v_inlet, t_cyl, x_cyl,
                 y_cyl, layers, batch_size, Pec, Rey):

        # specs
        self.layers = layers
        self.batch_size = batch_size

        # flow properties
        self.Pec = Pec
        self.Rey = Rey

        # data
        [self.t_data, self.x_data, self.y_data,
         self.c_data] = [t_data, x_data, y_data, c_data]
        [self.t_eqns, self.x_eqns, self.y_eqns] = [t_eqns, x_eqns, y_eqns]
        [self.t_inlet, self.x_inlet, self.y_inlet, self.u_inlet,
         self.v_inlet] = [t_inlet, x_inlet, y_inlet, u_inlet, v_inlet]
        [self.t_cyl, self.x_cyl, self.y_cyl] = [t_cyl, x_cyl, y_cyl]

        # placeholders
        [self.t_data_tf, self.x_data_tf, self.y_data_tf, self.c_data_tf
         ] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(4)]
        [self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf
         ] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(3)]
        [
            self.t_inlet_tf, self.x_inlet_tf, self.y_inlet_tf, self.u_inlet_tf,
            self.v_inlet_tf
        ] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(5)]
        [self.t_cyl_tf, self.x_cyl_tf, self.y_cyl_tf
         ] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(3)]

        # physics "uninformed" neural networks
        self.net_cuvp = neural_net(self.t_data,
                                   self.x_data,
                                   self.y_data,
                                   layers=self.layers)

        [
            self.c_data_pred, self.u_data_pred, self.v_data_pred,
            self.p_data_pred
        ] = self.net_cuvp(self.t_data_tf, self.x_data_tf, self.y_data_tf)

        # physics "uninformed" neural networks (data at the inlet)
        [_, self.u_inlet_pred, self.v_inlet_pred,
         _] = self.net_cuvp(self.t_inlet_tf, self.x_inlet_tf, self.y_inlet_tf)

        # physics "uninformed" neural networks (data on the cylinder)
        [_, self.u_cyl_pred, self.v_cyl_pred,
         _] = self.net_cuvp(self.t_cyl_tf, self.x_cyl_tf, self.y_cyl_tf)

        # physics "informed" neural networks
        [
            self.c_eqns_pred, self.u_eqns_pred, self.v_eqns_pred,
            self.p_eqns_pred
        ] = self.net_cuvp(self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf)

        [
            self.e1_eqns_pred, self.e2_eqns_pred, self.e3_eqns_pred,
            self.e4_eqns_pred
        ] = Navier_Stokes_2D(self.c_eqns_pred, self.u_eqns_pred,
                             self.v_eqns_pred, self.p_eqns_pred,
                             self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf,
                             self.Pec, self.Rey)

        # gradients required for the lift and drag forces
        [
            self.u_x_eqns_pred, self.v_x_eqns_pred, self.u_y_eqns_pred,
            self.v_y_eqns_pred
        ] = Gradient_Velocity_2D(self.u_eqns_pred, self.v_eqns_pred,
                                 self.x_eqns_tf, self.y_eqns_tf)

        # loss: concentration data mismatch, inlet velocity data,
        # no-slip on the cylinder, and the four PDE residuals
        self.loss = mean_squared_error(self.c_data_pred, self.c_data_tf) + \
                    mean_squared_error(self.u_inlet_pred, self.u_inlet_tf) + \
                    mean_squared_error(self.v_inlet_pred, self.v_inlet_tf) + \
                    mean_squared_error(self.u_cyl_pred, 0.0) + \
                    mean_squared_error(self.v_cyl_pred, 0.0) + \
                    mean_squared_error(self.e1_eqns_pred, 0.0) + \
                    mean_squared_error(self.e2_eqns_pred, 0.0) + \
                    mean_squared_error(self.e3_eqns_pred, 0.0) + \
                    mean_squared_error(self.e4_eqns_pred, 0.0)

        # optimizers
        self.learning_rate = tf.placeholder(tf.float32, shape=[])
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate)
        self.train_op = self.optimizer.minimize(self.loss)

        self.sess = tf_session()
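
Training this graph follows the standard TF1 pattern: feed mini-batches of the data and equation points, the inlet and cylinder points, and a learning rate into the placeholders, then run train_op. A minimal sketch of a single step is shown below, assuming a constructed instance named model and already-sampled [N, 1] NumPy batches; the batch variable names belong to this sketch, not to the project.

# Hypothetical single training step (assumption, not part of the original class)
feed_dict = {
    model.t_data_tf: t_data_batch, model.x_data_tf: x_data_batch,
    model.y_data_tf: y_data_batch, model.c_data_tf: c_data_batch,
    model.t_eqns_tf: t_eqns_batch, model.x_eqns_tf: x_eqns_batch,
    model.y_eqns_tf: y_eqns_batch,
    model.t_inlet_tf: model.t_inlet, model.x_inlet_tf: model.x_inlet,
    model.y_inlet_tf: model.y_inlet, model.u_inlet_tf: model.u_inlet,
    model.v_inlet_tf: model.v_inlet,
    model.t_cyl_tf: model.t_cyl, model.x_cyl_tf: model.x_cyl,
    model.y_cyl_tf: model.y_cyl,
    model.learning_rate: 1e-3,
}
_, loss_value = model.sess.run([model.train_op, model.loss],
                               feed_dict=feed_dict)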
Code Example #3
    def __init__(self, t_data, x_data, y_data, c_data, t_eqns, x_eqns, y_eqns,
                 layers, batch_size, Pec, Rey):

        # specs
        self.layers = layers
        self.batch_size = batch_size

        # flow properties
        self.Pec = Pec
        self.Rey = Rey

        # data
        [self.t_data, self.x_data, self.y_data,
         self.c_data] = [t_data, x_data, y_data, c_data]
        [self.t_eqns, self.x_eqns, self.y_eqns] = [t_eqns, x_eqns, y_eqns]

        # placeholders
        [self.t_data_tf, self.x_data_tf, self.y_data_tf, self.c_data_tf
         ] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(4)]
        [self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf
         ] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(3)]

        # physics "uninformed" neural networks
        self.net_cuvp = neural_net(self.t_data,
                                   self.x_data,
                                   self.y_data,
                                   layers=self.layers)

        [
            self.c_data_pred, self.u_data_pred, self.v_data_pred,
            self.p_data_pred
        ] = self.net_cuvp(self.t_data_tf, self.x_data_tf, self.y_data_tf)

        # physics "informed" neural networks
        [
            self.c_eqns_pred, self.u_eqns_pred, self.v_eqns_pred,
            self.p_eqns_pred
        ] = self.net_cuvp(self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf)

        [
            self.e1_eqns_pred, self.e2_eqns_pred, self.e3_eqns_pred,
            self.e4_eqns_pred
        ] = Navier_Stokes_2D(self.c_eqns_pred, self.u_eqns_pred,
                             self.v_eqns_pred, self.p_eqns_pred,
                             self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf,
                             self.Pec, self.Rey)

        [
            self.eps11dot_eqns_pred, self.eps12dot_eqns_pred,
            self.eps22dot_eqns_pred
        ] = Strain_Rate_2D(self.u_eqns_pred, self.v_eqns_pred, self.x_eqns_tf,
                           self.y_eqns_tf)

        # loss: concentration data mismatch plus the four PDE residuals
        self.loss = mean_squared_error(self.c_data_pred, self.c_data_tf) + \
                    mean_squared_error(self.e1_eqns_pred, 0.0) + \
                    mean_squared_error(self.e2_eqns_pred, 0.0) + \
                    mean_squared_error(self.e3_eqns_pred, 0.0) + \
                    mean_squared_error(self.e4_eqns_pred, 0.0)

        # optimizers
        self.learning_rate = tf.placeholder(tf.float32, shape=[])
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate)
        self.train_op = self.optimizer.minimize(self.loss)

        self.sess = tf_session()
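
Strain_Rate_2D is another project helper; judging from the output names (eps11dot, eps12dot, eps22dot) it returns the components of the 2-D strain-rate tensor. The sketch below is only an assumption about what such a helper could look like in TF1, using plain tf.gradients.

def Strain_Rate_2D(u, v, x, y):
    # Assumed implementation: strain-rate tensor components
    #   eps11 = du/dx, eps12 = 0.5 * (du/dy + dv/dx), eps22 = dv/dy
    u_x = tf.gradients(u, x)[0]
    u_y = tf.gradients(u, y)[0]
    v_x = tf.gradients(v, x)[0]
    v_y = tf.gradients(v, y)[0]
    return u_x, 0.5 * (u_y + v_x), v_y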
Code Example #4
    def __init__(self, t_data, x_data, y_data, c_data, u_data, v_data, p_data,
                 x_ref, y_ref, t_eqns, x_eqns, y_eqns, layers, batch_size, Pec,
                 Rey):

        # specs
        self.layers = layers
        self.batch_size = batch_size

        # flow properties
        self.Pec = Pec
        self.Rey = Rey

        # data
        [self.t_data, self.x_data, self.y_data,
         self.c_data] = [t_data, x_data, y_data, c_data]
        [self.u_data, self.v_data, self.p_data] = [u_data, v_data, p_data]
        [self.x_ref, self.y_ref] = [x_ref, y_ref]
        [self.t_eqns, self.x_eqns, self.y_eqns] = [t_eqns, x_eqns, y_eqns]

        # placeholders
        [self.t_data_tf, self.x_data_tf, self.y_data_tf, self.c_data_tf
         ] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(4)]
        [self.u_data_tf, self.v_data_tf, self.p_data_tf
         ] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(3)]
        [self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf
         ] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(3)]

        # physics "uninformed" neural networks
        self.net_cuvp = neural_net(self.t_data,
                                   self.x_data,
                                   self.y_data,
                                   layers=self.layers)

        [
            self.c_data_pred, self.u_data_pred, self.v_data_pred,
            self.p_data_pred
        ] = self.net_cuvp(self.t_data_tf, self.x_data_tf, self.y_data_tf)

        [_, _, _,
         self.p_ref_pred] = self.net_cuvp(self.t_data_tf,
                                          self.x_data_tf * 0.0 + self.x_ref,
                                          self.y_data_tf * 0.0 + self.y_ref)

        # physics "informed" neural networks
        [
            self.c_eqns_pred, self.u_eqns_pred, self.v_eqns_pred,
            self.p_eqns_pred
        ] = self.net_cuvp(self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf)

        [
            self.e1_eqns_pred, self.e2_eqns_pred, self.e3_eqns_pred,
            self.e4_eqns_pred
        ] = Navier_Stokes_2D(self.c_eqns_pred, self.u_eqns_pred,
                             self.v_eqns_pred, self.p_eqns_pred,
                             self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf,
                             self.Pec, self.Rey)

        # individual loss terms: concentration data mismatch and PDE residuals
        self.loss_c = mean_squared_error(self.c_data_pred, self.c_data_tf)
        self.loss_e1 = mean_squared_error(self.e1_eqns_pred, 0.0)
        self.loss_e2 = mean_squared_error(self.e2_eqns_pred, 0.0)
        self.loss_e3 = mean_squared_error(self.e3_eqns_pred, 0.0)
        self.loss_e4 = mean_squared_error(self.e4_eqns_pred, 0.0)

        self.loss = self.loss_c + \
                    self.loss_e1 + self.loss_e2 + \
                    self.loss_e3 + self.loss_e4

        # relative L2 errors
        self.error_c = relative_error(self.c_data_pred, self.c_data_tf)
        self.error_u = relative_error(self.u_data_pred, self.u_data_tf)
        self.error_v = relative_error(self.v_data_pred, self.v_data_tf)
        self.error_p = relative_error(self.p_data_pred - self.p_ref_pred,
                                      self.p_data_tf)

        # loss / error histories for convergence plots
        self.loss_history = []
        self.loss_c_history = []
        self.loss_e1_history = []
        self.loss_e2_history = []
        self.loss_e3_history = []
        self.loss_e4_history = []

        self.error_c_history = []
        self.error_u_history = []
        self.error_v_history = []
        self.error_p_history = []

        # optimizers
        self.learning_rate = tf.placeholder(tf.float32, shape=[])
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate)
        self.train_op = self.optimizer.minimize(self.loss)

        self.sess = tf_session()
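
mean_squared_error and relative_error are also project helpers. Definitions consistent with how they are used above are sketched below purely as assumptions; the error_* tensors would then be evaluated periodically with sess.run and appended to the corresponding *_history lists for the convergence plots.

# Assumed helper definitions (the project's actual ones may differ)
def mean_squared_error(pred, exact):
    # exact may be a tensor or a plain scalar such as 0.0
    return tf.reduce_mean(tf.square(pred - exact))

def relative_error(pred, exact):
    # relative L2 error: ||pred - exact|| / ||exact||
    return tf.sqrt(tf.reduce_mean(tf.square(pred - exact)) /
                   tf.reduce_mean(tf.square(exact)))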