Example #1
    def get_error(self, T_star, X_star, Y_star, C_star, U_star, V_star, P_star,
                  it):

        snap = np.array([random.randint(0, 200)])  # pick one random snapshot index to evaluate on
        t_test = T_star[:, snap]
        x_test = X_star[:, snap]
        y_test = Y_star[:, snap]

        c_test = C_star[:, snap]
        u_test = U_star[:, snap]
        v_test = V_star[:, snap]
        p_test = P_star[:, snap]

        # Prediction
        c_pred, u_pred, v_pred, p_pred = self.predict(t_test, x_test, y_test)

        # Error
        error_c = relative_error(c_pred, torch.from_numpy(c_test).float())
        error_u = relative_error(u_pred, torch.from_numpy(u_test).float())
        error_v = relative_error(v_pred, torch.from_numpy(v_test).float())
        error_p = relative_error(
            p_pred - torch.mean(p_pred),
            torch.from_numpy(p_test - np.mean(p_test)).float())

        self.dm.update_error(error_c, error_u, error_v, error_p, it)
        print('Error: c: %e, u: %e, v: %e, p: %e' %
              (error_c, error_u, error_v, error_p))
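
The snippets in these examples call a relative_error helper that is not shown on this page. A minimal sketch consistent with how it is used (a relative L2 norm of the prediction error, with NumPy and PyTorch variants) could look as follows; the exact normalization in the original utilities may differ. Note also that the pressure error is always computed on mean-subtracted fields, because the pressure of an incompressible flow is only determined up to an additive constant.

import numpy as np
import torch

def relative_error(pred, exact):
    # Relative L2 error ||pred - exact|| / ||exact||; sketch only, the original
    # helper may normalize differently.
    if isinstance(pred, np.ndarray):
        return np.linalg.norm(pred - exact) / np.linalg.norm(exact)
    return torch.norm(pred - exact) / torch.norm(exact)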
Example #2
    # Test Data
    snap = np.array([100])
    t_test = T_star[:, snap]
    x_test = X_star[:, snap]
    y_test = Y_star[:, snap]

    c_test = C_star[:, snap]
    u_test = U_star[:, snap]
    v_test = V_star[:, snap]
    p_test = P_star[:, snap]

    # Prediction
    c_pred, u_pred, v_pred, p_pred = model.predict(t_test, x_test, y_test)

    # Error
    error_c = relative_error(c_pred, c_test)
    error_u = relative_error(u_pred, u_test)
    error_v = relative_error(v_pred, v_test)
    error_p = relative_error(p_pred - np.mean(p_pred),
                             p_test - np.mean(p_test))

    print('Error c: %e' % (error_c))
    print('Error u: %e' % (error_u))
    print('Error v: %e' % (error_v))
    print('Error p: %e' % (error_p))

    ################# Save Data ###########################

    C_pred = 0 * C_star
    U_pred = 0 * U_star
    V_pred = 0 * V_star
Example #3
def main():

    batch_size = 10000

    # Network architecture: 3 inputs (t, x, y), 10 hidden layers of 200 units, 4 outputs (c, u, v, p).
    layers = [3] + 10 * [4 * 50] + [4]

    # Load Data
    data = scipy.io.loadmat('../Data/Cylinder2D_flower.mat')

    t_star = data['t_star']  # T x 1
    x_star = data['x_star']  # N x 1
    y_star = data['y_star']  # N x 1

    T = t_star.shape[0]
    N = x_star.shape[0]

    U_star = data['U_star']  # N x T
    V_star = data['V_star']  # N x T
    P_star = data['P_star']  # N x T
    C_star = data['C_star']  # N x T

    # Rearrange Data
    T_star = np.tile(t_star, (1, N)).T  # N x T
    X_star = np.tile(x_star, (1, T))  # N x T
    Y_star = np.tile(y_star, (1, T))  # N x T

    ######################################################################
    ######################## Training Data ###############################
    ######################################################################

    # Training data: sub-sample T_data time snapshots (always keeping the first
    # and last) and N_data spatial points from the full N x T grid.
    T_data = 101
    N_data = 15000
    idx_t = np.concatenate([
        np.array([0]),
        np.random.choice(T - 2, T_data - 2, replace=False) + 1,
        np.array([T - 1])
    ])
    idx_x = np.random.choice(N, N_data, replace=False)
    t_data = T_star[:, idx_t][idx_x, :].flatten()[:, None]
    x_data = X_star[:, idx_t][idx_x, :].flatten()[:, None]
    y_data = Y_star[:, idx_t][idx_x, :].flatten()[:, None]
    c_data = C_star[:, idx_t][idx_x, :].flatten()[:, None]

    # Equation (collocation) points: use every snapshot and every spatial point.
    T_eqns = T
    N_eqns = N
    idx_t = np.concatenate([
        np.array([0]),
        np.random.choice(T - 2, T_eqns - 2, replace=False) + 1,
        np.array([T - 1])
    ])
    idx_x = np.random.choice(N, N_eqns, replace=False)
    t_eqns = T_star[:, idx_t][idx_x, :].flatten()[:, None]
    x_eqns = X_star[:, idx_t][idx_x, :].flatten()[:, None]
    y_eqns = Y_star[:, idx_t][idx_x, :].flatten()[:, None]

    # Add zero-mean Gaussian noise to the concentration data; the noise level
    # (a fraction of the data standard deviation) is read from the command line.
    noise = float(sys.argv[1])
    c_data = c_data + noise * np.std(c_data) * np.random.randn(
        c_data.shape[0], c_data.shape[1])

    # Training
    model = HFM(t_data,
                x_data,
                y_data,
                c_data,
                t_eqns,
                x_eqns,
                y_eqns,
                layers,
                batch_size,
                Pec=100,  # Peclet number
                Rey=100)  # Reynolds number

    model.train(total_time=40, learning_rate=1e-3)

    # Test Data
    snap = np.array([100])
    t_test = T_star[:, snap]
    x_test = X_star[:, snap]
    y_test = Y_star[:, snap]

    c_test = C_star[:, snap]
    u_test = U_star[:, snap]
    v_test = V_star[:, snap]
    p_test = P_star[:, snap]

    # Prediction
    c_pred, u_pred, v_pred, p_pred = model.predict(t_test, x_test, y_test)

    # Error
    error_c = relative_error(c_pred, c_test)
    error_u = relative_error(u_pred, u_test)
    error_v = relative_error(v_pred, v_test)
    error_p = relative_error(p_pred - np.mean(p_pred),
                             p_test - np.mean(p_test))

    print('Error c: %e' % (error_c))
    print('Error u: %e' % (error_u))
    print('Error v: %e' % (error_v))
    print('Error p: %e' % (error_p))

    ################# Save Data ###########################

    C_pred = 0 * C_star
    U_pred = 0 * U_star
    V_pred = 0 * V_star
    P_pred = 0 * P_star
    for snap in range(0, t_star.shape[0]):
        t_test = T_star[:, snap:snap + 1]
        x_test = X_star[:, snap:snap + 1]
        y_test = Y_star[:, snap:snap + 1]

        c_test = C_star[:, snap:snap + 1]
        u_test = U_star[:, snap:snap + 1]
        v_test = V_star[:, snap:snap + 1]
        p_test = P_star[:, snap:snap + 1]

        # Prediction
        c_pred, u_pred, v_pred, p_pred = model.predict(t_test, x_test, y_test)

        C_pred[:, snap:snap + 1] = c_pred
        U_pred[:, snap:snap + 1] = u_pred
        V_pred[:, snap:snap + 1] = v_pred
        P_pred[:, snap:snap + 1] = p_pred

        # Error
        error_c = relative_error(c_pred, c_test)
        error_u = relative_error(u_pred, u_test)
        error_v = relative_error(v_pred, v_test)
        error_p = relative_error(p_pred - np.mean(p_pred),
                                 p_test - np.mean(p_test))

        print('Error c: %e' % (error_c))
        print('Error u: %e' % (error_u))
        print('Error v: %e' % (error_v))
        print('Error p: %e' % (error_p))

    scipy.io.savemat(
        '../Results/Cylinder2D_flower_results_%.2f_noise_%s.mat' %
        (noise, time.strftime('%d_%m_%Y')), {
            'C_pred': C_pred,
            'U_pred': U_pred,
            'V_pred': V_pred,
            'P_pred': P_pred
        })
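
The noise level above comes from sys.argv[1], so the script expects the noise fraction as its single command-line argument. A hypothetical invocation and a quick sanity check of the saved results (the script name and the date stamp in the result file are illustrative; the keys match the savemat dictionary above) could look like:

# Hypothetical invocation: 0.05 adds 5% Gaussian noise to the concentration data.
#   python Cylinder2D_flower.py 0.05

import scipy.io

# Reload the results written above (date stamp illustrative).
results = scipy.io.loadmat(
    '../Results/Cylinder2D_flower_results_0.05_noise_01_01_2024.mat')
print(results['C_pred'].shape)  # N x T, same layout as C_star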
Example #4
def main():
    batch_size = 10000
    layers = [3] + 10 * [4 * 50] + [4]

    workdir = os.getcwd()
    test_info = '0'  # only used for the default --model_path; overwritten by args.test_info below
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--T", type=int, default=201)
    parser.add_argument("--N", type=int, default=30189)
    parser.add_argument("--mode", type=str, default='train')
    parser.add_argument("--test_info", type=str, default='1')
    parser.add_argument("--model_path",
                        type=str,
                        default='/'.join(workdir.split('/')[:-1]) +
                        '/Model/test_{}/'.format(test_info))
    parser.add_argument("--data", type=str, default='../Data/Cylinder2D.mat')
    parser.add_argument("--time", type=int, default=60)
    parser.add_argument("--lr", type=float, default=1e-3)
    args = parser.parse_args()

    T_data = args.T
    N_data = args.N
    Running_Mode = args.mode
    model_path = args.model_path
    if Running_Mode == 'input':
        if not os.path.exists(model_path):
            raise EnvironmentError('Model path does not exist, please check!')

    datafile = args.data
    total_time = args.time
    learning_rate = args.lr
    origin_fname = (datafile.split('/')[-1]).split('.')[0]
    test_info = args.test_info

    # Load Data

    data = scipy.io.loadmat(datafile)

    t_star = data['t_star']  # T x 1
    x_star = data['x_star']  # N x 1
    y_star = data['y_star']  # N x 1

    T = t_star.shape[0]
    N = x_star.shape[0]

    U_star = data['U_star']  # N x T
    V_star = data['V_star']  # N x T
    P_star = data['P_star']  # N x T
    C_star = data['C_star']  # N x T

    # Rearrange Data
    T_star = np.tile(t_star, (1, N)).T  # N x T
    X_star = np.tile(x_star, (1, T))  # N x T
    Y_star = np.tile(y_star, (1, T))  # N x T

    # T_data = int(sys.argv[1])
    # N_data = int(sys.argv[2])
    # Running_Mode=sys.argv[3]

    if Running_Mode not in ['train', 'input']:
        raise ValueError('Invalid running mode, please check!')

    ######################################################################
    ######################## Training Data ###############################
    ######################################################################
    idx_t = np.concatenate([
        np.array([0]),
        np.random.choice(T - 2, T_data - 2, replace=False) + 1,
        np.array([T - 1])
    ])
    idx_x = np.random.choice(N, N_data, replace=False)
    t_data = T_star[:, idx_t][idx_x, :].flatten()[:, None]
    x_data = X_star[:, idx_t][idx_x, :].flatten()[:, None]
    y_data = Y_star[:, idx_t][idx_x, :].flatten()[:, None]
    c_data = C_star[:, idx_t][idx_x, :].flatten()[:, None]

    T_eqns = T
    N_eqns = N
    idx_t = np.concatenate([
        np.array([0]),
        np.random.choice(T - 2, T_eqns - 2, replace=False) + 1,
        np.array([T - 1])
    ])
    idx_x = np.random.choice(N, N_eqns, replace=False)
    t_eqns = T_star[:, idx_t][idx_x, :].flatten()[:, None]
    x_eqns = X_star[:, idx_t][idx_x, :].flatten()[:, None]
    y_eqns = Y_star[:, idx_t][idx_x, :].flatten()[:, None]

    # Training
    model = C2P(t_data,
                x_data,
                y_data,
                c_data,
                t_eqns,
                x_eqns,
                y_eqns,
                layers,
                batch_size,
                Pec=100,
                Rey=100)
    print('----- Model class setup completed -----')

    # Training parameters
    if Running_Mode == 'train':
        print('Working on [{}] mode:\nTraining time: {} h.................'.
              format(Running_Mode, round(total_time / 3600.0)))
        model.train(total_time=total_time,
                    learning_rate=learning_rate,
                    test_info=test_info)

    # Weights output
    if Running_Mode != 'input':
        print('Working on [{}] mode: weights saving.............'.format(Running_Mode))
        fname = 'test_model.ckpt'
        filepath = '/'.join(
            workdir.split('/')[:-1]) + '/Model/test_{}'.format(test_info)
        file = filepath + '/' + fname
        model.w_extract(save_file=file)
        tr_record = '/home/ljj/PycharmWork/CtoP/Results/tr_record.txt'
        with open(tr_record, "a") as f:
            f.write(
                'Model Saved:{}.\n'.format(file) +
                '---------------------------------------------------------\n' +
                ' \n')
        print(
            '--------------------------Model Train Completed--------------------------'
        )

    # Weights input
    if Running_Mode == 'input':
        print('Working on [INPUT] mode: weights loading.............')
        model.w_input(model_path=model_path)

    # Test Data
    snap = np.array([100])
    t_test = T_star[:, snap]
    x_test = X_star[:, snap]
    y_test = Y_star[:, snap]

    c_test = C_star[:, snap]
    u_test = U_star[:, snap]
    v_test = V_star[:, snap]
    p_test = P_star[:, snap]

    # Prediction
    c_pred, u_pred, v_pred, p_pred = model.predict(t_test, x_test, y_test)

    # Error
    error_c = relative_error(c_pred, c_test)
    error_u = relative_error(u_pred, u_test)
    error_v = relative_error(v_pred, v_test)
    error_p = relative_error(p_pred - np.mean(p_pred),
                             p_test - np.mean(p_test))

    print('Error c: %e' % (error_c))
    print('Error u: %e' % (error_u))
    print('Error v: %e' % (error_v))
    print('Error p: %e' % (error_p))

    ################# Save Data ###########################

    C_pred = 0 * C_star
    U_pred = 0 * U_star
    V_pred = 0 * V_star
    P_pred = 0 * P_star

    L2c = 0 * C_star
    L2u = 0 * U_star
    L2v = 0 * V_star
    L2p = 0 * P_star

    for snap in range(0, t_star.shape[0]):
        t_test = T_star[:, snap:snap + 1]
        x_test = X_star[:, snap:snap + 1]
        y_test = Y_star[:, snap:snap + 1]

        c_test = C_star[:, snap:snap + 1]
        u_test = U_star[:, snap:snap + 1]
        v_test = V_star[:, snap:snap + 1]
        p_test = P_star[:, snap:snap + 1]

        # Prediction
        c_pred, u_pred, v_pred, p_pred = model.predict(t_test, x_test, y_test)

        C_pred[:, snap:snap + 1] = c_pred
        U_pred[:, snap:snap + 1] = u_pred
        V_pred[:, snap:snap + 1] = v_pred
        P_pred[:, snap:snap + 1] = p_pred

        # Error
        error_c = relative_error(c_pred, c_test)
        error_u = relative_error(u_pred, u_test)
        error_v = relative_error(v_pred, v_test)
        error_p = relative_error(p_pred - np.mean(p_pred),
                                 p_test - np.mean(p_test))

        L2c[:, snap:snap + 1] = error_c
        L2u[:, snap:snap + 1] = error_u
        L2v[:, snap:snap + 1] = error_v
        L2p[:, snap:snap + 1] = error_p

        # print('Error c: %e' % (error_c))
        # print('Error u: %e' % (error_u))
        # print('Error v: %e' % (error_v))
        # print('Error p: %e' % (error_p))

    savemat_path = '/'.join(workdir.split('/')[:-1]) + '/Results'
    if not os.path.exists(savemat_path):
        os.makedirs(savemat_path)
    savemat_name = 'C2P_result_{}_{}_test{}.mat'.format(
        origin_fname, Running_Mode, test_info)
    savefile = savemat_path + '/' + savemat_name
    scipy.io.savemat(
        savefile, {
            'C_pred': C_pred,
            'U_pred': U_pred,
            'V_pred': V_pred,
            'P_pred': P_pred,
            'Error_c': L2c,  # underscores: MATLAB variable names cannot contain spaces
            'Error_u': L2u,
            'Error_v': L2v,
            'Error_p': L2p
        })

    print('---------------Mission accomplished:{}.-----------------------'.
          format(savemat_name))
Example #5
    sx_test = Sx_star[:, snap]
    sy_test = Sy_star[:, snap]
    sz_test = Sz_star[:, snap]

    # Prediction
    c_pred, u_pred, v_pred, w_pred, p_pred = model.predict(
        t_test, x_test, y_test, z_test)

    # Shear: evaluate the wall shear stress at the boundary points (xb_star, yb_star,
    # zb_star) with the corresponding surface normals (nx_star, ny_star, nz_star),
    # all at the single test time t_test[0].
    sx_pred, sy_pred, sz_pred = model.predict_shear(t_test[0] + 0.0 * xb_star,
                                                    xb_star, yb_star, zb_star,
                                                    nx_star, ny_star, nz_star)

    # Error
    error_c = relative_error(c_pred, c_test)
    error_u = relative_error(u_pred, u_test)
    error_v = relative_error(v_pred, v_test)
    error_w = relative_error(w_pred, w_test)
    error_p = relative_error(p_pred - np.mean(p_pred),
                             p_test - np.mean(p_test))

    print('Error c: %e' % (error_c))
    print('Error u: %e' % (error_u))
    print('Error v: %e' % (error_v))
    print('Error w: %e' % (error_w))
    print('Error p: %e' % (error_p))
    sys.stdout.flush()

    # Error
    error_sx = relative_error(sx_pred, sx_test)
Example #6
    c_test = C_star[:, snap:snap + 1]
    u_test = U_star[:, snap:snap + 1]
    v_test = V_star[:, snap:snap + 1]
    p_test = P_star[:, snap:snap + 1]

    # Prediction
    c_pred, u_pred, v_pred, p_pred = model.predict(t_test, x_test, y_test)

    C_pred[:, snap:snap + 1] = c_pred.detach().numpy()
    U_pred[:, snap:snap + 1] = u_pred.detach().numpy()
    V_pred[:, snap:snap + 1] = v_pred.detach().numpy()
    P_pred[:, snap:snap + 1] = p_pred.detach().numpy()

    # Error
    error_c = relative_error(c_pred, torch.from_numpy(c_test).float())
    error_u = relative_error(u_pred, torch.from_numpy(u_test).float())
    error_v = relative_error(v_pred, torch.from_numpy(v_test).float())
    error_p = relative_error(
        p_pred - torch.mean(p_pred),
        torch.from_numpy(p_test - np.mean(p_test)).float())

    print('Error: c: %e, u: %e, v: %e, p: %e' %
          (error_c, error_u, error_v, error_p))

scipy.io.savemat(
    ('../Results/Cylinder2D_flower_results_%d_%d_%s_' + version + '.mat') %
    (T_data, N_data, time.strftime('%d_%m_%Y')), {
        'C_pred': C_pred,
        'U_pred': U_pred,
        'V_pred': V_pred,
Example #7
    def __init__(self, t_data, x_data, y_data, c_data, u_data, v_data, p_data,
                 x_ref, y_ref, t_eqns, x_eqns, y_eqns, layers, batch_size, Pec,
                 Rey):

        # specs
        self.layers = layers
        self.batch_size = batch_size

        # flow properties
        self.Pec = Pec
        self.Rey = Rey

        # data
        [self.t_data, self.x_data, self.y_data,
         self.c_data] = [t_data, x_data, y_data, c_data]
        [self.u_data, self.v_data, self.p_data] = [u_data, v_data, p_data]
        [self.x_ref, self.y_ref] = [x_ref, y_ref]
        [self.t_eqns, self.x_eqns, self.y_eqns] = [t_eqns, x_eqns, y_eqns]

        # placeholders
        [self.t_data_tf, self.x_data_tf, self.y_data_tf, self.c_data_tf
         ] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(4)]
        [self.u_data_tf, self.v_data_tf, self.p_data_tf
         ] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(3)]
        [self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf
         ] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(3)]

        # physics "uninformed" neural networks
        self.net_cuvp = neural_net(self.t_data,
                                   self.x_data,
                                   self.y_data,
                                   layers=self.layers)

        [
            self.c_data_pred, self.u_data_pred, self.v_data_pred,
            self.p_data_pred
        ] = self.net_cuvp(self.t_data_tf, self.x_data_tf, self.y_data_tf)

        [_, _, _,
         self.p_ref_pred] = self.net_cuvp(self.t_data_tf,
                                          self.x_data_tf * 0.0 + self.x_ref,
                                          self.y_data_tf * 0.0 + self.y_ref)

        # physics "informed" neural networks
        [
            self.c_eqns_pred, self.u_eqns_pred, self.v_eqns_pred,
            self.p_eqns_pred
        ] = self.net_cuvp(self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf)

        [
            self.e1_eqns_pred, self.e2_eqns_pred, self.e3_eqns_pred,
            self.e4_eqns_pred
        ] = Navier_Stokes_2D(self.c_eqns_pred, self.u_eqns_pred,
                             self.v_eqns_pred, self.p_eqns_pred,
                             self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf,
                             self.Pec, self.Rey)

        # loss
        self.loss_c = mean_squared_error(self.c_data_pred, self.c_data_tf)
        self.loss_e1 = mean_squared_error(self.e1_eqns_pred, 0.0)
        self.loss_e2 = mean_squared_error(self.e2_eqns_pred, 0.0)
        self.loss_e3 = mean_squared_error(self.e3_eqns_pred, 0.0)
        self.loss_e4 = mean_squared_error(self.e4_eqns_pred, 0.0)

        self.loss = self.loss_c + \
                    self.loss_e1 + self.loss_e2 + \
                    self.loss_e3 + self.loss_e4

        # relative L2 errors
        self.error_c = relative_error(self.c_data_pred, self.c_data_tf)
        self.error_u = relative_error(self.u_data_pred, self.u_data_tf)
        self.error_v = relative_error(self.v_data_pred, self.v_data_tf)
        self.error_p = relative_error(self.p_data_pred - self.p_ref_pred,
                                      self.p_data_tf)

        # convergence plots
        self.loss_history = []
        self.loss_c_history = []
        self.loss_e1_history = []
        self.loss_e2_history = []
        self.loss_e3_history = []
        self.loss_e4_history = []

        self.error_c_history = []
        self.error_u_history = []
        self.error_v_history = []
        self.error_p_history = []

        # optimizers
        self.learning_rate = tf.placeholder(tf.float32, shape=[])
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate)
        self.train_op = self.optimizer.minimize(self.loss)

        self.sess = tf_session()
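
The __init__ above only builds the TensorFlow 1.x graph; Navier_Stokes_2D presumably returns the residuals of the passive-scalar transport equation and the incompressible Navier-Stokes equations (hence the Pec and Rey arguments), which the e1-e4 losses drive towards zero. A minimal sketch of one Adam step fed through the placeholders defined above (the mini-batch sampling and the model variable are assumptions, not part of the original class):

import numpy as np

# Sample a mini-batch of measurement points and of equation (collocation) points.
idx_data = np.random.choice(model.t_data.shape[0], model.batch_size)
idx_eqns = np.random.choice(model.t_eqns.shape[0], model.batch_size)

feed_dict = {model.t_data_tf: model.t_data[idx_data],
             model.x_data_tf: model.x_data[idx_data],
             model.y_data_tf: model.y_data[idx_data],
             model.c_data_tf: model.c_data[idx_data],
             model.t_eqns_tf: model.t_eqns[idx_eqns],
             model.x_eqns_tf: model.x_eqns[idx_eqns],
             model.y_eqns_tf: model.y_eqns[idx_eqns],
             model.learning_rate: 1e-3}

_, loss_value = model.sess.run([model.train_op, model.loss], feed_dict=feed_dict)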
Example #8
    # Test Data
    snap = np.array([100])
    t_test = T_star[:, snap]
    x_test = X_star[:, snap]
    y_test = Y_star[:, snap]

    c_test = C_star[:, snap]
    u_test = U_star[:, snap]
    v_test = V_star[:, snap]
    p_test = P_star[:, snap]

    # Prediction
    c_pred, u_pred, v_pred, p_pred = model.predict(t_test, x_test, y_test)

    # Error
    error_c = relative_error(c_pred, c_test)
    error_u = relative_error(u_pred, u_test)
    error_v = relative_error(v_pred, v_test)
    error_p = relative_error(p_pred - np.mean(p_pred, axis=0, keepdims=True),
                             p_test - np.mean(p_test, axis=0, keepdims=True))

    print('Error c: %e' % (error_c))
    print('Error u: %e' % (error_u))
    print('Error v: %e' % (error_v))
    print('Error p: %e' % (error_p))

    ################# Save Data ###########################

    C_pred = 0 * C_star
    U_pred = 0 * U_star
    V_pred = 0 * V_star