Code example #1
File: burgers.py  Project: wzpy/SA-PINNs
def fit(x_f, t_f, x0, t0, u0, x_lb, t_lb, x_ub, t_ub, col_weights, u_weights,
        tf_iter, newton_iter):
    # Built-in support for mini-batching; batch size is set to N_f (i.e. full batch) by default
    batch_sz = N_f
    n_batches = N_f // batch_sz
    start_time = time.time()
    tf_optimizer = tf.keras.optimizers.Adam(lr=0.005, beta_1=.90)
    tf_optimizer_coll = tf.keras.optimizers.Adam(lr=0.005, beta_1=.90)
    tf_optimizer_u = tf.keras.optimizers.Adam(lr=0.005, beta_1=.90)

    print("starting Adam training")

    for epoch in range(tf_iter):
        for i in range(n_batches):

            x0_batch = x0  #[i*batch_sz:(i*batch_sz + batch_sz),]
            t0_batch = t0  #[i*batch_sz:(i*batch_sz + batch_sz),]
            u0_batch = u0  #[i*batch_sz:(i*batch_sz + batch_sz),]

            x_f_batch = x_f[i * batch_sz:(i * batch_sz + batch_sz), ]
            t_f_batch = t_f[i * batch_sz:(i * batch_sz + batch_sz), ]

            with tf.GradientTape(persistent=True) as tape:
                loss_value, mse_0, mse_f = loss(x_f_batch, t_f_batch, x0_batch,
                                                t0_batch, u0_batch, x_lb, t_lb,
                                                x_ub, t_ub, col_weights,
                                                u_weights)
                grads = tape.gradient(loss_value, u_model.trainable_variables)
                grads_col = tape.gradient(loss_value, col_weights)
                grads_u = tape.gradient(loss_value, u_weights)
            tf_optimizer.apply_gradients(
                zip(grads, u_model.trainable_variables))
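            # Negated gradients: Adam then takes a gradient-ascent step on the
            # self-adaptive weights, increasing them where the loss is largest.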
            tf_optimizer_coll.apply_gradients(zip([-grads_col], [col_weights]))
            tf_optimizer_u.apply_gradients(zip([-grads_u], [u_weights]))
            del tape

        if epoch % 10 == 0:
            elapsed = time.time() - start_time
            print('It: %d, Time: %.2f' % (epoch, elapsed))
            tf.print(
                f"mse_0: {mse_0}  mse_f: {mse_f}   total loss: {loss_value}")
            start_time = time.time()

    print(col_weights)
    # L-BFGS-B optimization
    print("Starting L-BFGS training")

    loss_and_flat_grad = get_loss_and_flat_grad(x_f_batch, t_f_batch, x0_batch,
                                                t0_batch, u0_batch, x_lb, t_lb,
                                                x_ub, t_ub, col_weights,
                                                u_weights)

    lbfgs(loss_and_flat_grad,
          get_weights(u_model),
          Struct(),
          maxIter=newton_iter,
          learningRate=0.8)
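
All of the fit routines on this page rely on names defined elsewhere in their scripts (N_f, u_model, loss, grad, get_loss_and_flat_grad, get_weights, lbfgs, Struct). Below is a minimal sketch of the surrounding setup that example #1 assumes; the network size, point counts, and weight initialization are chosen purely for illustration and are not the project's actual values.

import tensorflow as tf

# Illustrative sizes only; the real script derives these from its own sampling.
N_f = 10000   # number of collocation points
N_0 = 512     # number of initial-condition points

# Stand-in for the PINN itself; the real u_model is defined elsewhere in the script.
u_model = tf.keras.Sequential([
    tf.keras.layers.Dense(20, activation="tanh", input_shape=(2,)),
    tf.keras.layers.Dense(20, activation="tanh"),
    tf.keras.layers.Dense(1),
])

# Self-adaptive weights: one trainable weight per collocation / initial point.
col_weights = tf.Variable(tf.random.uniform([N_f, 1]))
u_weights = tf.Variable(tf.random.uniform([N_0, 1]))

# fit would then be called on the sampled points, e.g.:
# fit(x_f, t_f, x0, t0, u0, x_lb, t_lb, x_ub, t_ub,
#     col_weights, u_weights, tf_iter=10000, newton_iter=10000)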
Code example #2
File: AC.py  Project: zhucer2003/SA-PINNs
def fit(x_f, t_f, x0, t0, u0, x_lb, t_lb, x_ub, t_ub, col_weights, u_weights,
        tf_iter, newton_iter):

    #Can adjust batch size for collocation points, here we set it to N_f
    batch_sz = N_f
    n_batches = N_f // batch_sz

    start_time = time.time()
    # create optimizers for the network weights, the collocation-point weights, and the initial/boundary weights
    tf_optimizer = tf.keras.optimizers.Adam(lr=0.005, beta_1=.99)
    tf_optimizer_weights = tf.keras.optimizers.Adam(lr=0.005, beta_1=.99)
    tf_optimizer_u = tf.keras.optimizers.Adam(lr=0.005, beta_1=.99)

    print("starting Adam training")

    # For mini-batch (if used)
    for epoch in range(tf_iter):
        for i in range(n_batches):

            x0_batch = x0
            t0_batch = t0
            u0_batch = u0

            x_f_batch = x_f[i * batch_sz:(i * batch_sz + batch_sz), ]
            t_f_batch = t_f[i * batch_sz:(i * batch_sz + batch_sz), ]

            loss_value, mse_0, mse_b, mse_f, grads, grads_col, grads_u = grad(
                u_model, x_f_batch, t_f_batch, x0_batch, t0_batch, u0_batch,
                x_lb, t_lb, x_ub, t_ub, col_weights, u_weights)

            tf_optimizer.apply_gradients(
                zip(grads, u_model.trainable_variables))
            tf_optimizer_weights.apply_gradients(
                zip([-grads_col, -grads_u], [col_weights, u_weights]))

        if epoch % 10 == 0:
            elapsed = time.time() - start_time
            print('It: %d, Time: %.2f' % (epoch, elapsed))
            tf.print(
                f"mse_0: {mse_0}  mse_b  {mse_b}  mse_f: {mse_f}   total loss: {loss_value}"
            )
            start_time = time.time()

    # L-BFGS-B optimization
    print("Starting L-BFGS training")

    loss_and_flat_grad = get_loss_and_flat_grad(x_f_batch, t_f_batch, x0_batch,
                                                t0_batch, u0_batch, x_lb, t_lb,
                                                x_ub, t_ub, col_weights,
                                                u_weights)

    lbfgs(loss_and_flat_grad,
          get_weights(u_model),
          Struct(),
          maxIter=newton_iter,
          learningRate=0.8)
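
Each example finishes by handing a loss_and_flat_grad closure to an external lbfgs routine. get_loss_and_flat_grad itself is not shown on this page; the sketch below illustrates what such a helper typically does: write a flat weight vector back into u_model, evaluate loss, and return the scalar loss together with the flattened gradient. The unpack/flatten bookkeeping here is an assumption for illustration, not the project's actual utility.

import tensorflow as tf

def get_loss_and_flat_grad(x_f, t_f, x0, t0, u0, x_lb, t_lb, x_ub, t_ub,
                           col_weights, u_weights):
    # Returns f(w) -> (loss, flat_grad), the interface an L-BFGS driver expects.
    def loss_and_flat_grad(w):
        # Unpack the flat vector w back into the model's variables.
        idx = 0
        for v in u_model.trainable_variables:
            n = int(tf.size(v))
            v.assign(tf.reshape(w[idx:idx + n], v.shape))
            idx += n
        with tf.GradientTape() as tape:
            loss_value, *_ = loss(x_f, t_f, x0, t0, u0, x_lb, t_lb,
                                  x_ub, t_ub, col_weights, u_weights)
        grads = tape.gradient(loss_value, u_model.trainable_variables)
        # Flatten the per-variable gradients into a single vector.
        flat_grad = tf.concat([tf.reshape(g, [-1]) for g in grads], axis=0)
        return loss_value, flat_grad
    return loss_and_flat_grad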
Code example #3
File: AC.py  Project: wzpy/SA-PINNs
def fit(x_f, t_f, x0, t0, u0, x_lb, t_lb, x_ub, t_ub, col_weights, u_weights, tf_iter, newton_iter):

    #Can adjust batch size for collocation points, here we set it to N_f
    batch_sz = N_f
    n_batches =  N_f // batch_sz

    start_time = time.time()
    # create optimizers for the network weights, the collocation-point weights, and the initial/boundary weights
    tf_optimizer = tf.keras.optimizers.Adam(lr=0.005, beta_1=.99)
    tf_optimizer_coll = tf.keras.optimizers.Adam(lr=0.005, beta_1=.99)
    tf_optimizer_u = tf.keras.optimizers.Adam(lr=0.005, beta_1=.99)

    print("starting Adam training")

    # For mini-batch (if used)
    for epoch in range(tf_iter):
        for i in range(n_batches):

            x0_batch = x0
            t0_batch = t0
            u0_batch = u0

            x_f_batch = x_f[i*batch_sz:(i*batch_sz + batch_sz),]
            t_f_batch = t_f[i*batch_sz:(i*batch_sz + batch_sz),]

            with tf.GradientTape(persistent=True) as tape:
                loss_value, mse_0, mse_b, mse_f = loss(x_f_batch, t_f_batch, x0_batch, t0_batch, u0_batch, x_lb, t_lb, x_ub, t_ub, col_weights, u_weights)
                grads = tape.gradient(loss_value, u_model.trainable_variables)
                grads_col = tape.gradient(loss_value, col_weights)
                grads_u = tape.gradient(loss_value, u_weights)

            #Apply individual gradients
            tf_optimizer.apply_gradients(zip(grads, u_model.trainable_variables))
            # This is the "max" step of the minimax formulation: the negated gradients
            # make Adam increase the weight vector where the loss identifies the largest residuals
            tf_optimizer_coll.apply_gradients(zip([-grads_col], [col_weights]))
            tf_optimizer_u.apply_gradients(zip([-grads_u], [u_weights]))
            del tape

        if epoch % 10 == 0:
            elapsed = time.time() - start_time
            print('It: %d, Time: %.2f' % (epoch, elapsed))
            tf.print(f"mse_0: {mse_0}  mse_b  {mse_b}  mse_f: {mse_f}   total loss: {loss_value}")
            start_time = time.time()

    print(col_weights)
    # L-BFGS-B optimization
    print("Starting L-BFGS training")

    loss_and_flat_grad = get_loss_and_flat_grad(x_f_batch, t_f_batch, x0_batch, t0_batch, u0_batch, x_lb, t_lb, x_ub, t_ub, col_weights, u_weights)

    lbfgs(loss_and_flat_grad,
          get_weights(u_model),
          Struct(),
          maxIter=newton_iter,
          learningRate=0.8)
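
The zip([-grads_col], [col_weights]) and zip([-grads_u], [u_weights]) updates above implement the ascent half of the self-adaptive scheme: handing Adam the negated gradient turns its minimization step into gradient ascent on the weights, so they grow wherever the residuals are large. A self-contained toy (all names here are illustrative, not from the project) showing the effect:

import tensorflow as tf

w = tf.Variable(1.0)          # stands in for one self-adaptive weight
residual = tf.constant(2.0)   # stands in for a fixed PDE residual

opt = tf.keras.optimizers.Adam(learning_rate=0.1)

for _ in range(5):
    with tf.GradientTape() as tape:
        loss_value = tf.square(w * residual)   # weighted squared residual
    grad_w = tape.gradient(loss_value, w)
    # Negating the gradient makes the Adam step ascend, so w increases.
    opt.apply_gradients(zip([-grad_w], [w]))
    print(float(w))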
Code example #4
def fit(x_f, y_f, x_lb, y_lb, x_ub, y_ub, x_rb, y_rb, x_lftb, y_lftb,
        col_weights, tf_iter, newton_iter):

    batch_sz = N_f
    n_batches = N_f // batch_sz
    start_time = time.time()
    tf_optimizer = tf.keras.optimizers.Adam(lr=0.001, beta_1=.99)
    tf_optimizer_coll = tf.keras.optimizers.Adam(lr=0.001, beta_1=.99)

    print("starting Adam training")
    for epoch in range(tf_iter):
        for i in range(n_batches):

            x_f_batch = x_f[i * batch_sz:(i * batch_sz + batch_sz), ]
            y_f_batch = y_f[i * batch_sz:(i * batch_sz + batch_sz), ]

            with tf.GradientTape(persistent=True) as tape:
                loss_value, mse_b, mse_f = loss(x_f_batch, y_f_batch, x_lb,
                                                y_lb, x_ub, y_ub, x_rb, y_rb,
                                                x_lftb, y_lftb, col_weights)
                grads = tape.gradient(loss_value, u_model.trainable_variables)
                grads_col = tape.gradient(loss_value, col_weights)
            tf_optimizer.apply_gradients(
                zip(grads, u_model.trainable_variables))
            tf_optimizer_coll.apply_gradients(zip([-grads_col], [col_weights]))

            del tape

        if epoch % 10 == 0:
            elapsed = time.time() - start_time
            print('It: %d, Time: %.2f' % (epoch, elapsed))
            tf.print(
                f"mse_b: {mse_b}  mse_f: {mse_f}   total loss: {loss_value}")
            start_time = time.time()

    print("Starting L-BFGS training")

    loss_and_flat_grad = get_loss_and_flat_grad(x_f, y_f, x_lb, y_lb, x_ub,
                                                y_ub, x_rb, y_rb, x_lftb,
                                                y_lftb, col_weights)

    lbfgs(loss_and_flat_grad,
          get_weights(u_model),
          Struct(),
          maxIter=newton_iter,
          learningRate=0.8)