Example #1
def network_training():
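    # Assumes module-level names not shown in this excerpt: sess (a tf.Session),
    # placeholders b and u_, ops train and loss, the mg_system_data module,
    # hyperparameters epoch_num, num_batch, gridsize, batch_size, plus
    # numpy as np and matplotlib.pyplot as plt.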
    loss_plt = [0] * (epoch_num * num_batch)
    epoch = [0] * (epoch_num * num_batch)
    shift = 0
    for n in range(num_batch):
        b_vals, actual_u_outputs = mg_system_data.gen_data(
            gridsize, batch_size)
        for e in range(epoch_num):
            sess.run(
                train, {
                    b: b_vals.astype(np.float32),
                    u_: actual_u_outputs.astype(np.float32)
                })
            error = sess.run(loss, {
                b: np.array(b_vals),
                u_: np.array(actual_u_outputs)
            })
            print('epoch # ', e + shift, 'training_error =', error)
            loss_plt[e + shift] = error
            epoch[e + shift] = e + shift
        shift += epoch_num
    plt.plot(epoch, loss_plt)
    plt.title("Loss Plot")
    plt.xlabel("Cumulative Epoch")
    plt.ylabel("Mean squared error")
    plt.show()
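
Both examples on this page rely on placeholders and ops (b, u_, loss, train) that are built at module level and not shown in the excerpts. A minimal sketch of what those definitions could look like, assuming a single dense layer and a mean-squared-error loss; the layer shape and the optimizer choice are assumptions, not taken from the source:

import tensorflow as tf

gridsize = 6

# Placeholders for the right-hand side b and the reference solution u_.
b = tf.placeholder(tf.float32, shape=(None, gridsize))
u_ = tf.placeholder(tf.float32, shape=(None, gridsize))

# Hypothetical single dense layer mapping b to a predicted solution.
W = tf.Variable(tf.random_normal((gridsize, gridsize)))
output_u_ = tf.matmul(b, W)

# Mean squared error, matching the y-axis label of the loss plot.
loss = tf.reduce_mean(tf.square(output_u_ - u_))
train = tf.train.GradientDescentOptimizer(0.01).minimize(loss)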
Example #2
def network_training():
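    # Assumes module-level names not shown in this excerpt: placeholders b and u_,
    # ops train and loss, hyperparameters epoch_num, num_batch, num_data, dim,
    # gridsize, batch_size, plus numpy as np and matplotlib.pyplot as plt.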
    init = tf.global_variables_initializer()

    sess = tf.Session()
    sess.run(init)
    loss_plt = [0] * (epoch_num * num_batch)
    epoch = [0] * (epoch_num * num_batch)
    shift = 0
    for n in range(num_batch):
        b_vals, actual_u_outputs = mg_system_data.gen_data(
            gridsize, num_data, dim)
        init_vars = {}
        for var in tf.trainable_variables():
            init_vars[var.name] = sess.run(var)
        for e in range(epoch_num):
            for i in range(int(num_data / batch_size)):
                # Advance by whole batches; the original sliced
                # b_vals[i:i + batch_size], striding by one sample per step.
                start = i * batch_size
                batch_x = b_vals[start:start + batch_size, :, :]
                batch_y = actual_u_outputs[start:start + batch_size, :, :]
                sess.run(train, {
                    b: batch_x.astype(np.float32),
                    u_: batch_y.astype(np.float32)
                })
            error = sess.run(loss, {
                b: np.array(b_vals),
                u_: np.array(actual_u_outputs)
            })
            print('epoch # ', e + shift, 'training_error =', error)
            loss_plt[e + shift] = error
            epoch[e + shift] = e + shift
        converged_vars = {}
        for var in tf.trainable_variables():
            converged_vars[var.name] = sess.run(var)
        print('initial params = {}'.format(init_vars))
        print('converged params = {}'.format(converged_vars))
        print('epoch # ', e + shift, 'training_error =', error)
        # Mirror Example #1: advance the offset so the next batch's epochs land
        # in fresh slots of loss_plt and epoch.
        shift += epoch_num
    plt.plot(epoch, loss_plt)
    plt.title("Loss Plot")
    plt.xlabel("Cumulative Epoch")
    plt.ylabel("Mean squared error")
    plt.show()
    return error, converged_vars, init_vars, epoch, loss_plt
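
A hypothetical call site for this version; the unpacking order follows the return statement above, numpy is assumed imported as np, and the names are illustrative:

final_error, converged_vars, init_vars, epochs, losses = network_training()
print('final MSE:', final_error)
# Compare converged parameters against their initial values.
for name in init_vars:
    delta = np.abs(converged_vars[name] - init_vars[name]).max()
    print(name, 'max |change| =', delta)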
Example #3

'''
	Author:
	Sarah K Gage
	University of Colorado Boulder
	'''

import tensorflow as tf
import numpy as np
import math
import matplotlib.pyplot as plt

import mg_system_data
A = mg_system_data.Laplacian(6)

batch_size = 1
gridsize = 6
train_b_sets, train_solution_sets = mg_system_data.gen_data(gridsize, 1)
train_b_sets = train_b_sets.reshape((1, gridsize))
train_solution_sets = train_solution_sets.reshape((1, gridsize))
test_b_sets, test_solution_sets = mg_system_data.gen_data(gridsize, 1)
test_b_sets = test_b_sets.reshape((1, gridsize))
test_solution_sets = test_solution_sets.reshape((1, gridsize))

print(train_b_sets.shape, test_b_sets.shape)
print('training b set = ', train_b_sets)
print('training u set = ', train_solution_sets)
print('A shape = ', A.shape)
print(np.matmul(train_solution_sets, A))
print(train_solution_sets.shape, test_solution_sets.shape)
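
# The np.matmul(train_solution_sets, A) print above looks like a sanity check
# that the generated pairs satisfy the linear system. Assuming gen_data returns
# (b, u) with A u = b and that the Laplacian A is symmetric, so the row-vector
# product u A equals (A u) transposed, the check can be made explicit. This
# check is an illustration, not part of the original source:
residual = np.matmul(train_solution_sets, A) - train_b_sets
print('max |u A - b| =', np.abs(residual).max())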


def build_model(b):
    # Body omitted in this excerpt; presumably constructs the network mapping b
    # to a predicted solution u.
    ...


def network_training():
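    # Assumes module-level names not shown in this excerpt: placeholders b, u_,
    # and learning_rate_placeholder; ops train, loss, and hessian; a cond_num
    # helper; and hyperparameters epoch_num, num_batch, num_data, dim, equation,
    # and learning_rate.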
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    loss_plt = [0] * (epoch_num * num_batch)
    epoch = [0] * (epoch_num * num_batch)
    shift = 0
    for n in range(num_batch):
        b_vals, actual_u_outputs = mg_system_data.gen_data(
            gridsize, num_data, dim, equation)
        init_vars = {}
        for var in tf.trainable_variables():
            init_vars[var.name] = sess.run(var)
        error_prime = np.inf
        learning_rate_prime = learning_rate
        for e in range(epoch_num):
            for i in range(int(num_data / batch_size)):
                # Advance by whole batches; the original sliced
                # b_vals[i:i + batch_size], striding by one sample per step.
                start = i * batch_size
                batch_x = b_vals[start:start + batch_size, :, :]
                batch_y = actual_u_outputs[start:start + batch_size, :, :]
                sess.run(
                    train, {
                        b: batch_x.astype(np.float32),
                        u_: batch_y.astype(np.float32),
                        learning_rate_placeholder: learning_rate_prime
                    })
            error = sess.run(
                loss, {
                    b: np.array(b_vals),
                    u_: np.array(actual_u_outputs),
                    learning_rate_placeholder: learning_rate_prime
                })
            print('epoch # ', e + shift, 'training_error =', error)
            if math.isnan(error):
                break
            loss_plt[e + shift] = error
            epoch[e + shift] = e + shift
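            # Crude hand-tuned schedule: while the loss keeps decreasing, boost
            # the learning rate as the loss drops below successive thresholds
            # (300, 50, 3, 0.05); any increase in loss resets it to the base rate.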
            if error_prime < error:
                # Loss increased: fall back to the base learning rate.
                learning_rate_prime = learning_rate
            else:
                learning_rate_prime = learning_rate
                if error <= 300:
                    learning_rate_prime *= 10
                if error <= 50:
                    learning_rate_prime *= 100
                if error <= 3:
                    learning_rate_prime *= 10
                if error <= 0.05:
                    learning_rate_prime *= 100
            error_prime = error

            hess = sess.run(
                hessian, {
                    b: batch_x.astype(np.float32),
                    u_: batch_y.astype(np.float32),
                    learning_rate_placeholder: learning_rate_prime
                })
            hess_cond_num = cond_num(hess)
            print("hessian = %s" % hess_cond_num)

        converged_vars = {}
        for var in tf.trainable_variables():
            converged_vars[var.name] = sess.run(var)
        # Mirror Example #1: advance the offset so the next batch's epochs land
        # in fresh slots of loss_plt and epoch.
        shift += epoch_num
    plt.plot(epoch, loss_plt)
    plt.title("Loss Plot")
    plt.xlabel("Cumulative Epoch")
    plt.ylabel("Mean squared error")
    plt.yscale('log')
    #plt.show()
    return error, converged_vars, init_vars, epoch, loss_plt
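
Example #3 calls a cond_num helper (and a hessian op) defined elsewhere in the source file. A minimal sketch of what cond_num might compute, assuming it reports the 2-norm condition number via NumPy; this is an assumption, not the author's implementation:

import numpy as np

def cond_num(matrix):
    # Ratio of largest to smallest singular value (the 2-norm condition number).
    return np.linalg.cond(np.asarray(matrix))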