from matplotlib import pyplot as plt """ Experiment Description ---------------------- Return display the initial and final weights of the network """ P = 200 Q = 100 learning_rate = 0.05 t_max = 100 #total epochs trainX, trainY, testX, testY = read_file("xi(1).csv", "tau(1).csv", P, Q) model = Model(input_size=len(trainX[0])) model.add_layer(states=2, activation='tanh', fixed_weights=False) model.add_layer(states=1, activation=None, fixed_weights=1) # model.display() # print(trainX.shape) initial_weights, final_weights = model.train(trainX, trainY, testX, testY, ephochs=t_max, learning_rate=learning_rate, return_weights=True, verbose=False)
R = 50
P = 200
Q = 100
learning_rate = 0.05
t_max = 100  # total epochs

trainX, trainY, testX, testY = read_file("xi(1).csv", "tau(1).csv", P, Q)

Es = []
E_tests = []
for i in range(R):
    print("Run " + str(i + 1) + " of " + str(R))
    model = Model(input_size=len(trainX[0]))
    model.add_layer(states=2, activation='tanh', fixed_weights=False)
    model.add_layer(states=1, activation=None, fixed_weights=True)
    # model.display()
    # print(trainX.shape)
    E, E_test = model.train(trainX, trainY, testX, testY,
                            ephochs=t_max, learning_rate=learning_rate)
    Es.append(E)
    E_tests.append(E_test)
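# An illustrative summary step (added as an example), assuming each E and E_test returned
# by model.train is a per-epoch error curve of length t_max: average the curves over the
# R runs and plot the mean training and test error per epoch.
import numpy as np
from matplotlib import pyplot as plt

mean_E = np.mean(np.asarray(Es), axis=0)
mean_E_test = np.mean(np.asarray(E_tests), axis=0)
plt.plot(range(1, t_max + 1), mean_E, label="training error")
plt.plot(range(1, t_max + 1), mean_E_test, label="test error")
plt.xlabel("epoch")
plt.ylabel("error (averaged over {} runs)".format(R))
plt.legend()
plt.show()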
from Network import Model
from PSO import optimize
from InputParam import InputParam
import numpy as np
import time

data = np.loadtxt('./data/data.csv', delimiter=',', skiprows=2)[:, 1:7]
model = Model(stride=30)
param = InputParam()

start = time.perf_counter()  # time.clock() was removed in Python 3.8
# Call optimize
optimize(20, data, 2, model, param)
end = time.perf_counter()
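# A small follow-up sketch (added as an example): report the elapsed time. If the result
# is needed, the call above can capture the return value, assumed (as in the related
# experiment script) to be a pair: best_x, best_y = optimize(20, data, 2, model, param).
print("Optimization took {:.2f} seconds".format(end - start))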
from Network import Model
import numpy as np

'''Original interface'''
data = np.loadtxt('./data/data.csv', delimiter=',', skiprows=2)[:, 1:7]

# Create the Model. stride is the history length; changing model.stride selects which model is used.
# e.g. model.stride = n, where n is one of 5, 10, 15, 20, 25, 30, 35, 40, 45, 50
model = Model(stride=30)

# Fully-connected neural network prediction
# data_x is the input; its shape is (n, 6), where n is the number of samples
model.stride = 25
data_x = data[-25:, :]
# data_y is the predicted result; the output matrix has shape (n, 4), where n is the number of samples
data_y = model.predict_LSTM(data_x)
print(data_y)

# LSTM model prediction
# data_x is the input; the input matrix has shape (n, stride*6), where stride is the step length and n is the number of samples
# data_x = [[1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6]]
# data_y is the predicted result; the output matrix has shape (4, n), where n is the number of samples
# model.stride = 5
data_y = model.predict_Net(data_x)
print(type(data_y[0]))

############################################
T_len, H2_len, CH4_len, CO_len = 5, 10, 5, 20
from Network import Model
import numpy as np

'''Original interface'''
data = np.loadtxt('./data/data.csv', delimiter=',', skiprows=2)[:, 1:7]

# Create the Model. stride is the history length; changing model.stride selects which model is used.
# e.g. model.stride = n, where n is one of 5, 10, 15, 20, 25, 30, 35, 40, 45, 50
model = Model(stride=30)

# Fully-connected neural network prediction
# data_x is the input; its shape is (n, 6), where n is the number of samples
model.stride = 25
data_x = data[-25:, :]
# data_y is the predicted result; the output matrix has shape (n, 4), where n is the number of samples
data_y = model.predict_Net(data_x)
print(data_y)

# LSTM model prediction
# data_x is the input; the input matrix has shape (n, stride*6), where stride is the step length and n is the number of samples
data_x = [[1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6],
          [1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6]]
# data_y is the predicted result; the output matrix has shape (4, n), where n is the number of samples
model.stride = 5
data_y = model.test_future_LSTM()
print(data_y)
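# An illustrative sketch (added as an example) of building an LSTM input with the shape
# described above, (n, stride*6), by flattening sliding windows of `stride` consecutive
# rows of the 6-column data. The windowing scheme here is an assumption for illustration.
stride = 5
n = data.shape[0] - stride + 1
lstm_x = np.array([data[i:i + stride, :].reshape(-1) for i in range(n)])
print(lstm_x.shape)  # (n, stride * 6)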
from Network import Model
from PSO import optimize
from InputParam import InputParam
import numpy as np
import time

data = np.loadtxt('./data/data.csv', delimiter=',', skiprows=2)[:, 1:7]
model = Model(stride=30)
param = InputParam()

best_x = [[28.12421607, 14.41792793], [74.20071455, 9.92461491],
          [58.43944117, 11.0825983], [69.17058085, 10.75863371],
          [36.35079316, 11.28547075], [57.59862793, 10.92375781],
          [27.67301199, 14.30238826], [75.87876121, 13.22701089],
          [100.0, 16.10638031], [100.0, 13.78599625]]
best_y = 26.45409999049893

# Call optimize
# best_x, best_y = optimize(20, data, 2, model, param)

print('best_x = ', best_x)
print('best_y = ', best_y)

model.stride = 10
predict_y = model.predict_Net(best_x)
print(predict_y)
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical

# NOTE: Model is assumed to be the project's own network class, imported from a local module.

if __name__ == "__main__":
    # Load data
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # Flatten each 28 x 28 input image into a 784-dimensional vector
    x_train = x_train.reshape(len(x_train), 28 * 28)
    x_test = x_test.reshape(len(x_test), 28 * 28)

    # Convert the labels to one-hot encoding
    y_train = to_categorical(y_train, 10)
    y_test = to_categorical(y_test, 10)

    # Build model
    model = Model()
    model.add_layer(in_dim=28 * 28, out_dim=500, activation="tanh")
    model.add_layer(out_dim=500, activation="sigmoid")
    model.add_layer(out_dim=10, activation="softmax")

    # Compile model
    model.compile(loss="cross entropy")

    # Train model
    model.fit(x_train, y_train, batch_size=256, epochs=5)

    # Predict and evaluate
    loss, accuracy = model.evaluate(x_test, y_test)
    print("Total loss for prediction: {}, accuracy: {}".format(loss, accuracy))
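    # A small illustrative spot-check (added as an example): recover an integer class
    # label from its one-hot encoding with argmax, to verify the preprocessing above.
    import numpy as np
    print("first test label:", int(np.argmax(y_test[0])))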