import numpy as np
import matplotlib.pyplot as plt
import pyrenn
from sklearn.metrics import mean_squared_error


def train_set():
    global x_train
    global y_train
    global x_test
    global y_test
    print(len(x_train))
    print(len(y_train))
    structure = [24, 12, 1]
    # round the targets (rebinding the loop variable would be a no-op)
    y_train = [round(item, 3) for item in y_train]
    x_train = np.asarray(x_train)
    y_train = np.asarray(y_train)
    y_test = np.asarray(y_test)
    x_test = np.asarray(x_test)
    print(x_train.ndim)
    rnn = pyrenn.CreateNN(structure)  # dIntern=[1]
    rnn = pyrenn.train_LM(np.transpose(x_train), np.transpose(y_train), rnn,
                          verbose=True, k_max=200, E_stop=1e-7)
    out_test = pyrenn.NNOut(np.transpose(x_test), rnn)
    plt.plot(y_test, 'r', label='actual')
    plt.plot(out_test, 'b', label='predicted')
    mse = mean_squared_error(y_test, out_test)
    print("MSE = " + str(mse))
    plt.legend()
    plt.show()
import json
import sys

import numpy as np
import pyrenn as prn


def main():
    # get our data as an array from read_in()
    res = json.loads(sys.stdin.read())
    # data = [ [ 1578.0077, 0 ],[ 1581.1876, 5 ],[ 1452.4627, 33 ],[ 1449.7326, 58 ],[ 1501.0392, 80 ],[ 1460.4557, 110 ],[ 1492.824, 130 ],[ 1422.3826, 155 ],[ 1404.3431, 180 ],[ 1480.74, 210 ],[ 1410.3936, 230 ],[ 1612.336, 255 ],[ 1729.343, 280 ],[ 1735.5231, 305 ],[ 1632.595, 330 ],[ 1648.3143, 355 ],[ 1640.1972, 380 ],[ 1658.7949, 405 ],[ 1675.4953, 430 ],[ 1712.2672, 455 ],[ 1623.8666, 480 ],[ 1622.154, 505 ],[ 1630.9466, 530 ],[ 1595.8407, 555 ],[ 1548.5976, 580 ],[ 1598.6558, 605 ],[ 1624.0902, 630 ],[ 1616.8663, 655 ],[ 1661.251, 680 ],[ 2012.605, 705 ],[ 1904.3356, 730 ],[ 1760.5438, 755 ],[ 2449.3183, 780 ],[ 2417.4744, 805 ],[ 2431.7134, 830 ],[ 2391.2651, 855 ],[ 2402.8298, 885 ],[ 2417.0901, 905 ],[ 2403.8137, 930 ],[ 2407.1756, 955 ],[ 2363.049, 980 ],[ 2364.4589, 1010 ],[ 2368.4206, 1030 ],[ 2338.8434, 1055 ],[ 2369.9809, 1080 ],[ 2353.5891, 1105 ],[ 2380.8422, 1130 ],[ 2519.2731, 1155 ],[ 2557.5253, 1180 ],[ 2536.3437, 1205 ],[ 2517.6042, 1235 ],[ 2543.7378, 1255 ],[ 2355.5603, 1280 ],[ 2347.445, 1305 ],[ 2269.8631, 1335 ],[ 2307.6435, 1355 ],[ 2274.5249, 1380 ],[ 2319.0633, 1405 ],[ 2251.9456, 1430 ],[ 2273.7241, 1455 ],[ 2250.0617, 1480 ],[ 2272.8212, 1505 ],[ 2367.9611, 1530 ],[ 2351.8406, 1555 ],[ 2348.4958, 1580 ],[ 2308.7974, 1605 ],[ 2290.4632, 1630 ],[ 2303.6924, 1655 ],[ 2218.8104, 1680 ],[ 2260.9153, 1705 ],[ 2236.759, 1730 ],[ 2238.0003, 1755 ],[ 2222.3537, 1780 ],[ 2288.0802, 1805 ],[ 2240.4641, 1830 ],[ 2258.3908, 1855 ],[ 2175.4428, 1880 ],[ 2247.978, 1905 ],[ 2234.6417, 1930 ],[ 2232.0709, 1955 ],[ 2216.933, 1980 ],[ 2219.6263, 2005 ],[ 2304.114, 2030 ],[ 2230.2487, 2055 ],[ 2261.5, 2070 ] ]

    # create a numpy array
    np_data = np.array(res['data'])
    # np_data = np.array(data)
    P = np_data[:, 1]
    # number of future points to predict (integer division keeps it whole)
    steps = res['predict'] // res['step']
    # steps = 25
    # extend the time axis with `steps` future points, spaced 25 apart
    Pl = np.concatenate((P, P[-1] + (np.arange(1, steps).T * 25)))
    Y = np_data[:, 0]
    nn = [1, 5, 5, 1]
    dIn = [1, 2, 3]
    dIntern = []
    dOut = [1, 2, 3, 4]
    net = prn.CreateNN(nn, dIn, dIntern, dOut)
    net = prn.train_LM(P, Y, net, 1000, verbose=0)
    print('/')
    y_ap = prn.NNOut(Pl, net)
    result = np.column_stack((Pl, y_ap))
    print(result.tolist())
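# Hedged usage sketch for main() above. The payload shape is inferred from the
# keys the function reads (res['data'], res['predict'], res['step']) and the
# commented sample data; the values and the "predict.py" filename are
# illustrative assumptions, not from the original project.
import json
import subprocess
import sys

payload = {
    "data": [[1578.0077, 0], [1581.1876, 25], [1452.4627, 50]],  # [value, time]
    "predict": 625,  # how far ahead to predict, in time units
    "step": 25,      # spacing of the time axis
}
proc = subprocess.run([sys.executable, "predict.py"],  # hypothetical filename
                      input=json.dumps(payload), capture_output=True, text=True)
print(proc.stdout)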
import numpy as np
import pyrenn

# generateFilenames, final_OP and accuracy are project helpers defined elsewhere


def trainGclmNN(train_data, f):
    fname, target_OP = generateFilenames(f)
    target_OP = np.array(target_OP)
    net = pyrenn.CreateNN([48, 20, 20, 1])
    # target_OP = np.zeros((1, n))
    net = pyrenn.train_LM(train_data.transpose(), np.array(target_OP), net,
                          k_max=500, E_stop=0.5, verbose=True)
    y = pyrenn.NNOut(train_data.transpose(), net)
    for i, j in zip(final_OP(y), target_OP.transpose()):
        print(i, j)
    accuracy(final_OP(y), target_OP.transpose())
    return net
import numpy as np
import pyrenn


def rnn(training_set_features, training_set_class, test_set_features,
        test_set_class):
    # 1D numpy arrays
    # rows: inputs or outputs
    # columns: samples
    P = np.array(training_set_features)
    P = np.transpose(P)
    Y = np.array(training_set_class)
    Y = np.reshape(Y, (-1, len(training_set_class)))
    Ptest = np.array(test_set_features)
    Ptest = np.transpose(Ptest)
    Ytest = np.array(test_set_class)
    Ytest = np.reshape(Ytest, (-1, len(test_set_class)))
    net = pyrenn.CreateNN([9, 18, 18, 1], dIn=[0], dIntern=[], dOut=[])
    net = pyrenn.train_LM(P, Y, net, verbose=True, k_max=30, E_stop=1e-3)
    y = pyrenn.NNOut(P, net)
    ytest = pyrenn.NNOut(Ptest, net)
    create_predictions_file(ytest)
    # fig = plt.figure(figsize=(11, 7))
# Prints the correlation between the average of model outputs and the targets
# and then the correlation between the most recent model output and the targets
print(np.corrcoef(nn_predictions.T, Y_test.T)[0, 1]**2)
print(np.corrcoef(model.predict(X_test).T, Y_test.T)[0, 1]**2)
nn_predictions = nn_predictions[:, 0]

# A scatter of Tensorflow model claims predictions vs. actual claims
pl.scatter(nn_predictions.T, Y_test.T)
pl.show()

# Creating a neural network using the Levenberg-Marquardt backpropagation
# training function, used for quick descent training and possibly a more
# accurate prediction. Fewer hidden layers and fewer nodes are used due to a
# larger propensity to overfit.
# Cannot use a validation set for early stopping in pyrenn, so these two lines
# are used to find convergence. It seems to converge around 10 epochs, so we
# stop early at 10 epochs to avoid overfitting on a small dataset.
net = pyrenn.CreateNN([8, 5, 1])
net = pyrenn.train_LM(X_train.T, Y_train.T, net, verbose=1, k_max=20)

# The predictions are averaged across many different trained models
for i in range(0, 10):
    print(i)
    net = pyrenn.CreateNN([8, 5, 1])
    net = pyrenn.train_LM(X_train.T, Y_train.T, net, verbose=0, k_max=10)
    if i == 0:
        LM_predictions = pyrenn.NNOut(X_test.T, net)
    else:
        LM_predictions = (LM_predictions * i + pyrenn.NNOut(X_test.T, net)) / (i + 1)
    print(i)

# Prints the correlation between the average of model outputs and the targets
# and then the correlation between the most recent model output and the targets
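# An equivalent (and arguably clearer) form of the incremental running mean in
# the loop above: collect every model's predictions, then take a single mean.
# This is a sketch assuming X_train, Y_train and X_test are laid out as in the
# surrounding code; it produces the same LM_predictions.
import numpy as np
import pyrenn

all_predictions = []
for _ in range(10):
    net = pyrenn.CreateNN([8, 5, 1])
    net = pyrenn.train_LM(X_train.T, Y_train.T, net, verbose=0, k_max=10)
    all_predictions.append(pyrenn.NNOut(X_test.T, net))
LM_predictions = np.mean(all_predictions, axis=0)  # same result as the running mean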
    # min-max normalisation of dc2_p (body of the preceding loop)
    temp = (dc2_p[i] - min(dc2_p)) / (max(dc2_p) - min(dc2_p))
    dc2_p_temp.append(temp)
dc2_p = dc2_p_temp
row_temp = np.array(row_temp)
dc1_p = np.array(dc1_p)
dc2_p = np.array(dc2_p)
G_t = np.array(G_t)
print(dc1_p)
net = prn.CreateNN([1, 13, 20, 1])
net = prn.train_LM(dc1_p, G_t, net, verbose=True, k_max=1000,
                   dampfac=0.2, dampconst=5, E_stop=1e-5)
csv_file_training = r"training_data_unnormalized.csv"
row_temp_training = []
dc1_p_training = []
dc2_p_training = []
G_t_training = []
with open(csv_file_training, 'r', newline='') as f:  # text mode for Python 3's csv
    next(f)
    reader = csv.reader(f)
# StratifiedKFold
skf = StratifiedKFold(n_splits=2)
skf.get_n_splits(data_X, data_Y)

# create ANN: 252 inputs, 3 outputs (probability for each category)
net = prn.CreateNN([gr, 20, 20, 20, 20, 3], dIn=[0], dIntern=[1], dOut=[])

# StratifiedKFold
tr = np.transpose
test_Y_cat = np.array([])
prod_Y_cat = np.array([])
for train_index, test_index in skf.split(data_X, data_Y):
    train_X, test_X = tr(data_X[train_index, :]), tr(data_X[test_index, :])
    train_Y, test_Y = tr(data_Y_cat[train_index, :]), \
        tr(data_Y_cat[test_index, :])
    net = prn.train_LM(train_X, train_Y, net, verbose=True, k_max=500, E_stop=1e-5)
    prob_y = prn.NNOut(test_X, net)
    test_Y_cat = np.append(test_Y_cat, np.argmax(test_Y, axis=0))
    prod_Y_cat = np.append(prod_Y_cat, np.argmax(prob_y, axis=0))

# confusion matrix, return accuracy score
# Following function taken from:
# http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
def plot_confusion_matrix(cm, target_names, title='Confusion matrix',
                          cmap=plt.cm.Blues):
    cat = [0, 1, 2]
    cat_num = cm.shape[0]
    corm = np.zeros([cat_num, cat_num])
    for i in range(cat_num):
        for j in range(cat_num):
            corm[i, j] = \
                cm[i, j] / np.sqrt(sum(data_Y == cat[i]) * sum(data_Y == cat[j]))
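# Hedged sketch of how the fold results collected above could feed the plotting
# helper: the original call site is not shown in this excerpt, so the
# confusion-matrix construction here is an assumption using scikit-learn.
from sklearn.metrics import confusion_matrix

cm = confusion_matrix(test_Y_cat.astype(int), prod_Y_cat.astype(int),
                      labels=[0, 1, 2])
plot_confusion_matrix(cm, target_names=['cat 0', 'cat 1', 'cat 2'])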
###
# Create and train NN
# create recurrent neural network with 1 input, 2 hidden layers with
# 3 neurons each and 1 output
# the NN uses the input data at timestep t-1 and t-2
# the NN has a recurrent connection with delay of 1, 2 and 3 timesteps from the
# output to the first layer (and no recurrent connection of the hidden layers)
net = prn.CreateNN([1, 3, 3, 1], dIn=[1, 2], dIntern=[], dOut=[1, 2, 3])

# Train NN with training data P=input and Y=target
# Set maximum number of iterations k_max to 200
# Set termination condition for Error E_stop to 1e-3
# The training will stop after 200 iterations or when the Error <= E_stop
net = prn.train_LM(P, Y, net, verbose=True, k_max=200, E_stop=1e-3)

###
# Calculate outputs of the trained NN for test data
# with and without previous input P0 and output Y0
ytest = prn.NNOut(Ptest, net)
y0test = prn.NNOut(Ptest, net, P0=P0test, Y0=Y0test)

###
# Plot results
fig = plt.figure(figsize=(11, 7))
ax1 = fig.add_subplot(111)
fs = 18

# Test Data
ax1.set_title('Test Data', fontsize=fs)
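# Shape sketch for the P0/Y0 warm start used above (synthetic values, not the
# original data): with dIn=[1, 2] and dOut=[1, 2, 3] the largest delay is 3,
# so P0/Y0 supply 3 previous inputs/outputs to fill the delay lines before the
# new sequence starts.
P0_demo = np.array([0.1, 0.2, 0.3])  # 3 previous inputs (single input row)
Y0_demo = np.array([0.0, 0.1, 0.2])  # 3 previous outputs (single output row)
P_demo = np.linspace(0, 1, 20)       # a new input sequence
y_demo = prn.NNOut(P_demo, net, P0=P0_demo, Y0=Y0_demo)  # continues the sequence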
def train(self, training_data, max_iter=200, verbose=False, use_mean=True):
    input_matrices = []
    target_matrices = []
    slice_point = self.delay - 1
    for vehicle in training_data:
        r, c = vehicle.shape
        # create the TARGET, sensor inputs for both sensors
        # drop the first column as it does not have past observations
        sl = np.reshape(np.array(vehicle[2]), (1, c))
        sr = np.reshape(np.array(vehicle[3]), (1, c))
        target = np.concatenate((sl, sr), axis=0)
        target = np.delete(target, 0, axis=1)
        # print(target)
        # create first row in delay matrix with first motor observations
        delay_matrix = np.array([vehicle[0]])
        mean = np.mean(vehicle[0])
        for it in range(1, self.delay):
            # add a delay vector (line)
            rolled = np.reshape(np.roll(vehicle[0], it), (1, c))
            # replace the missing observations
            # with zeros or mean of timeseries
            if use_mean:
                rolled[0, 0:it] = mean
            else:
                rolled[0, 0:it] = 0.0
            delay_matrix = np.concatenate((delay_matrix, rolled), axis=0)
        # print(delay_matrix.shape)
        # create input delays for the rest of the time series
        for idx in range(1, r):  # iterate the rows
            mean = np.mean(vehicle[idx])
            for it in range(0, self.delay):
                # add a delay vector (line)
                rolled = np.reshape(np.roll(vehicle[idx], it), (1, c))
                # replace the missing observations
                # with zeros or mean of timeseries
                if use_mean:
                    rolled[0, 0:it] = mean
                else:
                    rolled[0, 0:it] = 0.0
                delay_matrix = np.concatenate((delay_matrix, rolled), axis=0)
        delay_matrix = np.delete(delay_matrix, -1, axis=1)
        # drop the columns that needed padding
        for _ in range(0, slice_point):
            delay_matrix = np.delete(delay_matrix, 0, axis=1)
            target = np.delete(target, 0, axis=1)
        # collect the input, target matrices
        input_matrices.append(delay_matrix)
        target_matrices.append(target)
    input_matrices = np.array(input_matrices)
    input_matrices = np.concatenate(input_matrices[:], axis=1)
    target_matrices = np.array(target_matrices)
    target_matrices = np.concatenate(target_matrices[:], axis=1)
    # print(target_matrices)
    # print(input_matrices)
    self.net = pr.train_LM(input_matrices, target_matrices, self.net,
                           k_max=max_iter, E_stop=1e-6, verbose=verbose)
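# Toy illustration (not from the original class) of the delay-embedding trick
# used above: np.roll shifts the series, then the wrapped-around entries are
# masked with zeros (or the series mean). For one series and delay 3:
import numpy as np

series = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
delay = 3
rows = [series]
for it in range(1, delay):
    rolled = np.roll(series, it)
    rolled[0:it] = 0.0  # wrapped values replaced, as in the use_mean=False branch
    rows.append(rolled)
print(np.vstack(rows))
# [[1. 2. 3. 4. 5.]
#  [0. 1. 2. 3. 4.]
#  [0. 0. 1. 2. 3.]]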
# xpred = xpred2
# xpred = np.asarray(xpred, dtype=float)
print(xpred)
print(xpred.shape)
xpred = xpred.reshape(30, len(xpred))
print(xpred.shape)
print(x.shape)
print(xpred)
# print(ny_y.shape)

# Train the Linear Regression Object
# mlpr = MLPRegressor().fit(x, y)
net = pyrenn.CreateNN([30, 10, 1])
# print(net)
net = pyrenn.train_LM(x, ny_y, net, verbose=True, k_max=200, E_stop=1e-2)
y2 = pyrenn.NNOut(xpred, net)
print(y2)
"""
ytest = pyrenn.NNOut(Ptest, net)

fig = plt.figure(figsize=(11, 7))
ax0 = fig.add_subplot(211)
ax1 = fig.add_subplot(212)
fs = 18

# Train Data
ax0.set_title('Train Data', fontsize=fs)
ax0.plot(x, y2, color='b', lw=2, label='NN Output')
ax0.plot(x, y, color='r', marker='None', linestyle=':', lw=3,
         markersize=8, label='Train Data')
Y = df[2]
Ptest = df[3]
Ytest = df[4]

###
# Create and train NN
# create feed forward neural network with 1 input, 2 hidden layers with
# 3 neurons each and 1 output
net = prn.CreateNN([1, 3, 3, 1])

# Train NN with training data P=input and Y=target
# Set maximum number of iterations k_max to 100
# Set termination condition for Error E_stop to 9e-4
# The training will stop after 100 iterations or when the Error <= E_stop
net = prn.train_LM(P, Y, net, verbose=True, k_max=100, E_stop=9e-4)

###
# Calculate outputs of the trained NN for train and test data
y = prn.NNOut(P, net)
ytest = prn.NNOut(Ptest, net)

###
# Plot results
fig = plt.figure(figsize=(11, 7))
ax0 = fig.add_subplot(211)
ax1 = fig.add_subplot(212)
fs = 18

# Train Data
ax0.set_title('Train Data', fontsize=fs)
                                                    random_state=None)
normalization_factor = np.amax(x_train, axis=0)
x_of_train = (x_train / normalization_factor).T
x_of_test = (x_test / normalization_factor).T
y_of_train = y_train.T / 600
y_of_test = y_test.T / 600
# ------------------------------ Read file / build DataFrame (end) -----------

# ------------------------------ ANN main routine ----------------------------
# create NN: features_Num inputs, 2 hidden layers, 1 output
net = prn.CreateNN(
    [features_Num, hiddenlayer1_features, hiddenlayer2_features, 1])

# Train by NN
net = prn.train_LM(x_of_train,
                   y_of_train,
                   net,
                   verbose=True,
                   k_max=iteration,
                   E_stop=1e-10)

# print out result
y_prn_train = prn.NNOut(x_of_train, net)
y_prn_test = prn.NNOut(x_of_test, net)
# print('Predicted Y for the x train data:', '\n', y_prn_train * 600)
# print('Predicted Y for the x test data:', '\n', y_prn_test * 600)
# ------------------------------ ANN main routine (end) ----------------------

# ------------------------------ Check the results ---------------------------
# visualize result
plt.scatter(y_of_train * 600,
            y_prn_train * 600,
            label='Train sets (70% of data)')
plt.scatter(y_of_test * 600,
            y_prn_test * 600,  # second argument restored from the parallel snippets below
            label='Test sets (30% of data)')
naam += "_" + naam2[i] naam = naam.replace(':', '_') # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # example_compair.py print("example_compair") # Read Example Data df = genfromtxt('example_data_compressed_air.csv', delimiter=',') P = np.array([df[1], df[2], df[3]]) Y = np.array([df[4], df[5]]) Ptest = np.array([df[6], df[7], df[8]]) Ytest = np.array([df[9], df[10]]) # Create and train NN net = prn.CreateNN([3, 5, 5, 2], dIn=[0], dIntern=[], dOut=[1]) net = prn.train_LM(P, Y, net, k_max=500, E_stop=1e-5) res = nvidia_smi.nvmlDeviceGetUtilizationRates(handle) print(f'gpu: {res.gpu}%, gpu-mem: {res.memory}%') ### # Save outputs to certain file prn.saveNN(net, "./SavedNN/compair.csv") # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # example_friction.py print("friction") # Read Example Data df = genfromtxt('example_data_friction.csv', delimiter=',') P = df[1] Y = df[2]
train_result = csv_train.drop(csv_train.columns[0:-1], axis=1)
num_features = train.shape[1]
num_data = train.shape[0]
num_genre = len((train.iloc[:, -1]).unique())
print('Read ' + str(num_data) + ' values with ' + str(num_features) + ' features')
num_features -= 1
nn = [num_features, (3 * num_features), (3 * num_features), 10]
net = pyrenn.CreateNN(nn, dIn=[0], dIntern=[], dOut=[1])
train = train.drop(train.columns[0], axis=1)
P1 = numpy.array(train.values)
P = (P1 - P1.min(0)) / P1.ptp(0)  # min-max normalise each feature column
# joined_new_norm = (joined_new - joined_new.min(0)) / joined_new.ptp(0)
P = P.T
train_result = pd.get_dummies(train_result)
Y = numpy.array(train_result.values)
Y = Y.T
print(Y.shape)
print(num_features)
print(P.shape)
net = pyrenn.train_LM(P, Y, net, k_max=1000, E_stop=1e-10,
                      dampfac=3.0, dampconst=10.0, verbose=True)
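# Hedged decoding sketch (the original decoding step is not shown in this
# excerpt): NNOut returns one row per genre, so an argmax over rows recovers
# the predicted class index, which maps back to the one-hot column names
# produced by pd.get_dummies above.
y_out = pyrenn.NNOut(P, net)                 # shape: (10 genres, samples)
predicted_idx = numpy.argmax(y_out, axis=0)  # winning genre per sample
predicted_genre = train_result.columns[predicted_idx]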
###
# Create and train NN
# create recurrent neural network with 1 input, 2 hidden layers with
# 2 neurons each and 1 output
# the NN has a recurrent connection with delay of 1 timestep in the hidden
# layers and a recurrent connection with delay of 1 and 2 timesteps from the
# output to the first layer
net = prn.CreateNN([1, 2, 2, 1], dIn=[0], dIntern=[1], dOut=[1, 2])

# Train NN with training data P=input and Y=target
# Set maximum number of iterations k_max to 100
# Set termination condition for Error E_stop to 1e-3
# The training will stop after 100 iterations or when the Error <= E_stop
net = prn.train_LM(P, Y, net, verbose=True, k_max=100, E_stop=1e-3)

###
# Calculate outputs of the trained NN for train and test data
y = prn.NNOut(P, net)
ytest = prn.NNOut(Ptest, net)

###
# Plot results
fig = plt.figure(figsize=(11, 7))
ax0 = fig.add_subplot(211)
ax1 = fig.add_subplot(212)
fs = 18

# Train Data
def Tranning_by_Neural_Network():
    # --------------------- Read file and build DataFrame --------------------
    # filepath = 'C:\\Users\\richard.weng\\Documents\\Python Scripts\\python_projects\\(1) NIVG Project\\ANN\\'
    file_data = file_name.get() + '.csv'
    df0 = pd.read_csv(file_data)
    # select a single test subject
    # df = df[df.Name == 'Nick']
    df = df0.iloc[:, 1:]  # drop the first column (tester name)
    print(df.T.tail())
    print('--------------------------------------------')
    print('df length:', len(df))
    print('--------------------------------------------')
    P = df.T.iloc[1:features_Num.get() + 1, 0:len(df)]
    print(P.tail())
    print('input shape:', P.shape)
    print('--------------------------------------------')
    Y = df.T.iloc[0:1, 0:len(df)]
    print(Y.tail())
    print('output shape:', Y.shape)
    print('--------------------------------------------')
    # convert to 2D arrays
    P = np.array(P)
    Y = np.array(Y)
    # split 70% training / 30% validation (TrainingData and TestingData)
    x_train, x_test, y_train, y_test = train_test_split(P.T,
                                                        Y.T,
                                                        test_size=0.3,
                                                        random_state=None)
    x_of_train = (x_train / np.amax(x_train, axis=0)).T
    x_of_test = (x_test / np.amax(x_train, axis=0)).T
    y_of_train = y_train.T / 600
    y_of_test = y_test.T / 600
    # --------------------- Read file and build DataFrame (end) --------------

    # --------------------- ANN main routine ---------------------------------
    # create NN: features_Num inputs, 2 hidden layers, 1 output
    net = prn.CreateNN([
        features_Num.get(),
        hiddenlayer1_features.get(),
        hiddenlayer2_features.get(), 1
    ])
    # Train by NN
    net = prn.train_LM(x_of_train,
                       y_of_train,
                       net,
                       verbose=True,
                       k_max=iteration.get(),
                       E_stop=1e-10)
    # print out result
    y_prn_train = prn.NNOut(x_of_train, net)
    y_prn_test = prn.NNOut(x_of_test, net)
    # print('Predicted Y for the x train data:', '\n', y_prn_train * 600)
    # print('Predicted Y for the x test data:', '\n', y_prn_test * 600)
    # --------------------- ANN main routine (end) ---------------------------

    # --------------------- Check the results --------------------------------
    # visualize result
    plt.scatter(y_of_train * 600, y_prn_train * 600)
    plt.scatter(y_of_test * 600, y_prn_test * 600)
    plt.title('ANN Simulation Result')
    plt.xlabel('Input glucose (mg/dL)')
    plt.ylabel('Predicted glucose (mg/dL)')
    plt.grid()
    plt.show()
    print('actual glucose of the test set:', '\n', y_of_test * 600)
    print('predicted glucose of the test set:', '\n', y_prn_test * 600)
    # --------------------- Check the results (end) --------------------------

    # Save ANN
    prn.saveNN(net, file_name.get() + '_LM_parameter' + '.csv')

    # Check final correlation
    y_all = prn.NNOut((P.T / np.amax(x_train, axis=0)).T, net) * 600
    plt.scatter(Y.flatten(), y_all)
    Name = df0['Name'].values.tolist()
    df_result = pd.DataFrame({
        'Name': Name,
        'total_y': Y.flatten(),
        'total_pre_y': y_all
    })
    print('correlation analysis:\n', df_result.corr())
    # print the sample count
    print('total samples:', len(df_result))
    # Save the result to a new CSV file
    df_result.to_csv(file_name.get() + '_LM_result' + '.csv')
Ytest = Ytest_[:, 1:100]

###
# Create and train NN
# create a recurrent neural network with 3 inputs, 2 hidden layers with
# 5 neurons each and 2 outputs
# the NN has a recurrent connection with delay of 1 timestep from the output
# to the first layer
net = prn.CreateNN([3, 5, 5, 2], dIn=[0], dIntern=[], dOut=[1])

# Train NN with training data P=input and Y=target
# Set maximum number of iterations k_max to 500
# Set termination condition for Error E_stop to 1e-5
# The training will stop after 500 iterations or when the Error <= E_stop
net = prn.train_LM(P, Y, net, verbose=True, k_max=500, E_stop=1e-5)

###
# Calculate outputs of the trained NN for test data
# with and without previous input P0 and output Y0
ytest = prn.NNOut(Ptest, net)
y0test = prn.NNOut(Ptest, net, P0=P0test, Y0=Y0test)

###
# Plot results
fig = plt.figure(figsize=(15, 10))
ax0 = fig.add_subplot(211)
ax1 = fig.add_subplot(212, sharey=ax0)
fs = 18
t = np.arange(0, np.shape(Ptest)[1]) / 4.0  # timesteps in 15 minute resolution
coll_t2 = []
for i in range(60):
    best_x, best_y = ga.run(1)
    best_11, best_12 = ga1.run(1)
    print(best_y)
    print(best_12)
    t1 = ga.X
    t2 = ga.Y
    if i < 11:
        coll_t1.append(t1)
        coll_t2.append(t2)
    else:
        best_model = prn.train_LM(t2.T, t1.T, best_model,
                                  verbose=False, k_max=10, E_stop=1e-8)
    if i == 10:
        t1 = np.array(coll_t1).reshape(-1, 2).T
        t2 = np.array(coll_t2).reshape(-1, 1).T
        np.savetxt('t1', t1)
        np.savetxt('t2', t2)
        best_run, best_model = optim.minimize(model=create_model,
                                              data=data,
                                              algo=tpe.suggest,
                                              max_evals=10,
                                              trials=Trials())
    print(i + 1)
    if i > 9:
        temp = t2.min() - 1e-3
        new = (prn.NNOut(np.array([[temp]]), best_model)).T
Y = np.array([df[4], df[5]])
Ptest = np.array([df[6], df[7], df[8]])
Ytest = np.array([df[9], df[10]])

###
# Create and train NN
# create a recurrent neural network with 3 inputs, 2 hidden layers with
# 5 neurons each and 2 outputs
# the NN has a recurrent connection with delay of 1 timestep from the output
# to the first layer
net = prn.CreateNN([3, 5, 5, 2], dIn=[0], dIntern=[], dOut=[1])

# Train NN with training data P=input and Y=target
# Set maximum number of iterations k_max to 500
# Set termination condition for Error E_stop to 1e-5
# The training will stop after 500 iterations or when the Error <= E_stop
net = prn.train_LM(P, Y, net, verbose=True, k_max=500, E_stop=1e-5)

###
# Save outputs to certain file
prn.saveNN(net, "D:/School/Masterproef/Python/pyrenn/SavedNN/compair.csv")
print("save succeeded")  # was "savegelukt" (Dutch)

###
# Calculate outputs of the trained NN for train and test data
y = prn.NNOut(P, net)
ytest = prn.NNOut(Ptest, net)

time_stop[0] = time.time()
cpu_percent[0] = psutil.cpu_percent()
virtual_mem[0] = psutil.virtual_memory()
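# Hedged round-trip sketch (not in the original script): a net saved with
# pyrenn's saveNN can be restored with loadNN and should reproduce the same
# outputs on the same inputs.
net2 = prn.loadNN("D:/School/Masterproef/Python/pyrenn/SavedNN/compair.csv")
print(np.allclose(prn.NNOut(Ptest, net), prn.NNOut(Ptest, net2)))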
def train_body_LM(nn_obj, nn_in, nn_in_test, nn_out, nn_out_test, n_epochs,
                  epochs_sum, loss_val, loss_test, er_tar, train_test, retrain,
                  root_width, root_height, root_x, root_y, lin_tr, can_tr,
                  lin_test=None, can_test=None, vert_coef=None, hor_coef=None):
    for i in range(n_epochs):
        # one LM iteration per loop pass so the loss can be logged per epoch
        nn_obj = prn.train_LM(nn_in, nn_out, nn_obj,
                              verbose=False, k_max=1, E_stop=1e-10)
        y_pred = prn.NNOut(nn_in, nn_obj)
        loss_val.append(loss(y_pred, nn_out, nn_obj))
        print("Train data loss:", loss_val[-1], "%")
        print(i)
        if i == 0 and epochs_sum == 0:
            lin_tr, can_tr, vert_coef, hor_coef = loss_plot(
                i, loss_val, "Train data loss",
                root_width, root_height, root_x, root_y)
        else:
            update_plot(lin_tr, can_tr, epochs_sum + i, loss_val)
        if train_test or retrain:
            y_pred_test = prn.NNOut(nn_in_test, nn_obj)
            loss_test.append(loss(y_pred_test, nn_out_test, nn_obj))
            print("Test data loss:", loss_test[-1], "%")
            if i == 0 and epochs_sum == 0:
                lin_test, can_test, vert_coef, _ = loss_plot(
                    i, loss_test, "Test data loss",
                    hor_coef, root_height, root_x, root_y)
            else:
                update_plot(lin_test, can_test, epochs_sum + i, loss_test)
            if i > 3 and retrain:
                # stop when the newest test loss exceeds the mean of the
                # previous three (overfitting guard)
                if loss_test[-1] * 3 - (loss_test[-2] + loss_test[-3]
                                        + loss_test[-4]) > 0:
                    print("Retraining")
                    epochs_sum += i + 1
                    return (nn_obj, vert_coef, hor_coef, epochs_sum,
                            loss_val[-1], lin_tr, can_tr, lin_test, can_test)
        if loss_val[-1] <= er_tar:
            break
    epochs_sum += i + 1
    return (nn_obj, vert_coef, hor_coef, epochs_sum, loss_val[-1],
            lin_tr, can_tr, lin_test, can_test)
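# The retraining trigger above compares the newest test loss with the mean of
# the three before it; factored out as a pure helper (a sketch, equivalent to
# the `loss_test[-1] * 3 - (...) > 0` condition) it reads:
def test_loss_rising(loss_test):
    """True when the latest test loss exceeds the mean of the previous three."""
    return len(loss_test) > 3 and \
        loss_test[-1] > (loss_test[-2] + loss_test[-3] + loss_test[-4]) / 3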
import pandas as pd

train = pd.read_csv('C:/TermProject/ClassifiedRnn/trainREC.csv',
                    sep=',', header=None)
test = pd.read_csv('C:/TermProject/ClassifiedRnn/testREC.csv',
                   sep=',', header=None)
train = np.array(train)
test = np.array(test)
net = prn.CreateNN([1000, 20, 1], dIn=[0], dIntern=[1], dOut=[1, 2])
net = prn.train_LM(train[:, 1:].T, train[:, 0], net,
                   verbose=True, k_max=30, E_stop=1e-3)
y = prn.NNOut(train[:, 1:].T, net)
ytest = prn.NNOut(test[:, 1:].T, net)
yTestPrd = np.array(ytest)
yTrainPrd = np.array(y)
yTestCor = test[:, 0]
yTrainCor = train[:, 0]
difTest = yTestPrd - yTestCor
difTrain = yTrainPrd - yTrainCor
accTest = np.mean(np.abs(difTest))    # mean absolute error on the test set
accTrain = np.mean(np.abs(difTrain))  # mean absolute error on the training set
def lm_NN(nn_in, nn_out, er_tar, main_win: tk.Toplevel, min_n=0, max_n=0,
          n_epochs=50, conf=[1, 1, 1], sect_ner=False, train_test=False,
          retrain=False, spl_coef=0.1, root_width=200, root_height=200,
          root_x=50, root_y=50):
    """
    Create and train a pyrenn NN with the LM optimization algorithm.

    nn_in: list, train NN IN data
    nn_out: list, train OUT data
    er_tar: float, MSE target
    min_n: int, minimum neurons for selection of the number of neurons
    max_n: int, maximum neurons for selection of the number of neurons
    n_epochs: int, maximum NN train epochs
    conf: list, NN configuration; first element - number of inputs, last
        element - number of outputs, other elements - number of neurons
        on the hidden layers
    sect_ner: bool or 0/1, whether to select the number of neurons
    train_test: bool or 0/1, whether the input sample should be split into
        training and test sets
    retrain: bool or 0/1, overfitting protection
    return nn_obj: NN object
    conf: list, NN neurons configuration
    """
    if train_test or retrain:
        x = tr.from_numpy(nn_in).float()
        y = tr.from_numpy(nn_out).float()
        dataset = tr.utils.data.TensorDataset(x, y)
        a = int(len(dataset) * (1 - spl_coef))
        data_trn, data_test = tr.utils.data.random_split(
            dataset, [a, int(len(dataset) - a)],
            generator=tr.Generator().manual_seed(42))
        dataloader = tr.utils.data.DataLoader(data_trn, shuffle=False,
                                              batch_size=len(data_trn))
        nn_in = (next(iter(dataloader))[0].numpy()).T
        nn_out = (next(iter(dataloader))[1].numpy()).T
        nn_in_test = data_test[:][0].numpy().T
        nn_out_test = data_test[:][1].numpy().T
    else:
        nn_in_test = 0
        nn_out_test = 0
        nn_in = nn_in.T
        nn_out = nn_out.T
    if sect_ner:
        if min_n > max_n:
            print("Error: minimum neurons > maximum neurons")
        for i in range(len(conf) - 2):
            conf[i + 1] = min_n
        for i in range(1, len(conf) - 1):
            min_loss = 20000
            b = conf[i]
            for j in range(min_n, max_n + 1):
                conf[i] = j
                nn_obj = prn.CreateNN(conf)
                nn_obj = prn.train_LM(nn_in, nn_out, nn_obj,
                                      verbose=False, k_max=1, E_stop=1e-10)
                y_pred = prn.NNOut(nn_in, nn_obj)
                a = loss(y_pred, nn_out, nn_obj)
                print("Current configuration:", conf, ";\t Loss:", a, "%")
                if a < min_loss:
                    min_loss = a
                    b = j
            conf[i] = b
        neurons_number_info(conf, root_height + root_y, root_x)
        print("Best configuration:", conf)
    nn_obj = prn.CreateNN(conf)
    loss_val = []
    loss_test = []
    epochs_sum = 0
    train = True
    lin_tr = None
    can_tr = None
    lin_test = None
    can_test = None
    vert_coef = None
    hor_coef = None
    while train == True:
        (nn_obj, vert_coef, hor_coef, epochs_sum, err, lin_tr, can_tr,
         lin_test, can_test) = train_body_LM(
            nn_obj, nn_in, nn_in_test, nn_out, nn_out_test, n_epochs,
            epochs_sum, loss_val, loss_test, er_tar, train_test, retrain,
            root_width, root_height, root_x, root_y, lin_tr, can_tr,
            lin_test, can_test, vert_coef, hor_coef)
        train_win = Continue_Train(err, epochs_sum)
        main_win.wait_window(train_win)
        train = train_win.answer
        if not train_win.answer:
            return nn_obj, conf, vert_coef, hor_coef
        # train = False
    return nn_obj, conf, vert_coef, hor_coef
# the NN uses no delayed or recurrent inputs/connections
net = prn.CreateNN([28 * 28, 10, 10])

batch_size = 1000
number_of_batches = 20

for i in range(number_of_batches):
    r = np.random.randint(0, 25000 - batch_size)
    Ptrain = P[:, r:r + batch_size]
    Ytrain = Y[:, r:r + batch_size]

    # Train NN with training data Ptrain=input and Ytrain=target
    # Set maximum number of iterations k_max
    # Set termination condition for Error E_stop
    # The training will stop after k_max iterations or when the Error <= E_stop
    net = prn.train_LM(Ptrain, Ytrain, net, verbose=True, k_max=1, E_stop=1e-5)
    print('Batch No. ', i, ' of ', number_of_batches)

###
# Select Test data
# Choose random number 0...5000-9
idx = np.random.randint(0, 5000 - 9)
# Select 9 random Test input data
P_ = Ptest[:, idx:idx + 9]
# Calculate NN Output for the 9 random test inputs
Y_ = prn.NNOut(P_, net)

###
# PLOT
fig = plt.figure(figsize=[11, 7])
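# Hedged scoring sketch (not in the original script): evaluate the
# batch-trained digit classifier on the full test set, assuming Ytest holds
# one-hot targets laid out like Y above (one row per digit class).
Yout = prn.NNOut(Ptest, net)
accuracy = np.mean(np.argmax(Yout, axis=0) == np.argmax(Ytest, axis=0))
print('Test accuracy: ', accuracy)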
sourceframes *= pysptk.blackman(frameLength)  # windowing
sourcemcepvectors = np.apply_along_axis(
    pysptk.mcep, 1, sourceframes, order, alpha)  # extract MCEPs of the source frames

sr, tx = wavfile.read(targetfile)
targetframes = librosa.util.frame(
    tx, frame_length=frameLength,  # framing the target audio
    hop_length=hop_length).astype(np.float64).T
targetframes *= pysptk.blackman(frameLength)  # windowing
targetmcepvectors = np.apply_along_axis(
    pysptk.mcep, 1, targetframes, order, alpha)  # extract MCEPs of target frames

# Normalising for feeding into RNN.
norm = min(len(sourcemcepvectors), len(targetmcepvectors))
transsourcemcepvectorsmod = np.transpose(sourcemcepvectors[0:norm])
transtargetmcepvectorsmod = np.transpose(targetmcepvectors[0:norm])

# Training Model.
net = pyrenn.CreateNN([order + 1, order + 5, order + 5, order + 1])
net = pyrenn.train_LM(transsourcemcepvectorsmod, transtargetmcepvectorsmod,
                      net, k_max=100, verbose=True, E_stop=5)

# Saving Model.
pyrenn.saveNN(net, 'pyrennweights_2.csv')
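# Hedged quality check (not in the original script): compare the converted
# MCEPs against the target MCEPs with a per-frame RMSE after training.
converted = pyrenn.NNOut(transsourcemcepvectorsmod, net)
rmse_per_frame = np.sqrt(
    np.mean((converted - transtargetmcepvectorsmod) ** 2, axis=0))
print('mean frame RMSE:', rmse_per_frame.mean())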
###
# Create and train NN
# create a recurrent neural network with 3 inputs, 2 hidden layers with
# 5 neurons each and 2 outputs
# the NN has a recurrent connection with delay of 1 timestep from the output
# to the first layer
net = prn.CreateNN([3, 5, 5, 2], dIn=[0], dIntern=[], dOut=[1])

# Train NN with training data P=input and Y=target
# Set maximum number of iterations k_max to 500
# Set termination condition for Error E_stop to 1e-5
# The training will stop after 500 iterations or when the Error <= E_stop
net = prn.train_LM(P, Y, net, verbose=True, k_max=500, E_stop=1e-5)

###
# Calculate outputs of the trained NN for test data
# with and without previous input P0 and output Y0
ytest = prn.NNOut(Ptest, net)
y0test = prn.NNOut(Ptest, net, P0=P0test, Y0=Y0test)

###
# Plot results
fig = plt.figure(figsize=(15, 10))
ax0 = fig.add_subplot(211)
ax1 = fig.add_subplot(212, sharey=ax0)
fs = 18
# split 70% training / 30% validation (TrainingData and TestingData)
x_train, x_test, y_train, y_test = train_test_split(P.T,
                                                    Y.T,
                                                    test_size=0.3,
                                                    random_state=None)
x_of_train = (x_train / np.amax(x_train, axis=0)).T
x_of_test = (x_test / np.amax(x_train, axis=0)).T
y_of_train = y_train.T / 600
y_of_test = y_test.T / 600

# create NN: 8 inputs, 2 hidden layers with 3 neurons each, 1 output
net = prn.CreateNN([8, 3, 3, 1])

# Train by NN
net = prn.train_LM(x_of_train,
                   y_of_train,
                   net,
                   verbose=True,
                   k_max=100,
                   E_stop=1e-10)

# print out result
y_prn_train = prn.NNOut(x_of_train, net)
y_prn_test = prn.NNOut(x_of_test, net)
# print('Predicted Y for the x train data:', '\n', y_prn_train * 600)
# print('Predicted Y for the x test data:', '\n', y_prn_test * 600)

# visualize result
plt.scatter(y_of_train * 600, y_prn_train * 600)
plt.scatter(y_of_test * 600, y_prn_test * 600)
plt.title('ANN Simulation Result')
plt.xlabel('Input glucose (mg/dL)')
plt.ylabel('Predicted glucose (mg/dL)')
plt.grid()
plt.show()