def train_keras_lnn(nn_array, org_data, org_label, algorithm):
    """Get the fnn output and input the lnn.

    Forwards ``org_data`` through every pre-trained FNN named in
    ``nn_array`` to build a feature matrix, trains a small Keras dense
    network (the LNN) on those features, then evaluates it and plots a
    confusion matrix.

    Parameters
    ----------
    nn_array : iterable of str
        FNN model names; each maps to a weight file under
        ./Experiment/Method3/FNNModel/<name>.json.
    org_data : array-like
        Raw samples forwarded through every FNN.
    org_label : sequence
        Original labels; converted to integer classes via ``label_convert``.
    algorithm : object
        Unused in this function; kept for interface compatibility with
        existing callers.
    """
    fnn_output = _forward_fnn_ensemble(nn_array, org_data)

    print('org_label', org_label)
    fnn_label = label_convert(org_label)
    X_train, X_test, y_train, y_test = train_test_split(
        fnn_output, fnn_label, test_size=0.3, random_state=42)
    print('X_train.shape', X_train.shape)
    print('y_train.shape', y_train.shape)

    # Construct the lnn
    y_trainOneHot = np_utils.to_categorical(y_train)
    y_testOneHot = np_utils.to_categorical(y_test)
    model = _build_lnn()

    train_history = model.fit(x=X_train, y=y_trainOneHot,
                              validation_split=0.2, epochs=30,
                              batch_size=200, verbose=2)
    show_train_history(train_history, 'mean_squared_error',
                       'val_mean_squared_error', 'mean_squared_error.png')
    show_train_history(train_history, 'loss', 'val_loss', 'loss.png')

    _evaluate_lnn(model, X_test, y_testOneHot)


def _forward_fnn_ensemble(nn_array, org_data):
    """Run org_data through each stored FNN and collect the outputs.

    Returns a matrix of shape (n_samples, len(nn_array)): one column of
    scores per FNN model.
    """
    fnn_output = np.array([])
    for name in nn_array:
        print('<---nn -> ', name, '--->')
        rel_path = './Experiment/Method3/FNNModel/' + name + '.json'
        abs_path = os.path.join(os.path.dirname(__file__), rel_path)
        attribute = LoadData.load_fnn_weight(abs_path)
        mean = np.asarray(attribute['Mean'])
        stddev = np.asarray(attribute['Stddev'])
        weight = np.asarray(attribute['Weight'])
        # Test the FNN: rebuild it from stored parameters, inference only.
        fnn = FNN(fnn_input_size, fnn_membership_size, fnn_rule_size,
                  fnn_output_size, mean, stddev, weight, fnn_lr, 1)
        result = fnn.testing_model(org_data)
        fnn_output = np.append(fnn_output, result)
    # np.append flattened everything: reshape to (models, samples), then
    # transpose so each row is one sample's feature vector.
    return fnn_output.reshape(len(nn_array), -1).T


def _build_lnn():
    """Construct and compile the dense LNN stacked on the FNN outputs."""
    model = Sequential()
    model.add(Dense(units=32, input_dim=12))
    model.add(Dense(32, activation='tanh'))
    model.add(
        Dense(units=12, kernel_initializer='normal', activation='softmax'))
    adam = optimizers.Adam(lr=0.001)
    # NOTE(review): MSE loss with a softmax output layer is unusual —
    # categorical_crossentropy is the conventional choice. Kept as-is to
    # preserve the original training behavior.
    model.compile(loss='mean_squared_error', optimizer=adam, metrics=['mse'])
    model.summary()
    return model


def _evaluate_lnn(model, X_test, y_testOneHot):
    """Score the trained LNN on the test split and plot its confusion matrix."""
    scores = model.evaluate(X_test, y_testOneHot)
    print('scores', scores)
    prediction = model.predict(X_test)
    # Spot-check the first few probability vectors against their one-hot labels.
    for x, y in zip(prediction[:10], y_testOneHot[:10]):
        print(x, ' ', y)
    prediction = model.predict_classes(X_test)
    y_pred = prediction_convert(prediction)
    yy = onehot_convert(y_testOneHot)
    print(set(y_pred))
    print(set(yy))
    cnf_matrix = confusion_matrix(yy, y_pred)
    print('accuracy_score', accuracy_score(yy, y_pred))
    print('cnf_matrix\n', cnf_matrix)
    rel_path = './Experiment/method3/Graph/cnf_lnn.png'
    abs_path = os.path.join(os.path.dirname(__file__), rel_path)
    plt.figure(figsize=(8, 6), dpi=200)
    ConfusionMatrix.plot_confusion_matrix(
        cnf_matrix, abs_path, classes=list(set(y_pred)),
        title='Final Model Confusion matrix')
0.2, 0.0, 0.1, 0.1, 0.2, 0.1, 0.0, 0.2, 0.2, 0.2, 0.1, 0.2, 0.2, 0.1, 0.1 ] # Load the Test data org_data, org_label = LoadData.get_method2_test() # print('org_data.shape', org_data.shape) # print('org_label.shape', org_label) output_array = np.array([]) # Load the test data, forward, store for nn in nn_category: print('nn -> ', nn) rel_path = '../Experiment/Method2/FNNModel/FNN/' + str(nn) + '.json' abs_path = os.path.join(os.path.dirname(__file__), rel_path) attribute = LoadData.load_fnn_weight(abs_path) # print(attribute) mean = np.asarray(attribute['Mean']) stddev = np.asarray(attribute['Stddev']) weight = np.asarray(attribute['Weight']) # Test the FNN fnn = FNN(fnn_input_size, fnn_membership_size, fnn_rule_size, fnn_output_size, mean, stddev, weight, fnn_lr, 1) output = fnn.testing_model(org_data) output_array = np.append(output_array, output) # 轉置矩陣 print(len(nn_category)) output_array = output_array.reshape(len(nn_category), -1).T print('output_array', output_array) print(output_array.shape)