def loadDataSet(dataSet='ORL', splitNum=3):
    # Dispatch to the loader for the requested face dataset
    if dataSet == 'ORL':
        return loadImg(splitNum)
    elif dataSet == 'FERET':
        return loadFERET(splitNum)
    elif dataSet == 'Yale':
        return loadYale(splitNum)
    else:
        print('No such dataset!')
        y_train, class_num, k=(40 - 25) * split_num - 1)
    # eig_vals, eig_vecs = np.linalg.eig(np.linalg.inv(S_W).dot(S_B))
    # Solve the difference form S_B - alpha * S_W rather than inv(S_W) @ S_B
    # (commented out above), which avoids inverting S_W
    eig_vals, eig_vecs = np.linalg.eig(S_B - alpha * S_W)
    # Sort eigenvectors by the magnitude of their eigenvalues, descending
    eig_pairs = [(np.abs(eig_vals[i]), eig_vecs[:, i]) for i in range(len(eig_vals))]
    eig_pairs = sorted(eig_pairs, key=lambda k: k[0], reverse=True)
    # Stack the top n_component eigenvectors into the projection matrix W
    vecs = [eig_pairs[i][1].reshape(m, 1) for i in range(n_component)]
    W = np.hstack(vecs)
    return W


# Load the data
split_num = 5
train_imgs, train_labels, test_imgs, test_labels = loadImg(split_num=split_num)

# Standardize the data
sc = StandardScaler()
X_train_std = sc.fit_transform(train_imgs)
X_test_std = sc.transform(test_imgs)  # reuse the training statistics; do not refit on the test set
y_train = np.array(train_labels)
y_test = np.array(test_labels)

# Dimensionality reduction
# Run PCA first so the dimensionality does not get too high
pca = PCA(n_components=80)
pca.fit(X_train_std)
X_train_pca = pca.transform(X_train_std)
X_test_pca = pca.transform(X_test_std)
# w = mfa(X_train_pca, y_train, X_test_pca, y_test, n_component=50)
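# A minimal sketch (not from the original script) of how the projection returned by
# mfa could be applied and evaluated once the commented-out call above is enabled.
# The 1-nearest-neighbour classifier is an assumption; the original evaluation code
# is not shown here.
from sklearn.neighbors import KNeighborsClassifier

w = mfa(X_train_pca, y_train, X_test_pca, y_test, n_component=50)
w = np.real(w)  # np.linalg.eig may return a complex dtype even for a (near-)symmetric matrix
X_train_mfa = X_train_pca.dot(w)  # project the PCA features with W
X_test_mfa = X_test_pca.dot(w)

knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train_mfa, y_train)
print('recognition accuracy: %.4f' % knn.score(X_test_mfa, y_test))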
def showORL():
    # Display the ORL training images one by one (each image is 112 x 92 pixels)
    train_imgs, train_labels, test_imgs, test_labels = loadImg(10)
    for i in range(len(train_imgs)):
        img = train_imgs[i].reshape(112, 92)
        plt.imshow(img, cmap='gray')
        plt.show()
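# Hypothetical entry point (not part of the original script): loadDataSet dispatches to
# the ORL / FERET / Yale loaders; splitNum is assumed to control how many images per
# subject go into the training split.
if __name__ == '__main__':
    train_imgs, train_labels, test_imgs, test_labels = loadDataSet('ORL', splitNum=5)
    print(len(train_imgs), 'training images,', len(test_imgs), 'test images')
    showORL()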