def display_pca_projection_dim2(X3, X5):
    """Scatter-plot the first two PCA coordinates of two sample sets.

    Parameters
    ----------
    X3, X5 : ndarray
        Feature matrices with one sample per column (d x n3 and d x n5);
        they are concatenated column-wise before decomposition.

    Side effects: opens a matplotlib window (plt.show()).
    """
    X = np.concatenate((X3, X5), axis=1)
    # pca.decompose is assumed to return (mean, basis, coefficients);
    # Y holds the PCA coefficients, one column per sample.
    m, B, Y = pca.decompose(X)
    # Split at the actual number of X3 samples instead of the previous
    # hard-coded 20, so inputs of any size are plotted correctly
    # (consistent with showPcaProjection).
    n3 = X3.shape[1]
    line3, = plt.plot(Y[0][:n3], Y[1][:n3], 'o')
    line5, = plt.plot(Y[0][n3:], Y[1][n3:], '+')
    plt.legend([line3, line5], ['3', '5'])
    plt.show()
def showPcaProjection(X0, X1, legendStr):
    """Plot the 2-D PCA projection of two sample sets with a custom legend.

    Parameters
    ----------
    X0, X1 : ndarray
        Feature matrices with one sample per column; concatenated
        column-wise before decomposition.
    legendStr : sequence of str
        Two legend labels, one per sample set.

    Side effects: prints the coefficient-matrix shape and opens a
    matplotlib window (plt.show()).
    """
    cat = np.concatenate((X0, X1), axis=1)
    # pca.decompose returns a 3-tuple; index [2] is the coefficient matrix.
    Y = pca.decompose(cat)[2]
    # Fixed: `print Y.shape` was Python-2 print-statement syntax,
    # a SyntaxError under Python 3.
    print(Y.shape)
    n0 = X0.shape[1]
    line1, = plt.plot(Y[0, :n0], Y[1, :n0], '+')
    line2, = plt.plot(Y[0, n0:], Y[1, n0:], '*')
    plt.legend([line1, line2], legendStr)
    plt.show()
def ldaDemo_Random():
    """Demo comparing PCA and Fisher's LDA projections on random 2-D data.

    Draws two Gaussian clusters, projects the pooled data onto the first
    PCA component and onto the Fisher discriminant direction, and plots
    the raw points alongside both projections.

    Side effects: opens a matplotlib window (plt.show()).
    """
    N1, N2 = 10, 10
    x1 = [random.gauss(0.0, 3.0) for i in range(N1)]
    y1 = [random.gauss(0.0, 1.0) for i in range(N1)]
    x2 = [random.gauss(3.0, 3.0) for i in range(N2)]
    y2 = [random.gauss(4.0, 1.0) for i in range(N2)]

    # --- PCA: project all points onto the first principal component ---
    X = np.array([x1 + x2, y1 + y2])
    m, B, Y = pca.decompose(X)
    # Reconstruct 2-D positions from the first component only.
    pcaX = m[0] + B[0, 0] * Y[0, :]
    pcaY = m[1] + B[1, 0] * Y[0, :]

    # --- Fisher's LDA ---
    X1, X2 = np.array([x1, y1]), np.array([x2, y2])
    m1, m2 = rowMean(X1), rowMean(X2)
    S1, S2 = np.cov(X1), np.cov(X2)
    # Pooled within-class covariance, weighted by class size.
    B = (N1 / float(N1 + N2)) * S1 + (N2 / float(N1 + N2)) * S2
    # One may use the Moore-Penrose inverse, la.pinv(), if B is not full rank
    w = np.matmul(la.inv(B), m1 - m2)
    w /= la.norm(w)
    coef = np.matmul(w.T, X)
    # w is 2 x 1 matrix
    # coef is 1 x (N1 + N2) matrix
    projX, projY = w[0][0] * coef[0], w[1][0] * coef[0]

    line1, = plt.plot(x1, y1, 'o')
    line2, = plt.plot(x2, y2, '+')
    line3, = plt.plot(pcaX, pcaY, 'x')
    line4, = plt.plot(projX, projY, '.')
    # Fixed legend typo: 'lsa projection' -> 'lda projection'
    # (line4 plots the Fisher LDA projection).
    plt.legend([line1, line2, line3, line4],
               ['class 1', 'class 2', 'pca projection', 'lda projection'])
    plt.gca().set_aspect('equal')
    plt.show()