def test_reproj_2():
    # Transforming a training sample should reproduce its stored projection.
    pca = KPCA(n_components=2)
    pca.fit(X1)
    exp = pca.transform(X1[1, None])
    assert_almost_equal(pca.X_projected_[1, None], exp, decimal=2)
def test_fail_array_transform():
    # Passing a 1-D array (a bare sample) instead of a 2-D row
    # should raise a ValueError.
    pca = KPCA(n_components=2)
    pca.fit(X1)
    with pytest.raises(ValueError):
        pca.transform(X1[1])
plt.xlabel('PC1')
plt.yticks([])
# plt.show()
plt.savefig('../figs/tutorial/mlxtendex1_4.png')
plt.close()

# The feature space is linearly separable at x=0; the data are entirely 1-D,
# forming a horizontal line. A subspace like this can then be used as input
# to generalised classification models, e.g. logistic regression.

import matplotlib.pyplot as plt
from sklearn.datasets import make_moons

# New data: two crescents, now with 100 samples per crescent,
# using a random state seed of 5.
X2, y2 = make_moons(n_samples=200, random_state=5)

# Transform the new dataset according to the previously fitted KPCA parameters.
X2_kpca = kpca.transform(X2)

# Initial (fit) data
plt.scatter(X_kpca[y == 0, 0], X_kpca[y == 0, 1],
            color='red', marker='o', alpha=0.5, label='fit data')
plt.scatter(X_kpca[y == 1, 0], X_kpca[y == 1, 1],
            color='blue', marker='^', alpha=0.5, label='fit data')

# New data projected onto the learned component axes
plt.scatter(X2_kpca[y2 == 0, 0], X2_kpca[y2 == 0, 1],
            color='orange', marker='v', alpha=0.2, label='new data')
plt.scatter(X2_kpca[y2 == 1, 0], X2_kpca[y2 == 1, 1],
            color='cyan', marker='s', alpha=0.2, label='new data')
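# A minimal sketch of the classification idea mentioned above, assuming the
# fitted `kpca`, the projections `X_kpca`/`X2_kpca`, and the labels `y`/`y2`
# from the previous steps; the classifier variable `clf` is illustrative,
# not part of the tutorial's own code.
from sklearn.linear_model import LogisticRegression

# Fit a linear classifier on the first kernel principal component only;
# in the transformed space a single threshold along PC1 separates the crescents.
clf = LogisticRegression()
clf.fit(X_kpca[:, 0, None], y)

# Score the classifier on the newly projected moons data.
print(clf.score(X2_kpca[:, 0, None], y2))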