Example #1
    def draw_adaline_gd_graph(self):
        # Standardize the features.
        X = self.X
        y = self.y
        X_std = np.copy(X)
        X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
        X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()

        # Train Adaline with batch gradient descent on the standardized features
        ada = AdalineGD(n_iter=15, eta=0.01)
        ada.fit(X_std, y)

        plot_decision_regions(X_std, y, classifier=ada)
        plt.title('Adaline - Gradient Descent')
        plt.xlabel('sepal length [standardized]')
        plt.ylabel('petal length [standardized]')
        plt.legend(loc='upper left')
        plt.tight_layout()
        plt.show()

        # Plot the cost per epoch to check convergence
        plt.plot(range(1, len(ada.cost_) + 1), ada.cost_, marker='o')
        plt.xlabel('Epochs')
        plt.ylabel('Sum-squared-error')

        plt.tight_layout()
        plt.show()
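
The standalone code below assumes that X and y are already defined. A minimal sketch of one possible setup, assuming the usual two-class Iris subset (setosa vs. versicolor) with sepal length and petal length as features, matching the axis labels above; loading via scikit-learn is an assumption:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris

# Assumed setup: the first 100 Iris samples are setosa (0) and versicolor (1);
# columns 0 and 2 are sepal length and petal length.
iris = load_iris()
X = iris.data[:100, [0, 2]]
y = np.where(iris.target[:100] == 0, -1, 1)  # encode class labels as -1 / 1
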
# Compare learning rates 0.01 and 0.0001 on the raw (unstandardized) features
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))

ada1 = AdalineGD(eta=0.01, n_iter=10).fit(X, y)
ax[0].plot(range(1, len(ada1.cost_)+1), np.log(ada1.cost_), marker='o', color='blue')
ax[0].set_xlabel('Epoch')
ax[0].set_ylabel('log(sum-square-error)')
ax[0].set_title('Adaline learning rate 0.01')

ada2 = AdalineGD(eta=0.0001, n_iter=10).fit(X, y)
ax[1].plot(range(1, len(ada2.cost_)+1),
            np.log(ada2.cost_), marker='o',
            color='blue')
ax[1].set_xlabel('Epoch')
ax[1].set_ylabel('log(sum-square-error)')
ax[1].set_title('Adaline learning rate 0.0001')

plt.show()

# Standardize the features (zero mean, unit variance per column)
X_std = np.copy(X)
X_std[:,0] = (X_std[:,0] - X_std[:,0].mean()) / X_std[:,0].std()
X_std[:,1] = (X_std[:,1] - X_std[:,1].mean()) / X_std[:,1].std()

# Retrain Adaline on the standardized data; with standardization, eta=0.01 converges
ada = AdalineGD(n_iter=15, eta=0.01)
ada.fit(X_std, y)
plot_decision_regions(X_std, y, classifier=ada)
plt.title('Adaline - Gradient Descent')
plt.xlabel('sepal length [standardized]')
plt.ylabel('petal length [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()

# Plot the cost per epoch on the standardized data
plt.plot(range(1, len(ada.cost_)+1),
            ada.cost_, marker='o',
            color='blue')
plt.xlabel('Epoch')
plt.ylabel('sum-square-error')
plt.title('Adaline learning rate 0.01 standardized data')
plt.tight_layout()
plt.show()
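
The examples above rely on an AdalineGD class (constructor parameters eta and n_iter, a fit method, and a cost_ attribute) and a plot_decision_regions helper, neither of which is defined in this section. A minimal sketch of both, assuming the conventional batch-gradient-descent Adaline formulation; names not used above (random_state, w_, net_input, resolution) are assumptions:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap


class AdalineGD:
    """ADAptive LInear NEuron trained with full-batch gradient descent (sketch)."""

    def __init__(self, eta=0.01, n_iter=50, random_state=1):
        self.eta = eta                  # learning rate
        self.n_iter = n_iter            # number of passes over the training set
        self.random_state = random_state

    def fit(self, X, y):
        rgen = np.random.RandomState(self.random_state)
        self.w_ = rgen.normal(loc=0.0, scale=0.01, size=1 + X.shape[1])
        self.cost_ = []
        for _ in range(self.n_iter):
            output = self.net_input(X)          # linear (identity) activation
            errors = y - output
            # one batch gradient step over all samples
            self.w_[1:] += self.eta * X.T.dot(errors)
            self.w_[0] += self.eta * errors.sum()
            self.cost_.append((errors ** 2).sum() / 2.0)
        return self

    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        return np.where(self.net_input(X) >= 0.0, 1, -1)


def plot_decision_regions(X, y, classifier, resolution=0.02):
    """Plot a 2-D decision surface of a fitted classifier plus the samples (sketch)."""
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])

    # classify every point of a grid covering the feature space
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())

    # overlay the training samples, one marker and color per class
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
                    alpha=0.8, c=colors[idx],
                    marker=markers[idx], label=str(cl))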