import numpy as np

from mlxtend.classifier import Adaline


def test_0_1_class():
    t1 = np.array([0.51, -0.04, 0.51])
    ada = Adaline(epochs=30, eta=0.01, learning='sgd', random_seed=1)
    ada.fit(X_std, y0)
    np.testing.assert_almost_equal(ada.w_, t1, 2)
    assert (y0 == ada.predict(X_std)).all()
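# NOTE: the tests in this section reference X, y, X_std, y0, y1, and y2 without
# defining them. The block below is an assumed reconstruction (a sketch, not the
# original setup) of that shared module-level data; the loading and
# standardization mirror the example code at the end of this section, while the
# exact label encodings for y1 and y2 are assumptions.
from mlxtend.data import iris_data

X, y = iris_data()
X = X[:, [0, 3]]               # sepal length and petal width
X = X[0:100]                   # class 0 and class 1 only
y = y[0:100]
y0 = y                         # labels 0 and 1
y1 = np.where(y == 0, -1, 1)   # labels recoded to -1 and 1 (assumed)
y2 = np.where(y == 0, -2, 1)   # deliberately invalid labels (assumed)

# standardize both features
X_std = np.copy(X)
X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()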
def test_stochastic_gradient_descent():
    t1 = np.array([0.03, -0.09, 1.02])
    ada = Adaline(epochs=30, eta=0.01, learning='sgd', random_seed=1)
    ada.fit(X_std, y1)
    np.testing.assert_almost_equal(ada.w_, t1, 2)
    assert (y1 == ada.predict(X_std)).all()
def test_print_progress_2():
    ada = Adaline(epochs=30, eta=0.01, minibatches=1,
                  print_progress=2, random_seed=1)
    ada.fit(X_std, y1)
def test_gradient_descent():
    t1 = np.array([-5.21e-16, -7.86e-02, 1.02e+00])
    ada = Adaline(epochs=30, eta=0.01, learning='gd', random_seed=1)
    ada.fit(X_std, y1)
    np.testing.assert_almost_equal(ada.w_, t1, 2)
    assert (y1 == ada.predict(X_std)).all()
def test_print_progress_3():
    ada = Adaline(epochs=30, eta=0.01, minibatches=1,
                  print_progress=3, random_seed=1)
    ada.fit(X_std, y1)
def test_refit_weights():
    t1 = np.array([[-0.08], [1.02]])
    ada = Adaline(epochs=15, eta=0.01, minibatches=1, random_seed=1)
    ada.fit(X_std, y1, init_params=True)
    ada.fit(X_std, y1, init_params=False)
    np.testing.assert_almost_equal(ada.w_, t1, 2)
    assert (y1 == ada.predict(X_std)).all()
def test_refit_weights():
    t1 = np.array([-5.21e-16, -7.86e-02, 1.02e+00])
    ada = Adaline(epochs=15, eta=0.01, solver='gd', random_seed=1)
    ada.fit(X_std, y1, init_weights=True)
    ada.fit(X_std, y1, init_weights=False)
    np.testing.assert_almost_equal(ada.w_, t1, 2)
    assert (y1 == ada.predict(X_std)).all()
def test_gradient_descent():
    t1 = np.array([[-0.08], [1.02]])
    b1 = np.array([0.00])
    ada = Adaline(epochs=30, eta=0.01, minibatches=1, random_seed=1)
    ada.fit(X_std, y1)
    np.testing.assert_almost_equal(ada.w_, t1, decimal=2)
    np.testing.assert_almost_equal(ada.b_, b1, decimal=2)
    assert (y1 == ada.predict(X_std)).all()
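# For context on what the gradient-descent tests exercise: Adaline updates its
# parameters along the negative gradient of the sum-of-squared-errors cost,
# computed on the linear activation (no threshold). The helper below is a
# minimal sketch of one full-batch step; `adaline_gd_step` is a hypothetical
# name, not mlxtend's actual implementation.
def adaline_gd_step(w, b, X, y, eta):
    errors = y - (X.dot(w) + b)      # residuals of the linear activation
    w = w + eta * X.T.dot(errors)    # weight update: eta * X^T (y - Xw - b)
    b = b + eta * errors.sum()       # bias update
    return w, b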
def test_ary_persistency_in_shuffling():
    orig = X_std.copy()
    ada = Adaline(epochs=30, eta=0.01, minibatches=len(y), random_seed=1)
    ada.fit(X_std, y1)
    np.testing.assert_almost_equal(orig, X_std, 6)
def test_invalid_class():
    ada = Adaline(epochs=40, eta=0.01, random_seed=1)
    try:
        ada.fit(X, y2)  # y2 holds labels outside the expected 0/1 classes
        assert False, 'expected a ValueError for invalid class labels'
    except ValueError:
        pass
def test_normal_equation():
    t1 = np.array([[-0.08], [1.02]])
    b1 = np.array([0.00])
    ada = Adaline(epochs=30, eta=0.01, minibatches=None, random_seed=None)
    ada.fit(X_std, y1)
    np.testing.assert_almost_equal(ada.w_, t1, decimal=2)
    np.testing.assert_almost_equal(ada.b_, b1, decimal=2)
    assert (y1 == ada.predict(X_std)).all(), ada.predict(X_std)
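# The normal-equation tests fit the same model in closed form. For reference,
# the least-squares solution can be computed directly with NumPy; this is a
# sketch that handles the bias via a column of ones (mlxtend's internal bias
# handling may differ), and `closed_form_weights` is a hypothetical helper,
# not part of mlxtend.
def closed_form_weights(X, y):
    Xb = np.hstack((np.ones((X.shape[0], 1)), X))           # prepend bias column
    theta = np.linalg.pinv(Xb.T.dot(Xb)).dot(Xb.T).dot(y)   # (X'X)^+ X'y
    return theta[1:], theta[0]                               # weights, bias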
def test_score_function():
    ada = Adaline(epochs=30, eta=0.01, minibatches=1, random_seed=1)
    ada.fit(X_std, y1)
    acc = ada.score(X_std, y1)
    assert acc == 1.0, acc
def test_stochastic_gradient_descent():
    t1 = np.array([[-0.08], [1.02]])
    ada = Adaline(epochs=30, eta=0.01, minibatches=len(y), random_seed=1)
    ada.fit(X_std, y1)
    np.testing.assert_almost_equal(ada.w_, t1, 2)
    assert (y1 == ada.predict(X_std)).all()
def test_normal_equation():
    t1 = np.array([-5.21e-16, -7.86e-02, 1.02e+00])
    ada = Adaline(epochs=30, eta=0.01, minibatches=None, random_seed=1)
    ada.fit(X_std, y1)
    np.testing.assert_almost_equal(ada.w_, t1, 2)
    assert (y1 == ada.predict(X_std)).all()
def test_standardized_iris_data_with_shuffle():
    t1 = np.array([-5.21e-16, -7.86e-02, 1.02e+00])
    ada = Adaline(epochs=30, eta=0.01, solver='gd',
                  random_seed=1, shuffle=True)
    ada.fit(X_std, y1)
    np.testing.assert_almost_equal(ada.w_, t1, 2)
    assert (y1 == ada.predict(X_std)).all()
def test_standardized_iris_data_with_zero_weights():
    t1 = np.array([-5.21e-16, -7.86e-02, 1.02e+00])
    ada = Adaline(epochs=30, eta=0.01, minibatches=1,
                  random_seed=1, zero_init_weight=True)
    ada.fit(X_std, y1)
    np.testing.assert_almost_equal(ada.w_, t1, 2)
    assert (y1 == ada.predict(X_std)).all()
def test_standardized_iris_data_with_zero_weights():
    t1 = np.array([-5.21e-16, -7.86e-02, 1.02e+00])
    ada = Adaline(epochs=30, eta=0.01, solver='gd',
                  random_seed=1, zero_init_weight=True)
    ada.fit(X_std, y1)
    np.testing.assert_almost_equal(ada.w_, t1, 2)
    assert (y1 == ada.predict(X_std)).all()
def test_normal_equation():
    t1 = np.array([-5.21e-16, -7.86e-02, 1.02e+00])
    ada = Adaline(epochs=30, eta=0.01, solver='normal equation', random_seed=1)
    ada.fit(X_std, y1)
    np.testing.assert_almost_equal(ada.w_, t1, 2)
    assert (y1 == ada.predict(X_std)).all()
def test_array_dimensions():
    # fitting a single 1-D sample with a single target label
    ada = Adaline(epochs=15, eta=0.01, random_seed=1)
    ada = ada.fit(np.array([1, 2, 3]), [-1])
def test_invalid_solver():
    # 'bla' is not a valid solver; this fit call is expected to fail
    ada = Adaline(epochs=30, eta=0.01, solver='bla', random_seed=1)
    ada.fit(X_std, y1)
# ## Example 1 - Closed Form Solution

from mlxtend.data import iris_data
from mlxtend.plotting import plot_decision_regions
from mlxtend.classifier import Adaline
import matplotlib.pyplot as plt

# Loading Data
X, y = iris_data()
X = X[:, [0, 3]]  # sepal length and petal width
X = X[0:100]      # class 0 and class 1
y = y[0:100]      # class 0 and class 1

# standardize
X[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
X[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()

# Closed Form Solution
ada = Adaline(epochs=30, eta=0.01, minibatches=None, random_seed=1)
ada.fit(X, y)
plot_decision_regions(X, y, clf=ada)
plt.title('Adaline - Closed Form Solution')
plt.show()


# ## Example 2 - Gradient Descent

# (Stochastic) Gradient Descent
ada2 = Adaline(epochs=30, eta=0.01,
               minibatches=1,         # 1 for GD learning
               # minibatches=len(y),  # len(y) for SGD learning
               # minibatches=5,       # SGD learning w. minibatch size 20
               random_seed=1,
               print_progress=3)
ada2.fit(X, y)
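# Following the pattern of Example 1, the gradient-descent fit can be
# visualized the same way; the lines below are a sketch of that step, and the
# plot title is an assumption.
plot_decision_regions(X, y, clf=ada2)
plt.title('Adaline - Gradient Descent')
plt.show()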