Example #1
def test_invalid_class():
    ppn = Perceptron(epochs=40, eta=0.01, random_seed=1)
    try:
        ppn.fit(X, y2)  # {-2, 1} class labels are not supported
        assert False, 'fit should have raised a ValueError'
    except ValueError:
        pass
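
# The try/except pattern above can also be written with pytest's context
# manager. A minimal sketch, assuming pytest is available (it is not used by
# the original snippet):
import pytest

def test_invalid_class_pytest():
    ppn = Perceptron(epochs=40, eta=0.01, random_seed=1)
    with pytest.raises(ValueError):
        ppn.fit(X, y2)  # labels outside the supported set must raise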
Example #2
def test_standardized_iris_data():
    ppn = Perceptron(epochs=15, eta=0.01, random_seed=1)
    ppn.fit(X_std, y1)  # -1, 1 class
    assert (y1 == ppn.predict(X_std)).all()
Example #3
    def __init__(self, eta=0.1, epochs=50, random_seed=None, print_progress=0):
        epochs = int(epochs)

        # silence mlxtend's FutureWarnings before delegating to both parents
        warnings.filterwarnings(module='mlxtend*',
                                action='ignore',
                                category=FutureWarning)

        _Perceptron.__init__(self, eta, epochs, random_seed, print_progress)
        BaseWrapperClf.__init__(self)
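
# For context: the __init__ above is a method of a wrapper class that combines
# mlxtend's Perceptron with a scikit-learn-compatible base class. A minimal
# sketch of the surrounding declaration (the class name and the stand-in
# BaseWrapperClf are assumptions inferred from the snippet, not library code):
import warnings
from mlxtend.classifier import Perceptron as _Perceptron

class BaseWrapperClf:
    def __init__(self):
        pass  # stand-in for the real base class used by the snippet

class PerceptronWrapper(_Perceptron, BaseWrapperClf):
    pass  # the __init__ shown above would live here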
Example #4
def test_invalid_labels_2():
    y1 = np.where(y0 == 0, -1, 1)
    ppn = Perceptron(epochs=15, eta=0.01, random_seed=1)

    assert_raises(AttributeError,
                  'y array must not contain negative labels.\nFound [-1  1]',
                  ppn.fit, X, y1, {(-1, 1)})
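
# assert_raises here is mlxtend's test helper; a minimal sketch of what such
# a helper does (signature inferred from the calls in these examples, not the
# actual mlxtend.utils implementation):
def assert_raises_sketch(exc_type, message, func, *args):
    try:
        func(*args)
    except exc_type as exc:
        assert str(exc) == message, str(exc)
    else:
        raise AssertionError('%s was not raised' % exc_type.__name__)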
Example #5
def test_invalid_labels_1():
    y1 = np.where(y0 == 0, 2, 1)
    ppn = Perceptron(epochs=15, eta=0.01, random_seed=1)
    assert_raises(AttributeError,
                  'Labels not in {(0, 1)}.\nFound (1, 2)',
                  ppn.fit,
                  X,
                  y1,
                  {(0, 1)})
Example #6
def test_invalid_labels_1():
    y1 = np.where(y0 == 0, 2, 1)
    ppn = Perceptron(epochs=15, eta=0.01, random_seed=1)

    if sys.version_info >= (3, 0):
        objtype = '{(0, 1)}'
    else:
        objtype = 'set([(0, 1)])'

    expect = 'Labels not in %s.\nFound (1, 2)' % objtype

    assert_raises(AttributeError, expect, ppn.fit, X, y1, {(0, 1)})
Example #7
def test_0_1_class_iris_data():
    ppn = Perceptron(epochs=40, eta=0.01, random_seed=1)
    ppn.fit(X, y0)  # 0, 1 class
    assert (y0 == ppn.predict(X)).all()
Example #8
from mlxtend.plotting import plot_decision_regions  # moved here from mlxtend.evaluate in newer mlxtend releases
from mlxtend.classifier import Perceptron
from mlxtend.classifier import Adaline
from mlxtend.classifier import MultiLayerPerceptron
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
import matplotlib.gridspec as gridspec
import itertools

gs = gridspec.GridSpec(2, 2)  # 2x2 grid of subplots
X, y = make_moons(n_samples=100, random_state=123)
fig = plt.figure(figsize=(10, 8))

ppn = Perceptron(epochs=50, eta=0.05, random_seed=0)
ppn.fit(X, y)
ada = Adaline(epochs=50, eta=0.05, random_seed=0)
ada.fit(X, y)

mlp = MultiLayerPerceptron(n_output=len(np.unique(y)),
                           n_features=X.shape[1],
                           n_hidden=150,
                           l2=0.0,
                           l1=0.0,
                           epochs=500,
                           eta=0.01,
                           alpha=0.0,
                           decrease_const=0.0,
                           minibatches=1,
                           shuffle_init=False)
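
# The snippet is cut off by the page here. Given the otherwise unused
# gridspec/itertools imports, a plausible continuation (a sketch, not the
# original code) fits the MLP and plots each classifier's decision regions
# into the 2x2 grid:
mlp.fit(X, y)
for clf, lab, grd in zip([ppn, ada, mlp],
                         ['Perceptron', 'Adaline', 'MLP'],
                         itertools.product([0, 1], repeat=2)):
    plt.subplot(gs[grd[0], grd[1]])
    plot_decision_regions(X=X, y=y, clf=clf)
    plt.title(lab)
plt.show()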
Example #9
# Author: Sebastian Raschka <sebastianraschka.com>
#
# License: BSD 3 clause

from mlxtend.classifier import Perceptron
from mlxtend.data import iris_data
import numpy as np
from nose.tools import raises

# Iris Data
X, y = iris_data()
X = X[:, [0, 3]]  # sepal length and petal width
X = X[0:100]  # class 0 and class 1
y0 = y[0:100]  # class 0 and class 1
y1 = np.where(y[0:100] == 0, -1, 1)  # class -1 and class 1
y2 = np.where(y[0:100] == 0, -2, 1)  # class -2 and class 1

# standardize
X_std = np.copy(X)
X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()


def test_standardized_iris_data():
    ppn = Perceptron(epochs=15, eta=0.01, random_seed=1)
    ppn = ppn.fit(X_std, y1)  # -1, 1 class
    assert (y1 == ppn.predict(X_std)).all()


test_standardized_iris_data()
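
# Side note: the two per-column standardization lines above can be written as
# a single vectorized step (equivalent, shown only for clarity):
X_std = (X - X.mean(axis=0)) / X.std(axis=0)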
Example #10
def test_array_dimensions():
    ppn = Perceptron(epochs=15, eta=0.01, random_seed=1)
    # a 1-D feature array is invalid input; fit is expected to raise here
    ppn = ppn.fit(np.array([1, 2, 3]), [-1])
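
# A single feature column becomes valid 2-D input after a reshape (a usage
# sketch with three samples of one feature each):
X_one_feature = np.array([1, 2, 3]).reshape(-1, 1)  # shape (3, 1)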
Example #11
def test_standardized_iris_data_with_zero_weights():
    ppn = Perceptron(epochs=15, eta=0.01, random_seed=1, zero_init_weight=True)
    ppn = ppn.fit(X_std, y1)  # -1, 1 class
    assert (y1 == ppn.predict(X_std)).all()
Example #12
def test_progress_3():
    ppn = Perceptron(epochs=15, eta=0.01, random_seed=1, print_progress=3)
    ppn = ppn.fit(X_std, y0)
Example #13
def test_nonstandardized_iris_data():
    ppn = Perceptron(epochs=100, eta=0.01, random_seed=1)
    ppn = ppn.fit(X, y0)
    assert (y0 == ppn.predict(X)).all()
Example #14
# Loading Data

X, y = iris_data()
X = X[:, [0, 3]]  # sepal length and petal width
X = X[0:100]  # class 0 and class 1
y = y[0:100]  # class 0 and class 1

# standardize
X[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
X[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()
# Note that this implementation of the Perceptron expects binary class labels in {0, 1}.

# Rosenblatt Perceptron

ppn = Perceptron(
    epochs=5,  # number of passes over the training set (default: 50)
    eta=0.05,  # learning rate between 0.0 and 1.0 (default: 0.1)
    random_seed=0,
    print_progress=3)
ppn.fit(X, y)

plot_decision_regions(X, y, clf=ppn)
plt.title('Perceptron - Rosenblatt Perceptron Rule')
plt.show()

print('Bias & Weights: %s' % ppn.w_)

plt.plot(range(len(ppn.cost_)), ppn.cost_)
plt.xlabel('Iterations')
plt.ylabel('Misclassifications')
plt.show()
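
# As noted above, this Perceptron expects class labels in {0, 1}. Data coming
# with {-1, 1} labels can be remapped first (a sketch; assumes numpy is
# imported as np and y_pm is a hypothetical array of -1/1 labels):
y01 = np.where(y_pm == -1, 0, 1)  # -1 -> 0, 1 stays 1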
Example #15
def test_nonstandardized_iris_data():
    ppn = Perceptron(epochs=40, eta=0.01, random_seed=1)
    ppn.fit(X, y1)  # -1, 1 class
    assert (y1 == ppn.predict(X)).all()
Example #16
def test_score_function():
    ppn = Perceptron(epochs=15, eta=0.01, random_seed=1, shuffle=True)
    ppn = ppn.fit(X_std, y1)  # -1, 1 class
    acc = ppn.score(X_std, y1)
    assert acc == 1.0, acc
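
# score reports plain accuracy, so it is equivalent to comparing predictions
# with the labels directly (a sketch reusing the fitted ppn from above):
acc_manual = (ppn.predict(X_std) == y1).mean()  # equals ppn.score(X_std, y1)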
Example #17
def test_ary_persistency_in_shuffling():
    # fitting (which may shuffle internally) must not modify X in place
    orig = X.copy()
    ppn = Perceptron(epochs=40, eta=0.05, random_seed=1)
    ppn = ppn.fit(X, y0)  # 0, 1 class
    np.testing.assert_almost_equal(orig, X, 6)
Example #18
def test_standardized_iris_data():
    ppn = Perceptron(epochs=15, eta=0.01, random_seed=1)
    ppn = ppn.fit(X_std, y0)
    assert (y0 == ppn.predict(X_std)).all(), ppn.predict(X_std)
Example #19
def test_standardized_iris_data_with_shuffle():
    ppn = Perceptron(epochs=15, eta=0.01, random_seed=1, shuffle=True)
    ppn = ppn.fit(X_std, y1)  # -1, 1 class
    assert (y1 == ppn.predict(X_std)).all()
Example #20
def test_score_function():
    ppn = Perceptron(epochs=15, eta=0.01, random_seed=1)
    ppn = ppn.fit(X_std, y0)
    acc = ppn.score(X_std, y0)
    assert acc == 1.0, acc
Example #21
def test_clone():
    ppn = Perceptron()
    clone(ppn)
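
# sklearn.base.clone builds a new, unfitted estimator with the same
# constructor parameters; the test passes only because mlxtend's Perceptron
# follows the scikit-learn estimator API. A usage sketch:
from sklearn.base import clone
ppn = Perceptron(epochs=15, eta=0.01)
ppn_copy = clone(ppn)  # unfitted copy carrying identical parameters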
Example #22
def test_0_1_class_iris_data():
    ppn = Perceptron(epochs=40, eta=0.05, random_seed=1)
    ppn = ppn.fit(X, y0)  # 0, 1 class
    print(y0)
    print(ppn.predict(X))
    assert (y0 == ppn.predict(X)).all()
Example #23
from mlxtend.data import iris_data
from mlxtend.plotting import plot_decision_regions
from mlxtend.classifier import Perceptron
import matplotlib.pyplot as plt
import numpy as np

# Loading Data

X, y = iris_data()
X = X[:, [0, 3]] # sepal length and petal width
#print(X)
X = X[0:100] # class 0 and class 1
print(X.shape)
y = y[0:100] # class 0 and class 1
print(y.shape)

# standardize
X[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
X[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()
print(X)

# Rosenblatt Perceptron

ppn = Perceptron(epochs=5,
                 eta=0.05,
                 random_seed=0,
                 print_progress=3)
ppn.fit(X, y)

# note: new samples should be standardized with the training mean/std before
# calling predict; see the sketch below
x2 = np.array([[0.35866332, 0.91401319], [5.7, 1.3]])
print("\n", ppn.predict(x2))
Example #24
# Loading Data

X, y = wine_data()
X = X[:, [3, 12]] # hue, ash
X = X[0:100] # class 0 and class 1
y = y[0:100] # class 0 and class 1

# standardize
X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std()
X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std()


# Rosenblatt Perceptron

ppn = Perceptron(epochs=500,
                 eta=0.05,
                 random_seed=1,
                 print_progress=3)
ppn.fit(X, y)


plot_decision_regions(X, y, clf=ppn)
plt.title('Wine Data - Hue and Ash 500 Epochs')
plt.show()

print('Bias & Weights: %s' % ppn.w_)

plt.plot(range(len(ppn.cost_)), ppn.cost_)
plt.xlabel('Iterations')
plt.ylabel('Misclassifications')
plt.show()
Example #25
    def identificador(self):
        # mark positional character matches between self.word and each element
        # of Elementos (the word itself and the string it is compared against)
        w_prueba = []
        Elementos = [self.word, self.comparar]
        for j in Elementos:
            for i in range(min(len(self.word), len(j))):
                if self.word[i] == j[i]:
                    w_prueba.append(1)
                else:
                    w_prueba.append(0)

        auxiliar_x = sum(w_prueba[:len(self.word)])
        auxiliar_x2 = sum(w_prueba[len(self.word):len(self.word) +
                                   len(self.comparar)])

        # small hand-built training set of (length, overlap) rows: the first
        # four (label 0) use the word's own overlap counts, the last four
        # (label 1) use perturbed lengths; dtype=float so the in-place
        # standardization below is not truncated to integers
        X = np.array([[len(self.word),
                       sum(w_prueba[:len(self.word)])],
                      [len(self.word),
                       sum(w_prueba[:len(self.word)])],
                      [len(self.word),
                       sum(w_prueba[1:len(self.word)])],
                      [len(self.word),
                       sum(w_prueba[1:len(self.word)])],
                      [len(self.word) + 1,
                       len(self.word) - 1],
                      [len(self.word) - 1,
                       len(self.word) - 1],
                      [len(self.word) + 1,
                       len(self.word) - 2],
                      [len(self.word) - 1,
                       len(self.word) - 2]], dtype=float)
        X[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
        X[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()
        y = np.array([0, 0, 0, 0, 1, 1, 1, 1])

        ppn = Perceptron(epochs=5, eta=0.05, random_seed=0, print_progress=3)
        ppn.fit(X, y)

        # classify the actual word/comparison pair with the fitted model
        X2 = np.array([[len(self.word), auxiliar_x],
                       [len(self.word) + 1,
                        len(self.word) - 1],
                       [len(self.comparar), auxiliar_x2]], dtype=float)
        X2[:, 0] = (X2[:, 0] - X2[:, 0].mean()) / X2[:, 0].std()
        X2[:, 1] = (X2[:, 1] - X2[:, 1].mean()) / X2[:, 1].std()
        resultado = ppn.predict(X2)
        self.encontro = resultado[2]
        print("\n\n")
        os.system("clear")
        return self.encontro