Example no. 1
def run_Adaline():
    # NOTE: assumes collect_data() also returns the feature matrix
    # (Dataset_Of_Features), which the original code left undefined.
    (Dataset_Of_Features, Feature, Classes, Number_Of_Epochs, Learning_rate,
     Bias, threshold) = collect_data()
    adaline = Adaline(Dataset_Of_Features, int(Number_Of_Epochs),
                      float(Learning_rate), Classes, Feature, float(threshold),
                      Bias)
    adaline.classify()
def adalineModel():
    try:
        x_train, y_train, isBiased, learningRate, epochNum, MSE_Threshold, x_test, y_test = modelOperations(
            'adaline')

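        # W is the weight vector learned by Adaline.train.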
        W = Adaline.train(x_train, y_train, isBiased, learningRate, epochNum,
                          MSE_Threshold)

        labels = [firstClassCB.get(), secondClassCB.get()]
        Adaline.test(x_test, y_test, W, labels)
    except Exception as exc:
        # Report the failure instead of silently swallowing it.
        print(exc)
Example no. 3
import numpy as np
import pandas as pd

import Adaline


class AdalineTest:
    ndata = 150
    nfeat = 4
    #dg = DataGenerator.DataGenerator(nfeat,ndata)
    #data = dg.generate()
    #data.to_csv('4d_test.data', index=False,header=False)
    #data=pd.read_csv('4d_test.data', header = None, encoding='utf-8')
    data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header = None, encoding='utf-8')
    data = data.sample(frac=1)  # shuffle the rows
    pData = data.iloc[:ndata, :nfeat].values
    labels = data.iloc[:ndata, nfeat].values
    # Binary target: Iris-virginica -> 1, everything else -> -1
    labels = np.where(labels == 'Iris-virginica', 1, -1)

    model = Adaline.Adaline()
    error_rate = 0
    # 10-fold cross-validation: each fold holds out 15 of the 150 shuffled samples.
    for i in range(10):
        error = 0
        training_data = np.concatenate((pData[:15 * i, :], pData[15 * (i + 1):, :]))
        training_labels = np.concatenate((labels[:15 * i], labels[15 * (i + 1):]))
        test_data = pData[15 * i:15 * (i + 1), :]
        model.fit(training_data, training_labels)
        for j in range(15):
            p = model.predict(test_data[j, :])
            if p != labels[i * 15 + j]:
                error += 1
        print("error rate: " + str(error / 15))
        error_rate += error / 15
    print("average error rate: " + str(error_rate / 10))
Example no. 4
def wsp_uczenia(learning_count, testing_count, learning_rate, max_epochs, max_errors, ranges):
    with open('badaniaLearningRate.txt', 'w') as file:
        file.write(
            'learning count; testing count; learning rate; max_epochs; max_error; range; '
            'max epochs of learning; mean epochs of learning; min epochs of learning; '
            'max percentage of good answers; mean percentage of good answers; '
            'min percentage of good answers\n')
        testing_set = generate(testing_count)
        learning_set = generate(learning_count)
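        # One entry per learning rate: the min / mean / max over the 10 runs.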
        epochs_min_set = []
        percentage_min_set = []
        epochs_mean_set = []
        percentage_mean_set = []
        epochs_max_set = []
        percentage_max_set = []
        for rate in learning_rate:
            epochs_mean = 0
            percentage_mean = 0
            epochs_max = 0
            epochs_min = 501       # sentinel; assumes max_epochs <= 500
            percentage_max = 0
            percentage_min = 101   # sentinel; percentages top out at 100
            # Repeat the experiment 10 times per rate and track min / mean / max.
            for i in range(10):
                print(i)
                adaline = Adaline(2, rate, max_epochs, -1, 1)
                epochs_of_learning = adaline.learn(learning_set, 0.3)  # 0.3 = max error
                percentage = adaline.test_adaline(testing_set)
                epochs_mean += epochs_of_learning
                percentage_mean += percentage
                if epochs_of_learning > epochs_max:
                    epochs_max = epochs_of_learning
                if epochs_of_learning < epochs_min:
                    epochs_min = epochs_of_learning
                if percentage > percentage_max:
                    percentage_max = percentage
                if percentage < percentage_min:
                    percentage_min = percentage
            epochs_mean /= 10
            percentage_mean /= 10
            epochs_min_set.append(epochs_min)
            percentage_min_set.append(percentage_min)
            epochs_mean_set.append(epochs_mean)
            percentage_mean_set.append(percentage_mean)
            epochs_max_set.append(epochs_max)
            percentage_max_set.append(percentage_max)
            file.write(
                f'{learning_count};{testing_count};{rate};{max_epochs};{0.3};[-1, 1];{epochs_max};{epochs_mean};{epochs_min};{percentage_max};{percentage_mean};{percentage_min}\n')
        show_learning_epochs(learning_rate, epochs_min_set, epochs_mean_set, epochs_max_set)
        show_learning_percentages(learning_rate, percentage_min_set, percentage_mean_set, percentage_max_set)
Example no. 5
import threading

from sklearn.datasets import load_digits

import Adaline
import OVA
import SecondOrderPerceptron
import SimplePerceptron
import SVM


class OVATester:
    digits = load_digits()
    svm = SVM.SVM()
    adaline = Adaline.Adaline()
    ova = OVA.OVA(svm)
    nex = 1000
    # Fit one-vs-all (with the SVM as base learner) on the first 1000 digits.
    ova.fit(digits.data[:nex], digits.target[:nex])
    error = 0
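    # Spot-check the next 15 unseen digits and count misclassifications.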
    for i in range(15):
        print(digits.target[nex + i])
        if ova.predict(digits.data[nex + i]) != digits.target[nex + i]:
            error += 1
    print(error)
    def train(self):
        if self.nn is not None and self.nn.is_finished is not True:
            print("--- already working")
            return

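        # Read the hyper-parameters from the GUI entry fields.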
        eta = float(self.e1.get())
        epochs = int(self.e2.get())
        percent = float(self.e3.get())
        threshold = int(self.e4.get())
        weight = str(self.e5.get())
        data_file = self.r1.get()
        learning_algorithm = self.r2.get()

        try:
            # Optional warm start: load initial weights from the given file.
            with open(weight, 'r') as ff:
                weight = [[float(i) for i in l.strip().split()] for l in ff]
            weight = SimplePerceptron.np.array(weight)
        except (OSError, ValueError):
            weight = None

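        # Instantiate the model selected in the GUI
        # (1 = perceptron, 2 = Adaline, 3 = second-order perceptron).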
        if learning_algorithm == 1:
            self.nn = SimplePerceptron.SimplePerceptron(
                eta, epochs, threshold, weight, self.plot)
        elif learning_algorithm == 2:
            self.nn = Adaline.Adaline(eta, epochs, threshold, weight,
                                      self.plot)
        elif learning_algorithm == 3:
            self.nn = SecondOrderPerceptron.SecondOrderPerceptron(
                eta, epochs, threshold, weight, self.plot)
        else:
            return

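        # Train on a background daemon thread so the UI stays responsive.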
        self.tt = threading.Thread(target=self._worker,
                                   args=(
                                       data_file,
                                       percent,
                                   ))
        self.tt.daemon = True
        self.tt.start()
Example no. 7
    def test_adaline(self):

        df = pd.read_csv(
            'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',
            header=None)
        y = df.iloc[0:100, 4].values
        y = np.where(y == 'Iris-setosa', -1, 1)
        X = df.iloc[0:100, [0, 2]].values

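        # Compare cost curves for two learning rates on the raw (unscaled) data.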
        fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 4))
        ada1 = Adaline(iter_max=10, eta=0.01).fit(X, y)
        ax[0].plot(range(1,
                         len(ada1.cost_) + 1),
                   np.log10(ada1.cost_),
                   marker='o')
        ax[0].set_xlabel('Epochs')
        ax[0].set_ylabel('log(Sum-squared-error)')
        ax[0].set_title('Adaline - Learning rate 0.01')
        ada2 = Adaline(iter_max=10, eta=0.0001).fit(X, y)
        ax[1].plot(range(1, len(ada2.cost_) + 1), ada2.cost_, marker='o')
        ax[1].set_xlabel('Epochs')
        ax[1].set_ylabel('Sum-squared-error')
        ax[1].set_title('Adaline - Learning rate 0.0001')
        plt.show()

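        # Standardize each feature to zero mean and unit variance; gradient
        # descent then converges even at the larger learning rate (0.01).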
        X_std = np.copy(X)
        X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
        X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()
        ada = Adaline(iter_max=15, eta=0.01)
        ada.fit(X_std, y)
        plot_decision_regions(X_std, y, classifier=ada)
        plt.title('Adaline - Gradient Descent')
        plt.xlabel('sepal length [standardized]')
        plt.ylabel('petal length [standardized]')
        plt.legend(loc='upper left')
        plt.show()
        plt.plot(range(1, len(ada.cost_) + 1), ada.cost_, marker='o')
        plt.title('Adaline - Learning rate 0.01')
        plt.xlabel('Epochs')
        plt.ylabel('Sum-squared-error')
        plt.show()
Example no. 8
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import Adaline as ppn

import plot_decision_regions as pdr

df = pd.read_csv('iris.data', header=None)
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-versicolor', -1, 1)
X = df.iloc[0:100, [0, 2]].values  # get first 100 rows and col0 and col2

fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 4))

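# Left: eta 0.01 with log-scale cost; right: eta 0.0001 with raw cost.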
ppn1 = ppn.Adaline(0.01, 10)
ppn1.fit(X, y)
ax[0].plot(range(1, len(ppn1.cost_) + 1), np.log10(ppn1.cost_), marker='*')
ax[0].set_xlabel('Epochs')
ax[0].set_ylabel('log(Sum-squared-error)')
ax[0].set_title('Adaline - Learning rate {}'.format(ppn1.eta))

ppn2 = ppn.Adaline(0.0001, 10)
ppn2.fit(X, y)
ax[1].plot(range(1, len(ppn2.cost_) + 1), ppn2.cost_, marker='o')
ax[1].set_xlabel('Epochs')
ax[1].set_ylabel('Sum-squared-error')
ax[1].set_title('Adaline - Learning rate {}'.format(ppn2.eta))
plt.show()

# standardization: rescale each feature to zero mean and unit variance so that
# gradient descent also converges at the larger learning rate
X_std = np.copy(X)
X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()

# train the Adaline on the standardized Iris data
ada = ppn.Adaline(0.01, 15)
ada.fit(X_std, y)

# plot the decision regions and the data
# (assumes the plot_decision_regions module exposes a function of the same name)
pdr.plot_decision_regions(X_std, y, classifier=ada)
plt.title('Adaline - Gradient Descent')
plt.xlabel('sepal length [standardized]')
plt.ylabel('petal length [standardized]')
plt.show()
Example no. 10
import Adaline as ada
import numpy as np

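# Two-input Adaline with a step activation, trained on the AND truth table.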
per = ada.Adaline(2, ada.step)

# Each row is one AND pattern; the leading 1 is the bias input.
and_input = np.array([[1, 0, 0], [1, 0, 1], [1, 1, 0],
                      [1, 1, 1]]).astype(np.float64)

and_output = np.array([0, 0, 0, 1]).astype(np.int64)

n = and_input.shape[0]
cnt = 4
# Keep looping over the patterns until a full pass produces zero error.
while cnt != 0:
    cnt = 0

    for i in range(n):
        differ = per.train(and_input[i], and_output[i])
        cnt += differ * differ

print('\n', per, sep='')
Example no. 11
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

import Adaline

# df is assumed to hold the raw Iris dataset, as in the other examples
df = pd.read_csv('iris.data', header=None)

# extract the target variable from the first 100 rows (iloc selects by position)
y = df.iloc[0:100, 4].values
# print(y)

# convert Iris-setosa to -1 and versicolor to 1
# np.where(condition, value_if_true, value_if_false)
y = np.where(y == 'Iris-setosa', -1, 1)
# print(y)

# extract only columns 0 and 2: sepal length and petal length
X = df.iloc[0:100, [0, 2]].values

# subplots draws several charts in one figure: subplots(nrows, ncols, ...)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))

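# Both panels plot the log-scale cost: eta=0.01 (left) vs eta=0.0001 (right).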
ada1 = Adaline.AdalineGD(n_iter=10, eta=0.01)
ada1.fit(X, y)
ax[0].plot(range(1, len(ada1.cost_) + 1), np.log10(ada1.cost_), marker='o')
ax[0].set_xlabel('Epochs')
ax[0].set_ylabel('log(Sum-squared-errors)')
ax[0].set_title('Adaline learning rate 0.01')

ada2 = Adaline.AdalineGD(n_iter=10, eta=0.0001)
ada2.fit(X, y)
ax[1].plot(range(1, len(ada2.cost_) + 1), np.log10(ada2.cost_), marker='o')
ax[1].set_xlabel('Epochs')
ax[1].set_ylabel('log(Sum-squared-errors)')
ax[1].set_title('Adaline learning rate 0.0001')

plt.show()
Example no. 12
import Adaline

PATH = "iris.csv"

adaline = Adaline.Adaline(path=PATH,
                          number_features=2,
                          col1=0,
                          col2=1,
                          learning_rate=0.001,
                          number_epoch=10000)

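# Train on feature columns 0 and 1, then report accuracy and plot the result.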
adaline.start_train()

taxa = adaline.show_accuracy()
print('Accuracy: %.2f%%' % (taxa * 100))

adaline.plot()
Example no. 13
import numpy as np
import Adaline
import matplotlib.pyplot as plt


def readData(filename, delimiter, inputsNumber):
    # Min-max normalize the whole array into [0, 1], then split each row
    # into an (inputs, targets) pair.
    data = np.genfromtxt(filename, delimiter=delimiter)
    normalizedData = (data - data.min()) / (data.max() - data.min())
    return [(np.asarray(item[:inputsNumber]), np.asarray(item[inputsNumber:]))
            for item in normalizedData]


iterations = 100000
inputsNumber = 1
learningRate = 0.001
data = readData('data.txt', ',', inputsNumber)
adaline = Adaline.Adaline(inputsNumber)
counter = 0
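# Online training: present one sample per iteration, cycling through the data.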
for i in range(0, iterations):
    x, y = data[counter][0], data[counter][1]
    output = adaline.getOutput(x)
    adaline.updateWeights(x, output, y, learningRate)
    counter += 1
    if counter >= len(data):
        counter = 0

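# Plot the learned curve (red) against the training points (blue dots).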
x = np.arange(0.0, 1.0, 0.02)
plt.plot(x, [adaline.getOutput(np.array([value])) for value in x], 'r')
plt.plot([item[0] for item in data], [item[1] for item in data], 'b.')
plt.show()
Example no. 14
import Adaline as ada
import numpy as np

# Initialize the Adaline unit (25 inputs plus bias, sigmoid activation)
per = ada.Adaline(25, ada.sigmoid)

# Read the test input for the letter A: three 25-value patterns separated by
# blank lines; column 0 of each row is the bias input, fixed at 1.
f = open('A_input_test.txt')
A_input_test = np.ones((3, 26)).astype(np.float64)
A_input_test[0][0] = 1
A_input_test[1][0] = 1
A_input_test[2][0] = 1
i = 0
j = 1
for line in f:
    values = list(map(int, line.split()))

    if len(values) == 0:
        # A blank line starts the next pattern.
        i += 1
        j = 1
    else:
        for val in values:
            A_input_test[i][j] = val
            j += 1
A_out_test = np.array([1, 1, 1])

# Read the training input for the letter A
f = open('A_input_train.txt')
A_input_train = np.ones((3, 26)).astype(np.float64)
A_input_train[0][0] = 1
A_input_train[1][0] = 1