def DeltaTrain(self, X, T, eta, maxIter, maxErrorRate):
    best = self
    bestError = 2
    bestIt = 0
    N = X.shape[1]  # number of training samples
    x0 = np.ones(N)[np.newaxis]
    plt.ion()  # interactive mode on
    for it in range(maxIter):
        Z = self.z(X)
        Y = self.neuron(X)
        err = ErrorRate(Y, T)
        if (it % 20) == 0:
            print('#{} err:{}\n{}\n{}\n{}\n{}'.format(it, err, self._W1, self._b1, self._W2, self._b2))
            nnwplot.plotTwoFeatures(X, T, self.neuron)
            # older Python: plt.pause(0.05)  # wait for the GUI event loop
        if err < bestError:
            bestError = err
            best = copy.copy(self)
            bestIt = it
        if err <= maxErrorRate:
            break
        # weight update via backpropagation
        deltaW1, deltaB1, deltaW2, deltaB2 = self.backprop(X, T, Y, Z)
        self._W1 -= eta * deltaW1 / N
        self._b1 -= eta * deltaB1 / N
        self._W2 -= eta * deltaW2 / N
        self._b2 -= eta * deltaB2 / N
    print('#{} err:{}\n{}\n{}\n{}\n{}'.format(it, err, self._W1, self._b1, self._W2, self._b2))
    nnwplot.plotTwoFeatures(X, T, self.neuron)
    # older Python: plt.pause(0.05)  # wait for the GUI event loop
    return bestError, bestIt
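
A minimal usage sketch for this two-layer variant; `TwoLayerNet` is a hypothetical name, since the snippet only shows the training method, not the class it belongs to, and the hyperparameters are illustrative:

net = TwoLayerNet(2, 3, 1)  # hypothetical class and constructor (input, hidden, output sizes)
# train on the first two Iris features with a binary target, as in the later examples
bestError, bestIt = net.DeltaTrain(X[:2, :], T < 1, eta=0.5, maxIter=1000, maxErrorRate=0.05)
print('best error {} at iteration {}'.format(bestError, bestIt))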
Example #2
    def DeltaTrain(self, X, T, eta, maxIter, maxErrorRate):
        N = X.shape[1]
        plt.ion()
        for i in range(maxIter):
            Y = self.neuron(X)  # classify
            err = ErrorRate(Y, T)  # calculate error rate
            print(err)
            if err < maxErrorRate:  # stop if maxErrorRate reached
                break
            deltaWkj = np.zeros(3)  # two input weights plus the bias weight
            for j in range(self._dIn):  # iterate over the input weights
                summe = 0
                for n in range(N):  # iterate over the training samples
                    summe += eta * (T[n] - Y[n]) * X[j, n]
                deltaWkj[j] = 1 / N * summe

            # update the bias weight (constant input of 1)
            summe = 0
            for n in range(N):
                summe += eta * (T[n] - Y[n]) * 1
            deltaWkj[2] = 1 / N * summe

            self._W[0, 0] += deltaWkj[0]
            self._W[0, 1] += deltaWkj[1]
            self._b[0, 0] += deltaWkj[2]
            nnwplot.plotTwoFeatures(X[:2], T, self.neuron)
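
The nested loops implement the delta rule Δw_j = (η/N) · Σ_n (t_n − y_n) · x_{j,n}. The same update can be written without loops, which is exactly what Example #3 below does; a sketch, assuming two input features as the hard-coded np.zeros(3) implies:

# vectorized equivalent of the loops above (cf. Example #3)
deltaW = eta * (T - Y).dot(X[:2].T) / N  # both input weights at once
deltaB = eta * (T - Y).sum() / N         # bias weight (constant input of 1)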
Example #3
def DeltaTrain(self, X, T, eta, maxIter, maxErrorRate):
    best = self
    bestError = 2
    bestIt = 0
    N = X.shape[1]  # number of training samples
    x0 = np.ones(N)[np.newaxis]
    plt.ion()  # interactive mode on
    for it in range(maxIter):
        Y = self.neuron(X)
        err = ErrorRate(Y, T)
        if (it % 20) == 0:
            print('#{} {} {} {}'.format(it, self._W, self._b, err))
            nnwplot.plotTwoFeatures(X, T, self.neuron)
            plt.pause(0.05)  # wait for the GUI event loop
        if err < bestError:
            bestError = err
            best = copy.copy(self)  # copy, so later updates don't overwrite the best state
            bestIt = it
        if err <= maxErrorRate:
            break
        # vectorized delta rule update
        self._W += eta * (T - Y).dot(X.T) / N
        self._b += eta * (T - Y).dot(x0.T) / N
    # restore the best weights (rebinding the local name `self` alone would have no effect)
    self._W, self._b = best._W, best._b
    print('#{} {} {} {}'.format(it, self._W, self._b, err))
    nnwplot.plotTwoFeatures(X, T, self.neuron)
    plt.pause(0.05)  # wait for the GUI event loop
    return bestError, bestIt
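
Usage of this vectorized variant, taken from Example #6 further down (training an SLN on the AND data):

Xund = np.array([[0, 0, 1, 1], [0, 1, 0, 1]])
Tund = np.array([0, 0, 0, 1])
slnUND = SLN(2, 1)
bestError, bestIt = slnUND.DeltaTrain(Xund, Tund, 0.1, 100, 0.01)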
Example #4
# -*- coding: utf-8 -*-
"""
Spyder Editor

This is a temporary script file.
"""

import numpy as np

# load the Iris data so X and T are defined (as in the other examples)
iris = np.loadtxt("iris.csv", delimiter=',')
X = iris[:, 0:4].T
T = iris[:, 4]

def neuron(X):
    N = X.shape[1]  # number of samples
    net = np.zeros(N)
    W = [-0.3, 1]  # example weights, as used in the later examples
    for n in range(0, N):
        for j in range(len(W)):
            net[n] += W[j] * X[j, n]
    return net > 2  # threshold, as in Example #5

import nnwplot
nnwplot.plotTwoFeatures(X[:2], T, neuron)
Example #5
plt.scatter(X[0, :], X[1, :], c=T, cmap=plt.cm.prism)


def neuron(X, W=[-0.3, 1]):

    no_Features = 2  # needs to match the size of W !
    A = 2

    N = X.shape[1]
    net = np.zeros(X.shape[1])

    for n in range(0, N):
        for j in range(0, no_Features):
            # accumulate the weighted sum over the features (see no_Features) for sample n
            net[n] += X[j, n] * W[j]
            # print(X[j,n], "--", W[j])

    return net > A


lala = neuron(X, [-0.2, 1])

# drop all but the first 2 features
X = X[:2, :]
nnwplot.plotTwoFeatures(X, T, neuron)

# weights tried interactively; note these module-level assignments do not affect
# neuron(), which reads W from its default parameter (see the sketch below)
W = [-0.2, 1]
W = [-0.1, 1]
W = [0, 1]
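
To actually see the effect of the different weights, they have to be passed to neuron explicitly; a minimal sketch (the wrapper lambda is not part of the original):

# re-plot for each candidate weight vector by passing W explicitly
for Wtry in ([-0.2, 1], [-0.1, 1], [0, 1]):
    nnwplot.plotTwoFeatures(X, T, lambda X: neuron(X, Wtry))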
Example #6
        # (tail of SLN.DeltaTrain; the full method is shown in Examples #1 and #3)
        plt.pause(0.05)  # wait for the GUI event loop
        return bestError, bestIt


#%% Load the Iris data
iris = np.loadtxt("iris.csv", delimiter=',')
X = iris[:, 0:4].T
T = iris[:, 4]

#%% Test with the values from the previous exercise sheet
sln = SLN(2, 1)
sln._W = np.array([-1, 1])
sln._b = np.array([3])

plt.figure()
nnwplot.plotTwoFeatures(X[:2, :], T, sln.neuron)

plt.figure()
nnwplot.plotTwoFeatures(X[:2, :], T, sln.neuron_mit_for)

ErrorRate(sln.neuron(X[:2, :]), T < 1)
ErrorRate(sln.neuron_mit_for(X[:2, :]), T < 1)

#%% AND data
Xund = np.array([[0, 0, 1, 1], [0, 1, 0, 1]])
Tund = np.array([0, 0, 0, 1])

#%% Training with the AND data
plt.figure()
slnUND = SLN(2, 1)
slnUND.DeltaTrain(Xund, Tund, 0.1, 100, 0.01)
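
The same call pattern works for the Iris data restricted to two features and a binary target; a sketch, with illustrative hyperparameters:

#%% Training with the Iris data (sketch; learning rate and iteration count are illustrative)
plt.figure()
slnIris = SLN(2, 1)
slnIris.DeltaTrain(X[:2, :], T < 1, 0.1, 200, 0.05)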
Example #7
"""

import numpy as np
import matplotlib.pyplot as plt
import nnwplot

data = np.loadtxt("iris.csv", delimiter=",")
# print(data.shape)

X = data[:, 0:4].T
# print(X.shape)

T = data[:, 4]  # 1-D target vector, as in the other examples
# print(T.shape)
plt.scatter(X[0, :], X[1, :], c=T, cmap=plt.cm.prism)


def neuron(X):
    N = X.shape[1]  # number of samples
    net = np.zeros(N)
    W = [-0.3, 1]
    threshold = 2

    for i in range(0, N):
        for j in range(0, 2):
            net[i] += W[j] * X[j, i]  # weighted sum of the first two features
    return net > threshold


nnwplot.plotTwoFeatures(X[0:2], T, neuron)
Example #8
def plotneuronWTH(W, TH):
    plt.figure()
    nnwplot.plotTwoFeatures(X[:2, :], T, neuronWTH(W, TH))
    plt.title('W: {} TH: {}'.format(W, TH))
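
A usage sketch, reusing the weights and threshold of the plain neuron from Example #9:

plotneuronWTH([-1, 1], -3)  # weights/threshold as in the plain neuron of Example #9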
Example #9
# head of the truncated snippet, reconstructed from the scatter calls in the other examples
from matplotlib import colors
plt.scatter(X[0, :], X[1, :], c=T,
            cmap=colors.ListedColormap(['red', 'green', 'blue']))


#%%
def neuron(X):
    net = np.zeros(X.shape[1])
    W = [-1, 1]
    for n in range(0, X.shape[1]):
        for j in range(0, X.shape[0]):
            net[n] += W[j] * X[j, n]
    return net >= -3


#%%
import nnwplot
nnwplot.plotTwoFeatures(X[:2, :], T, neuron)

#%% New plot window for the last two features (as an example)
plt.figure()
nnwplot.plotTwoFeatures(X[-2:, :], T, neuron)


#%% Variant with a closure
def neuronWTH(W, TH):
    def neuron(X):
        N = X.shape[1]  # number of samples/feature vectors
        net = np.zeros(N)
        for n in range(N):
            for j in range(0, X.shape[0]):
                net[n] += W[j] * X[j, n]
        return net >= TH
    return neuron  # return the inner function so the closure is usable (e.g. in Example #8)
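
With the closure returning its inner function, it can be handed straight to the plot helper, which is what plotneuronWTH in Example #8 does:

myNeuron = neuronWTH([-1, 1], -3)  # same weights/threshold as the plain neuron above
nnwplot.plotTwoFeatures(X[:2, :], T, myNeuron)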