for i in range(len(df['Entrada'])):
    RNAdata.addSample(float(df['Entrada'][i] / max(df['Entrada'])),
                      float(df['Saída'][i] /
                            max(df['Saída'])))  # normalizing the data to the 0..1 range
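
# A minimal vectorized sketch of the same 0..1 normalization (an editor's
# assumption that df holds numeric 'Entrada'/'Saída' columns); hoisting the
# max() calls out of the loop avoids recomputing them for every sample:
# entrada_norm = df['Entrada'] / df['Entrada'].max()
# saida_norm = df['Saída'] / df['Saída'].max()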

# Initializing the neural network
RNA = buildNetwork(
    1, 100, 500, 1000, 500, 100, 1, bias=True
)  # buildNetwork(input neurons, hidden-layer neurons ..., output neurons)
#print(RNA['in'])
#print(RNA['hidden0'])
#print(RNA['out'])
#print(RNA['bias'])

# Training
trainer = BackpropTrainer(RNA, RNAdata)
x_err = []  # list that stores the epoch indices
y_err = []  # list that stores the error in %
for i in range(2000):  # training the network for a fixed number of epochs
    y_err.append(trainer.train() * 100)
    x_err.append(i + 1)
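
# A minimal sketch of plotting the error curve collected above, assuming
# matplotlib is available (the original plotting code is not shown here):
# import matplotlib.pyplot as plt
# plt.plot(x_err, y_err)
# plt.xlabel('Epoch')
# plt.ylabel('Error (%)')
# plt.show()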

NetworkWriter.writeToFile(
    RNA, 'assets/treinoRNA/topologia02.xml')  # saving the trained network

# Simulating output data
#saida = RNA.activate([0.40])*max(df['Saída']) # converting the normalized value back to the scale of the output data
#print(saida)

# Creating input and output variables to plot the graph with the trained network
entrada = sorted([random() for x in range(100)])
Example no. 2
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure import *
from pybrain.tools.shortcuts import buildNetwork  # needed for buildNetwork below
import time

now = time.time()
print now

net = buildNetwork(2, 3, 1, bias=True, hiddenclass=TanhLayer)

ds = SupervisedDataSet(2, 1)
ds.addSample((0, 0), (0, ))
ds.addSample((0, 1), (1, ))
ds.addSample((1, 0), (1, ))
ds.addSample((1, 1), (0, ))

print ds

trainer = BackpropTrainer(net, ds)
d = 1
while d > 1e-5:
    d = trainer.train()

print("结果:")
print net.activate([0, 0])
print net.activate([0, 1])
print net.activate([1, 0])
print net.activate([1, 1])

print time.time() - now
Example no. 3
        DS.appendLinked(Xv[i, :].tolist()[0], [yv[i].A[0][0]])
    DS._convertToOneOfMany()
    return DS


DS_train = conv2DS(X_train, y_train)
DS_test = conv2DS(X_test, y_test)

# A neural network without a hidden layer will simulate logistic regression (albeit very slowly)
fnn = buildNetwork(DS_train.indim,
                   DS_train.outdim,
                   outclass=SoftmaxLayer,
                   bias=True)
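# (With only input and output sizes given, buildNetwork wires the input
# layer straight to the softmax output -- a single-layer network, which is
# why it behaves like multinomial logistic regression.)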
trainer = BackpropTrainer(fnn,
                          dataset=DS_train,
                          momentum=0.1,
                          verbose=True,
                          weightdecay=0.01)
# Train for 50 iterations.
for i in range(50):
    trainer.trainEpochs(1)
ote = fnn.activateOnDataset(DS_test)

ErrorRate = (np.argmax(ote, 1) != y_test.T).mean(dtype=float)
print('Error rate (ensemble): {0}%'.format(100 * ErrorRate))
figure(1)


def neval(xval):
    return argmax(fnn.activateOnDataset(conv2DS(np.asmatrix(xval))), 1)
Example no. 4

def MSP(X):
    weight = np.array([
        2. / 10.5, 2. / 10.5, 2. / 10.5, 1. / 10.5, 1. / 10.5, 0.5 / 10.5,
        0.5 / 10.5, 0.5 / 10.5, 0.5 / 10.5, 0.5 / 10.5
    ])
    return np.dot(X, weight)
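# (The numerators above sum to 10.5, so after dividing by 10.5 the weights
# form a convex combination: MSP returns a weighted average of the 10 inputs.)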


y = MSP(X)

for i in xrange(0, X.shape[0]):
    d.addSample(X[i, :], y[i])

tol_max = 1e-3
max_iter = 200
trainer = BackpropTrainer(n, d, learningrate=1e-3, momentum=0.9)
erroDpc = []

iter_t = 0
while max_iter > 0:
    tol = trainer.train()
    erroDpc.append(tol)
    max_iter -= 1
    print 'error: ', tol
    if tol <= tol_max:
        break
    iter_t += 1

print 'Answer 1 for yes and 0 for no'
print iter_t
q = np.zeros((1, 10))
Example no. 5

target_size = y_train.shape[1]

# prepare dataset

ds = ClassificationDataSet(input_size, target_size, nb_classes=3)
ds.setField('input', x_train)
ds.setField('target', y_train)

tstdata, trndata = ds.splitWithProportion(0.25)

# init and train

net = buildNetwork(input_size, hidden_size, target_size, outclass=SoftmaxLayer)
trainer = BackpropTrainer(net,
                          dataset=trndata,
                          learningrate=0.01,
                          verbose=True,
                          weightdecay=.01)
print "training for {} epochs...".format(epochs)

trainer.trainUntilConvergence(verbose=True,
                              validationProportion=validation_proportion,
                              maxEpochs=epochs,
                              continueEpochs=continue_epochs)
trnresult = percentError(trainer.testOnClassData(), trndata['target'])
tstresult = percentError(trainer.testOnClassData(dataset=tstdata),
                         tstdata['target'])

print("epoch: %4d" % trainer.totalepochs, "  train error: %5.2f%%" % trnresult,
      "  test error: %5.2f%%" % tstresult)
Example no. 6
def initRNA():
    global d
    global erroDpc
    q = np.array([[Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8, Q9, Q10]])
    InterL=InterLayer.get()
    LRate=LearnR.get()
    beta=Momentum.get()
    Iter=IterMax.get()
    erro=Tol.get()
    intervaloA=intervaloInicial.get()
    intervaloB=intervaloFinal.get()
    print q
    print b
    print f
    print InterL
    print LRate
    print beta
    print erro
    print Iter
    print intervaloA
    print intervaloB

    n = buildNetwork(10,InterL,1, bias=b, hiddenclass=eval(f))
    d = SupervisedDataSet(10,1)
    getRandomSample()
    trainer = BackpropTrainer(n, d, learningrate = LRate, momentum=beta)
    tol_max = erro
    
    n._setParameters(np.random.uniform(intervaloA,intervaloB,n.params.shape[0]))
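    # (_setParameters overwrites every weight in the network with a draw from
    # U(intervaloA, intervaloB), giving user-controlled weight initialization.)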
    
    #### so that the chart is plotted fresh each run
    erroDpc = [] # clear the data before each plot
    plt.clf() # clear the plot
    plt.ion() # interactive plotting mode

    iter_t = 0
    while Iter>0:
        erro = trainer.train()
        erroDpc.append(erro)
        Iter -= 1
        print 'generation:', iter_t, ' | error: ', erro
        if erro<=tol_max:
            break
        iter_t += 1
        plt.plot(erroDpc, c='r')
        plt.xlabel('Epoch')
        plt.ylabel('Error')
        plt.title('Error decay')
        plt.pause(0.002)

    r = n.activate(q[0,:])
    print " Actual chance", MSP(q)
    real.set(MSP(q)*100)
    print "predicted", r
    predict.set(r*100)

    if r*100 >= 80:
        Perigo = StringVar()
        Perigo.set("DANGER")
        labelPerigo = Label(app, textvariable=Perigo, bg="red", font="Helvetica 13 bold")
        labelPerigo.place(x=665, y=650)

    elif r*100 >= 50:
        Alerta = StringVar()
        Alerta.set("ALERT")
        labelAlerta = Label(app, textvariable=Alerta, bg="orange", font="Helvetica 13 bold")
        labelAlerta.place(x=665, y=650)

    elif r*100 >= 30:
        Atencao = StringVar()
        Atencao.set("ATTENTION")
        labelAtencao = Label(app, textvariable=Atencao, bg="yellow", font="Helvetica 13 bold")
        labelAtencao.place(x=665, y=650)

    elif r*100 > 0:
        SemRisco = StringVar()
        SemRisco.set("NO RISK")
        labelSemRisco = Label(app, textvariable=SemRisco, bg="green", font="Helvetica 13 bold")
        labelSemRisco.place(x=665, y=650)
Example no. 7
print(rede['out'])
print(rede['bias'])

# Creating the training dataset; for this example, the XOR operator
base = SupervisedDataSet(2, 1)
# The first parameter holds the inputs, the second the expected output
base.addSample((0, 0), (0, ))
base.addSample((0, 1), (1, ))
base.addSample((1, 0), (1, ))
base.addSample((1, 1), (0, ))

print(base['input'])
print(base['target'])

treinamento = BackpropTrainer(rede,
                              dataset=base,
                              learningrate=0.01,
                              momentum=0.06)

for i in range(1, 30000):
    erro = treinamento.train()
    if i % 1000 == 0:
        print("Erro: %s" % erro)

print(np.round(rede.activate([0, 0])))
print(np.round(rede.activate([1, 0])))
print(np.round(rede.activate([0, 1])))
print(np.round(rede.activate([1, 1])))
"""from pybrain.structure import FeedForwardNetwork
from pybrain.structure import LinearLayer, SigmoidLayer, BiasUnit
from pybrain.structure import FullConnection
Example no. 8
for c, yy, t in zip(c2_test, y2_test, t_test):
    if yy != 0:
        test_data.addSample(c, [t])

for c, yy, t in zip(c2_all, y2_all, t_all):
    # if yy != 0:
    all_data.addSample(c, [t])

train_data._convertToOneOfMany()
test_data._convertToOneOfMany()
all_data._convertToOneOfMany()
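# (_convertToOneOfMany replaces each integer class label with a one-hot
# target vector, e.g. class 2 of 3 becomes [0, 0, 1]; the original labels
# are kept in the dataset's 'class' field.)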

print("building")
fnn = buildNetwork(train_data.indim, 15, train_data.outdim, fast=True,
                   outclass=SoftmaxLayer)
trainer = BackpropTrainer(fnn, dataset=train_data, momentum=0.2, verbose=True,
                          learningrate=0.05, lrdecay=1.0)
# trainer = RPropMinusTrainer( fnn, dataset=train_data, momentum=0.1, verbose=True, learningrate=0.01, lrdecay=1.0)

# trainer.trainUntilConvergence()

best = fnn.copy()
best_test = 1

for i in range(5):
    print("training")
    trainer.trainEpochs(1)

    print("testing")
    # trnresult = trainer.testOnData()
    tstresult = trainer.testOnData( dataset=test_data )
Example no. 9
trndata._convertToOneOfMany()
tstdata._convertToOneOfMany()
'''
implementation of BP network
'''
from pybrain.tools.shortcuts import buildNetwork  # for building network raw model
from pybrain.structure import SoftmaxLayer  # for output layer activation function
from pybrain.supervised.trainers import BackpropTrainer  # for model trainer

# network structure
n_h = 5  # number of hidden-layer nodes
net = buildNetwork(19, n_h, 2, outclass=SoftmaxLayer)

# 1.1 model training, using standard BP algorithm
trainer = BackpropTrainer(net, trndata)
trainer.trainEpochs(1)  # train for a single epoch

# 1.2 model training, using accumulative BP algorithm
# trainer = BackpropTrainer(net, trndata, batchlearning=True)
# trainer.trainEpochs(50)
# err_train, err_valid = trainer.trainUntilConvergence(maxEpochs=50)

# convergence curve for accumulative BP algorithm process
# import matplotlib.pyplot as plt
# plt.plot(err_train,'b',err_valid,'r')
# plt.title('BP network classification')
# plt.ylabel('accuracy')
# plt.xlabel('epochs')
# plt.show()
Example no. 10
training_data._convertToOneOfMany()
test_data._convertToOneOfMany()

#********************End of Data Preparation***************************

#********************NN With BackPropagation***************************
fnn_backprop = buildNetwork(training_data.indim,
                            2,
                            training_data.outdim,
                            bias=True,
                            outclass=SoftmaxLayer)

trainer = BackpropTrainer(fnn_backprop,
                          dataset=training_data,
                          momentum=0.1,
                          verbose=True,
                          weightdecay=0.01)

epochs = 10
trnerr_backprop = []
tsterr_backprop = []
for i in xrange(epochs):
    # If you set the 'verbose' trainer flag, this will print the total error as it goes.
    trainer.trainEpochs(1)

    #output_train = testOnClassData_custom(fnn_backprop, dataset=training_data)
    output_train = trainer.testOnClassData()

    for tr in output_train:
        #print("This is the training output value: ", tr)
Example no. 11
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.utilities import percentError
from pybrain.structure.modules import SoftmaxLayer  # needed for the outclass below

numHidden = 30
# Following classification code: http://pybrain.org/docs/tutorial/fnn.html
# Output layer is softmax (normalizes to 0-1)
# Apparently momentum reduces oscillation, gives faster convergence
# See here: http://page.mi.fu-berlin.de/rojas/neural/chapter/K8.pdf
net = buildNetwork(ds.indim, numHidden, ds.outdim, outclass=SoftmaxLayer)

# net = net topology
# ds = input and output desired
# We multiply the weights by "weightdecay" to keep them from growing too large
# -- This is a regularization method
trainer = BackpropTrainer(net, ds, momentum=0.1, weightdecay=0.01)

# # Train the network once
# errorVal = trainer.train()
# print(errorVal)

# Train the network
numEpochs = 10
import sys
for i in range(numEpochs):
    errorVals = trainer.train()
    # Print how network is doing so far
    trnresult = percentError(trainer.testOnClassData(), ds['class'])
    # print("Epochs:", trainer.totalepochs)
    #print("Percent error on training data:", trnresult)
    sys.stdout.write(str(trnresult) + "%\n")  # same as print
Example no. 12
def graphNN(ticker, date, epochs, verbose):
    """
    The function builds a data set of stock prices, normalizes that data set, builds a linked data set to
    train the neural network, generates a neural network, trains the network, makes predictions, analyzes the
    predictions against testing data to generate statistics for comparison, and uses the statistics to
    generate graphs as a png file.
    :param ticker: the stock sticker to train and predict on
    :param date: the date to split the data on to create training and testing
    :param epochs: the number of times to train the network
    :param verbose: boolean value for verbose output
    :return tomorrowPrice: the price prediction for tomorrow
    :return totalTime: the total time in seconds it took to train the network on the data set
    :return averageTimePerEpoch: the average time per training run
    :return averagePercentError: the average percent error of the predictions and the testing data
    :return minPercentError: the minimum percent error of the predictions and the testing data
    """
    # request stock prices and split by the specified date to create training and testing data sets
    if verbose: print 'Requesting data...'
    data = getStockPrices(ticker, frequency="daily", update=True)
    trainData, testData = splitByDate(data, date)
    xTrain, yTrain = preprocessStocks(trainData)
    xTest, yTest = preprocessStocks(testData)
    # allocate space for predictions and error values
    fucturePredictions = []
    trainingPredictions = []
    percentError = []
    if verbose: print 'complete.'

    if verbose: print 'Normalizing data...'
    # normalize the values to a percentage of their max values to increase network training speed
    xTrain, yTrain, xTest, yTest, priceScaleFactor, timeScaleFactor = normalize(
        xTrain, yTrain, xTest, yTest)
    if verbose: print 'complete.'

    if verbose: print 'Building dataset...'
    # build a linked data set to allow for training and error calculation
    ds = SupervisedDataSet(1, 1)
    for i in range(0, len(xTrain)):
        ds.appendLinked(xTrain[i], yTrain[i])
    if verbose: print 'complete.'

    if verbose: print 'Building network...'
    rnn = buildNetwork(1,
                       3,
                       3,
                       3,
                       3,
                       3,
                       3,
                       3,
                       3,
                       1,
                       bias=True,
                       recurrent=True,
                       hiddenclass=TanhLayer)
    if verbose: print 'complete.'

    if verbose: print 'Training network...'
    trainer = BackpropTrainer(rnn, ds, learningrate=0.01)
    totalTime, averageTimePerEpoch, trainerErrorValues, epochTimes = trainNetwork(
        trainer, epochs, verbose)
    if verbose: print 'Training network 100.0% complete.'

    if verbose: print 'Predicting...'
    # prime the network
    for i in xTrain:
        rnn.activate(i)

    # make predictions with network on the training data to show general shape of approximated function
    for i in xTrain:
        trainingPredictions.append(rnn.activate(i))
    # make predictions with the network on the testing data to validate the accuracy of the network
    for i in xTest:
        fucturePredictions.append(rnn.activate(i))

    # predict tomorrow's price
    tomorrowPrice = rnn.activate(xTest[len(xTest) - 1] + 1) * priceScaleFactor
    if verbose: print 'complete.'

    if verbose: print 'Generating graphs...'
    # denormalize
    xTrain, yTrain, xTest, yTest, fucturePredictions, trainingPredictions = denormalize(
        xTrain, yTrain, xTest, yTest, fucturePredictions, trainingPredictions,
        priceScaleFactor, timeScaleFactor)

    # calculate percent error
    for i in range(0, len(yTest)):
        percentError.append((abs(
            (yTest[i] - fucturePredictions[i]) / yTest[i]) * 100))

    # calculates statistics on the analysis of the network
    sumPercentError = sum(percentError)
    averagePercentError = sumPercentError / len(percentError)
    numDataPoints = len(xTrain) + len(xTest)
    minPercentError = min(percentError)

    # generate the graphs and save them to the working directory
    graphOutput(xTrain, yTrain, xTest, yTest, fucturePredictions,
                trainingPredictions, ticker)
    if verbose: print 'complete.'

    # returns
    return tomorrowPrice, numDataPoints, totalTime, averageTimePerEpoch, averagePercentError, minPercentError
Example no. 13

testdata = data[(len(data) / 2) + 1:]

print "Importing actual data"
actualdata = numpy.genfromtxt('trainR.csv', delimiter=',')

print "Adding samples to dataset and setting up neural network"
ds = ClassificationDataSet(784, 1, nb_classes=10)
for x in traindata:
    ds.addSample(tuple(x[1:]), tuple(x[0:1]))
ds._convertToOneOfMany(bounds=[0, 1])
net = buildNetwork(784, 100, 10, bias=True, outclass=SoftmaxLayer)

print "Training the neural network"
trainer = BackpropTrainer(net,
                          dataset=ds,
                          momentum=0.1,
                          verbose=True,
                          weightdecay=0.01)
for i in range(3):
    # train the network for 1 epoch
    trainer.trainEpochs(1)

    # evaluate the result on the training and test data
    trnresult = percentError(trainer.testOnClassData(),
                             [x[0] for x in traindata])

    # print the result
    print "epoch: " + str(
        trainer.totalepochs) + "  train error: " + str(trnresult)

print ""
Example no. 14
    def __init__(self):
        self.refcase = None
        self.ranking = []  # (casenum, caseorder, performance )
        self.failed = []  # (casenum, caseorder, performance )
        self.casenums = []

        # configure for case generation
        if State.direct.has_key('casegen'):
            if len(State.direct['casegen']) != 1:
                raise UserException('Only one CASEGEN directive is allowed')
            sub = State.direct['casegen'][0][0]  # (sub, stmt, span)
            subsub = sub.cases[0][0][0]
            item = SrcFile.applymap(subsub.case[0][0][0])
            attrs = SrcFile.applymap(subsub.case[0][1])

            if item == 'rand':
                self.selectfunc, self.prefunc, self.postfunc, self.updatefunc = casegen_functions_random
            elif item == 'dtree':
                self.selectfunc, self.prefunc, self.postfunc, self.updatefunc = casegen_functions_dtree
            else:
                raise UserException(
                    '%s is not a valid case generation algorithm' % item)
        else:
            self.selectfunc, self.prefunc, self.postfunc, self.updatefunc = casegen_functions_random

        # configure for measurement
        self.measure = {}
        for sub, stmt, span in State.direct['measure']:
            for subsub in sub.cases[0][0]:
                item = SrcFile.applymap(subsub.case[0][0][0])
                attrs = SrcFile.applymap(subsub.case[0][1])
                self.measure[item] = attrs

        # configure for verification
        self.verify = {}
        for sub, stmt, span in State.direct['verify']:
            for subsub in sub.cases[0][0]:
                item = SrcFile.applymap(subsub.case[0][0][0])
                attrs = SrcFile.applymap(subsub.case[0][1])
                self.verify[item] = attrs

        self.rank_var = State.direct['rank'][0][0].cases[0][0][0].case[0][0][0]
        self.rank_attrs = State.direct['rank'][0][0].cases[0][0][0].case[0][1]

        # build Neural Network
        casenum, casenumseq, directs, objs = get_directs(
            self.selectfunc, self.prefunc, self.postfunc)

        self.casesizes = []
        for caseidx, caseobj in casenumseq:
            self.casesizes.append(
                max(1, int(math.ceil(math.log(caseobj.size, 2)))))
        self.NN_input_size = sum(self.casesizes)
        self.NN_hidden_layers = 4
        self.NN_target_size = 1
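        # (Each case contributes ceil(log2(size)) input units -- enough bits
        # to binary-encode its index -- so NN_input_size is the total width
        # of that encoding. Note that NN_hidden_layers is passed to
        # buildNetwork below as a layer *size*: one hidden layer of 4 units.)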

        self.NN_net = buildNetwork(self.NN_input_size,
                                   self.NN_hidden_layers,
                                   self.NN_target_size,
                                   bias=True)
        self.NN_trainer = BackpropTrainer(self.NN_net,
                                          momentum=0.1,
                                          weightdecay=0.01,
                                          learningrate=0.01)
        self.NN_ds = SupervisedDataSet(self.NN_input_size, self.NN_target_size)

        self.NN_basket_size = 1000
        self.NN_min_dataset_size = min(20, State.cases['size'])
        self.NN_amp_factor = 1.0
Example no. 15
    def testBaseLine(self):
        #return # disable slow test for now
        logger.info("Running 20NG NB baseline...")

        logger.info("Calculating TF-IDF on 20ng data set...")
        news_train = load_mlcomp('20news-18828', 'train')
        news_test = load_mlcomp('20news-18828', 'test')
        target_names = news_test.target_names
        vectorizer = TfidfVectorizer(encoding='latin1')
        X_train = vectorizer.fit_transform(
            (open(f).read() for f in news_train.filenames))
        y_train = news_train.target
        X_test = vectorizer.transform(
            (open(f).read() for f in news_test.filenames))
        y_test = news_test.target

        del news_train, news_test

        logger.info("Running MultinomialNB...")
        clf = MultinomialNB().fit(X_train, y_train)
        print(
            classification_report(y_test,
                                  clf.predict(X_test),
                                  target_names=target_names))

        del clf

        logger.info("Running pybrain...")

        from pybrain.datasets import ClassificationDataSet
        from pybrain.utilities import percentError
        from pybrain.tools.shortcuts import buildNetwork
        from pybrain.supervised.trainers import BackpropTrainer
        from pybrain.structure.modules import SoftmaxLayer
        from pybrain.tools.xml.networkwriter import NetworkWriter
        from pybrain.tools.xml.networkreader import NetworkReader

        trndata = ClassificationDataSet(len(vectorizer.get_feature_names()),
                                        1,
                                        nb_classes=len(target_names),
                                        class_labels=target_names)
        for i, x in enumerate(X_train):
            #print x, y_train[i]
            trndata.addSample(x.toarray(), y_train[i])
        trndata._convertToOneOfMany()
        del X_train, y_train

        tstdata = ClassificationDataSet(len(vectorizer.get_feature_names()),
                                        1,
                                        nb_classes=len(target_names),
                                        class_labels=target_names)
        for i, x in enumerate(X_test):
            tstdata.addSample(x.toarray(), y_test[i])
        tstdata._convertToOneOfMany()
        del X_test, y_test

        logger.info("Building network...")
        fnn = buildNetwork(trndata.indim,
                           100,
                           trndata.outdim,
                           outclass=SoftmaxLayer)
        trainer = BackpropTrainer(fnn,
                                  dataset=trndata,
                                  momentum=0.1,
                                  learningrate=0.01,
                                  verbose=True,
                                  weightdecay=0.01)

        logger.info("Training pybrain for 50 epochs...")
        trainer.trainEpochs(50)
        pred = fnn.activateOnDataset(tstdata)
        pred = np.argmax(pred, axis=1)  # argmax gives the class

        print(classification_report(y_test, pred, target_names=target_names))
Example no. 16
        #print inp, out 
        dataset.addSample(inp, [out])

    #Some Pybrain Magic
    dataset._convertToOneOfMany()
    return dataset

    
train_dataset = create_dataset(train, train_labels)
test_dataset = create_dataset(test, test_labels)


# 784 = 28*28 inputs, hidden layers of 100 and 20 units, 10 outputs; Softmax is recommended for classification
network = buildNetwork(784, 100, 20, 10, outclass=SoftmaxLayer)

trainer = BackpropTrainer(network, dataset=train_dataset, weightdecay=0.01)


def one_iteration():

    print "Training"
    trainer.trainEpochs(1)

    print "Testing on testing dataset"

    tstresult = percentError(trainer.testOnClassData(dataset = test_dataset),test_dataset['class'])
    print "Error %5.2f" % tstresult

    rsamples = [get_random_sample_with_label() for i in range(10)]
        
    rsample,  rlabel = zip(*rsamples)
Example no. 17
         net2Filename)
 if not isfile(net1Filename):
     NetworkWriter.writeToFile(network1, net1Filename)
 if not isfile(net2Filename):
     NetworkWriter.writeToFile(network2, net2Filename)
 trainingSets1 = SupervisedDataSet(1, 1)
 trainingSets2 = SupervisedDataSet(1, 1)
 for i in range(len(trainingSetsA)):
     trainingSets1.addSample(trainingSetsA[i], a(trainingSetsA[i]))
 for i in range(len(trainingSetsB)):
     trainingSets2.addSample(trainingSetsB[i], b(trainingSetsB[i]))
 trainer1 = BackpropTrainer(network1, trainingSets1, learningrate=0.1)
 trainer2 = BackpropTrainer(network2, trainingSets2, learningrate=0.1)
 trainer1.trainUntilConvergence()
 trainer2.trainUntilConvergence()
 trainOutputA = []
 trainOutputB = []
 validateOutputA = []
 validateOutputB = []
 errA = []
 errB = []
 for i in range(len(trainingSetsA)):
     trainOutputA.append(a(trainingSetsA[i]))
 for i in range(len(trainingSetsB)):
     trainOutputB.append(b(trainingSetsB[i]))
Example no. 18
with open('vsample.csv', 'rb') as f:
    reader = csv.reader(f)
    for row in reader:
        d_input = map(float, row[1:10])
        output = map(float, row[10])
        n_input = numpy.array(d_input) / numpy.linalg.norm(d_input)  # normalize (note: n_input is unused below)
        ds.addSample(d_input, output)
        temp_ds.addSample(d_input)

#print ds
cfg = RbmGibbsTrainerConfig()
cfg.maxIter = 3
rbm = Rbm.fromDims(9, 5)
trainer = BackpropTrainer(net,
                          dataset=ds,
                          learningrate=0.001,
                          weightdecay=0.01,
                          verbose=True)
#trainer = DeepBeliefTrainer(net, dataset=temp_ds)
#trainer = RbmBernoulliTrainer(rbm, temp_ds, cfg)
for i in range(30):
    trainer.trainEpochs(30)

print 'Expected:1 [FRAUD]     ', net.activate(
    [49, 2.6, 0.98, 4.3, 1.48, 10, 2.5, 6, 67])
print 'Expected:0 [NOT FRAUD] ', net.activate(
    [78, 5, 4.4, 4.5, 2.99, 3, 1.3, 10, 59])
print 'Expected:1 [FRAUD]     ', net.activate(
    [57, 2, 0.1, 1.15, 0.47, 7, 1.8, 6, 73])
print 'Expected:0 [NOT FRAUD] ', net.activate(
    [65, 3, 11.1, 1.8, 0.6, 4, 4, 4.5, 90])
Example no. 19

#Build net and training
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules import SoftmaxLayer
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.utilities import percentError

n_hidden = 500
bp_nn = buildNetwork(trndata.indim, n_hidden, trndata.outdim, 
                     outclass = SoftmaxLayer)
#Build arbitrarily deep networks
#`layers` should be a list or tuple of integers, that indicate how many 
#neurons the layers should have.
trainer = BackpropTrainer(bp_nn, 
                          dataset = trndata, 
                          verbose = True, 
                          momentum = 0.5, 
                          learningrate = 0.0001, 
                          batchlearning = True)
#The learning rate gives the ratio of which parameters are changed into the 
#direction of the gradient. The learning rate decreases by `lrdecay`, which 
#is used to multiply the learning rate after each training step.
#The parameters are also adjusted with respect to `momentum`, which is the 
#ratio by which the gradient of the last timestep is used.
#If `batchlearning` is set, the parameters are updated only at the end of 
#each epoch. Default is False.

err_train, err_valid = trainer.trainUntilConvergence(maxEpochs = 1000, 
                                                     validationProportion = 0.25)
#If no dataset is given, the dataset passed during Trainer initialization is 
#used. validationProportion is the ratio of the dataset that is used for the 
#validation dataset.
Example no. 20
# Add the input, hidden and output layers to fnn
fnn.addInputModule(inLayer)
fnn.addModule(hiddenLayer)
fnn.addOutputModule(outLayer)

# Create full connection between each layer
in_to_hidden = FullConnection(inLayer, hiddenLayer)
hidden_to_out = FullConnection(hiddenLayer, outLayer)

# Create connection with fnn
fnn.addConnection(in_to_hidden)
fnn.addConnection(hidden_to_out)
fnn.sortModules()
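# (sortModules() topologically sorts the modules and initializes the
# network's parameters; it must be called before the network is activated.)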

# Train fnn using BP until convergence
trainer = BackpropTrainer(fnn, ds_train, learningrate = 0.01, verbose = True,
                          weightdecay = 0.1)
# batchlearning = True, weightdecay = 0.1, momentum
err_train, err_valid = trainer.trainUntilConvergence(maxEpochs = 1000)

# convergence curve
import matplotlib.pyplot as plt
plt.plot(err_train, 'b', err_valid, 'r')
plt.show()

# model testing
from pybrain.utilities import percentError
testResult = percentError(trainer.testOnClassData(), ds_test['target'])
print("epoch: %d" % trainer.totalepochs, "test error: %f%%" % testResult)

#%%
# Save model and result
Example no. 21
def exec_algo(xml_file, output_location):
    rootObj = ml.parse(xml_file)

    #Getting the root element so that we can access its subclasses, members and member functions
    xmlParamDetails = rootObj.MachineLearning.classification

    #Gather param values from the parsed XML object
    file = open(xmlParamDetails.datafile)
    var_inp = xmlParamDetails.input
    var_out = xmlParamDetails.output
    classes = xmlParamDetails.classes
    split = xmlParamDetails.split
    learningrate = xmlParamDetails.algorithm.MultiLayerPerceptron.learningRate
    momentum = xmlParamDetails.algorithm.MultiLayerPerceptron.momentum
    epochs = xmlParamDetails.algorithm.MultiLayerPerceptron.epochs
    hiddenNeurons = int(
        xmlParamDetails.algorithm.MultiLayerPerceptron.hiddenLayers)
    hiddenLayer = xmlParamDetails.algorithm.MultiLayerPerceptron.hiddenLayerActivation
    outputLayer = xmlParamDetails.algorithm.MultiLayerPerceptron.outputLayerActivation
    delimiter = xmlParamDetails.delimiter

    DS = ClassificationDataSet(var_inp, var_out, nb_classes=classes)

    for line in file.readlines():
        data = [float(x) for x in line.strip().split(',') if x != '']
        inp = tuple(data[:var_inp])
        output = tuple(data[var_inp:])
        DS.addSample(inp, output)

    tstdata, trndata = DS.splitWithProportion(split)
    trdata = ClassificationDataSet(trndata.indim, var_out, nb_classes=classes)
    tsdata = ClassificationDataSet(tstdata.indim, var_out, nb_classes=classes)

    for i in xrange(trndata.getLength()):
        trdata.addSample(trndata.getSample(i)[0], trndata.getSample(i)[1])

    for i in xrange(tstdata.getLength()):
        tsdata.addSample(tstdata.getSample(i)[0], tstdata.getSample(i)[1])

    trdata._convertToOneOfMany()
    tsdata._convertToOneOfMany()

    fnn = FeedForwardNetwork()
    inputLayer = LinearLayer(trdata.indim)

    if hiddenLayer == 'Sigmoid':
        hiddenLayer = SigmoidLayer(hiddenNeurons)
    elif hiddenLayer == 'Softmax':
        hiddenLayer = SoftmaxLayer(hiddenNeurons)
    else:
        hiddenLayer = LinearLayer(hiddenNeurons)

    if outputLayer == 'Sigmoid':
        outputLayer = SigmoidLayer(trdata.outdim)
    elif outputLayer == 'Softmax':
        outputLayer = SoftmaxLayer(trdata.outdim)
    else:
        outputLayer = LinearLayer(trdata.outdim)

    fnn.addInputModule(inputLayer)
    fnn.addModule(hiddenLayer)
    fnn.addOutputModule(outputLayer)

    in_to_hidden = FullConnection(inputLayer, hiddenLayer)
    hidden_to_outputLayer = FullConnection(hiddenLayer, outputLayer)
    fnn.addConnection(in_to_hidden)
    fnn.addConnection(hidden_to_outputLayer)
    fnn.sortModules()

    trainer = BackpropTrainer(fnn,
                              dataset=trdata,
                              verbose=True,
                              learningrate=learningrate,
                              momentum=momentum)
    trainer.trainEpochs(epochs=epochs)

    trresult = percentError(trainer.testOnClassData(), trdata['class'])

    print("Training accuracy : %f " % (100 - trresult))

    ts = time.time()
    directory = output_location + sep + str(int(ts))
    makedirs(directory)
    fileObject = open(
        output_location + sep + str(int(ts)) + sep + 'pybrain_MLP', 'w')
    pickle.dump(trainer, fileObject)
    pickle.dump(fnn, fileObject)
    fileObject.close()
Example no. 22
# Create Recurrent Network Structure
net = RecurrentNetwork()
net.addInputModule(LinearLayer(2, name='in'))
net.addModule(LinearLayer(1, name='hidden'))
net.addOutputModule(LinearLayer(1, name='out'))
net.addConnection(FullConnection(net['in'], net['hidden'], name='c1'))
net.addConnection(FullConnection(net['hidden'], net['out'], name='c2'))
#net.addRecurrentConnection(FullConnection(net['hidden'], net['hidden'], name='c3'))
net.addRecurrentConnection(FullConnection(net['out'], net['out'], name='c3'))
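# (The recurrent connection feeds the previous time step's output back into
# the output layer, so the order of the training samples matters here.)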
net.sortModules()

# Train network
trainer = BackpropTrainer(net,
                          ds,
                          learningrate=0.01,
                          batchlearning=True,
                          verbose=True)
#trainUntilConvergence does not work for RNNs, because validationProportion
#rearranges the order of the input samples
#trainErr = trainer.trainUntilConvergence(maxEpochs=50,validationProportion=0.2)
#plotCost(trainErr[1])
trainErr = np.zeros(10)
print "Network before training"
print_network(net)
print ""
for i in range(10):
    trainErr[i] = trainer.train()
    print_network(net)
    print ""
    net.reset()
Example no. 23

def main():
    trndata, tstdata = createDS()
    for repeat in xrange(repeats):
        print 'trial', repeat
        iter_trn_results = []
        iter_tst_results = []
        nn = createNN(4, 6, 3)
        nn.randomize()
        hiddenAstrocyteLayer, outputAstrocyteLayer = \
            associateAstrocyteLayers(nn)
        trainer = BackpropTrainer(nn,
                                  dataset=trndata,
                                  learningrate=0.01,
                                  momentum=0.1,
                                  verbose=False,
                                  weightdecay=0.0)
        for grand_iter in xrange(iterations):
            if grand_iter == 0:
                trainer.train()
            # trainNGA(nn, trndata, hiddenAstrocyteLayer, outputAstrocyteLayer)
            trainer.train()

            trnresult = percentError(trainer.testOnClassData(),
                                     trndata['class'])
            iter_trn_results.append(trnresult)
            tstresult = percentError(trainer.testOnClassData(dataset=tstdata),
                                     tstdata['class'])
            iter_tst_results.append(tstresult)

            if not grand_iter % 100:
                print 'epoch %4d' % trainer.totalepochs, 'train error %5.2f%%'\
                       % trnresult, 'test error %5.2f%%' % tstresult
        # MAKE SURE NOT IN ITER LOOP
        all_trn_results.append(iter_trn_results)
        all_tst_results.append(iter_tst_results)
    assert array(iter_trn_results).shape == (iterations, ), \
            array(iter_trn_results).shape
    assert array(iter_tst_results).shape == (iterations, ), \
            array(iter_tst_results).shape
    assert array(all_trn_results).shape == (repeats, iterations), \
            array(all_trn_results).shape
    assert array(all_tst_results).shape == (repeats, iterations), \
            array(all_tst_results).shape

    a = datetime.datetime.now().utctimetuple()
    time_string = str(a[3]) + str(a[4]) + '_' + str(a[2]) + '-' + \
        str(a[1]) + '-' + str(a[0])

    if os.environ['OS'] == 'Windows_NT':
        sep = '\\'
    else:
        sep = '/'

    pybrain_dir = os.getcwd() + sep
    assert pybrain_dir[-10:-1] == 'mypybrain', \
        'is actually this ' + pybrain_dir[-10:-1]

    os.mkdir(pybrain_dir + 'experiment_results' + sep + time_string)

    trnf = open(
        pybrain_dir + 'experiment_results' + sep + time_string +
        '/all_trn_results.out', 'w')
    np.savetxt(trnf, all_trn_results)

    tstf = open(
        pybrain_dir + 'experiment_results' + sep + time_string +
        '/all_tst_results.out', 'w')
    np.savetxt(tstf, all_tst_results)
Example no. 24

net2 = buildNetwork(
    5,
    20,  # number of hidden units
    3,
    bias=True,
    hiddenclass=SigmoidLayer,
    outclass=LinearLayer)

net4 = buildNetwork(
    5,
    15,  # number of hidden units
    15,  # number of hidden units
    3,
    bias=True,
    hiddenclass=SigmoidLayer,
    outclass=LinearLayer)
#initialize the structures
net2.randomize()
net2.sortModules()
net4.randomize()
net4.sortModules()
#create trainers
#train for set amount of epochs
#save networks to disc
trainer2 = BackpropTrainer(net2, ds2, verbose=True)
trainer2.trainEpochs(nEpochs)
NetworkWriter.writeToFile(net2, net_fold + 'network_Type2H1NewSTD.xml')
trainer4 = BackpropTrainer(net4, ds2, verbose=True)
trainer4.trainEpochs(nEpochs)
NetworkWriter.writeToFile(net4, net_fold + 'network_Type2H2NewSTD.xml')

print 'Work completed. The networks have been saved to disk.'
Example no. 25
    print inpt, target

print '\nInputs'
print ds['input']

print '\nOutputs/Targets'
print ds['target']

#   ds.clear();

##################  Trainers #####################
from pybrain import TanhLayer
from pybrain.supervised.trainers import BackpropTrainer

net = buildNetwork(2, 3, 1, bias=True, hiddenclass=TanhLayer)
trainer = BackpropTrainer(net, ds)

print trainer.train()
# trainUntilConvergence() returns a tuple containing the errors for every
# training epoch.
print trainer.trainUntilConvergence()

############### Feed forward networks ######################
from pybrain.structure import FeedForwardNetwork

n = FeedForwardNetwork()

#   Constructing input, output & hidden layers & giving names to the network
from pybrain.structure import LinearLayer, SigmoidLayer

inLayer = LinearLayer(2, name='in')
hiddenLayer = SigmoidLayer(3, name='hidden')
Example no. 26
#----------
# build the network
#----------
from pybrain.structure import SigmoidLayer, LinearLayer
from pybrain.tools.shortcuts import buildNetwork

net = buildNetwork(1,
                   3, # number of hidden units
                   1
                   )
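# (With no hiddenclass/outclass given, buildNetwork defaults to a sigmoid
# hidden layer and a linear output layer.)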
#----------
# train
#----------
from pybrain.supervised.trainers import BackpropTrainer
trainer = BackpropTrainer(net, ds, verbose=True)
trainer.trainUntilConvergence(maxEpochs = 20)
print 'Network details:'
disp_network(net)
print net.params
#----------
# evaluate
#----------

# neural net approximation
pylab.plot(xvalues,
           [ net.activate([x]) for x in xvalues ], linewidth = 2,
           color = 'blue', label = 'NN output')

# target function
pylab.plot(xvalues,
Example no. 27
        dataX = normalizer.transform(dataX)
    # / scalarization && normalization

    # training dataset construction
    for i in range(0, len(dataX)):
        ds.addSample(dataX[i], dataY[i])
    # / training dataset construction

    # nn && trainer construction
    net = buildNetwork(ds.indim, (ds.indim + ds.outdim) / 2,
                       ds.outdim,
                       bias=True,
                       outclass=SoftmaxLayer)  # building the net
    trainer = BackpropTrainer(net,
                              ds,
                              learningrate=0.15,
                              momentum=0,
                              verbose=False)  # building the trainer
    # / nn && trainer construction

    # training
    trainer.trainUntilConvergence(maxEpochs=maxEpk)  # Train, until convergence
    # for epoch in range(0,1000):
    #         trainer.train()
    # / training

    # cross validation
    win = 0
    for i in range(0, len(datapX)):
        toPredict = datapX[i]
        if scale_data == 1:
Example no. 28
print "First sample (input, target, class):"
#print trndata['input'][0], trndata['target'][0], trndata['class'][0]

# <codecell>

fnn = buildNetwork(trndata.indim,
                   trndata.indim * 2,
                   trndata.outdim,
                   outclass=SoftmaxLayer,
                   hiddenclass=SigmoidLayer)

# <codecell>

trainer = BackpropTrainer(fnn,
                          dataset=trndata,
                          momentum=0.1,
                          lrdecay=1.0,
                          verbose=True,
                          weightdecay=0.001)

# <codecell>

for i in range(0, 3):
    trainer.trainEpochs(1)

# <codecell>

trainer.testOnClassData()[:20], list(targets_train[:20])

# <codecell>

trnresult = percentError(trainer.testOnClassData(), trndata['class'])
Example no. 29
def train_separate_nets(data_set,
                        test_data,
                        n,
                        arousal_net,
                        valence_net,
                        epochs=1):
    num_inputs = len(data_set[0][0][n])
    arousal_ds = SupervisedDataSet(num_inputs, 1)
    valence_ds = SupervisedDataSet(num_inputs, 1)
    for i in range(len(data_set)):
        try:
            arousal_ds.appendLinked(data_set[i][0][n], (data_set[i][1]))
            valence_ds.appendLinked(data_set[i][0][n], (data_set[i][2]))
        except:
            print 'WARNING: INSUFFICIENT INPUT SIZE'
            continue
    print str(
        len(arousal_ds)) + ' points successfully acquired for arousal analysis'
    print str(
        len(valence_ds)) + ' points successfully acquired for valence analysis'

    arousal_trainer = BackpropTrainer(arousal_net,
                                      learningrate=0.01,
                                      momentum=0.05,
                                      verbose=True)
    valence_trainer = BackpropTrainer(valence_net,
                                      learningrate=0.01,
                                      momentum=0.05,
                                      verbose=True)

    arousal_trainer.trainOnDataset(arousal_ds)
    valence_trainer.trainOnDataset(valence_ds)
    mean_internal_errors = []
    mean_errors = []

    # Calculate initial error
    sq_arousal_errors = [(arousal_net.activate(datum[0][n]) - datum[1])**2
                         for datum in test_data]
    sq_valence_errors = [(valence_net.activate(datum[0][n]) - datum[2])**2
                         for datum in test_data]
    errors = [
        sqrt(sq_arousal_errors[i] + sq_valence_errors[i])
        for i in range(len(sq_arousal_errors))
    ]
    mean_errors.append(np.mean(errors))
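    # (Each entry combines the two per-dimension errors as the Euclidean
    # distance sqrt(da^2 + dv^2) in the 2D arousal-valence plane.)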

    sq_arousal_errors = [
        (arousal_net.activate(data_set[i][0][n]) - data_set[i][1])**2
        for i in range(len(data_set))
    ]
    sq_valence_errors = [
        (valence_net.activate(data_set[i][0][n]) - data_set[i][2])**2
        for i in range(len(data_set))
    ]
    errors = [
        sqrt(sq_arousal_errors[i] + sq_valence_errors[i])
        for i in range(len(sq_arousal_errors))
    ]
    mean_internal_errors.append(np.mean(errors))

    for j in range(epochs / 50):
        arousal_trainer.trainEpochs(50)
        valence_trainer.trainEpochs(50)
        print 'Method ' + str(n) + ' - ' + str(
            (j + 1) * 50) + '/' + str(epochs) + ' complete'
        sq_arousal_errors = [(arousal_net.activate(datum[0][n]) - datum[1])**2
                             for datum in test_data]
        sq_valence_errors = [(valence_net.activate(datum[0][n]) - datum[2])**2
                             for datum in test_data]
        errors = [
            sqrt(sq_arousal_errors[i] + sq_valence_errors[i])
            for i in range(len(sq_arousal_errors))
        ]
        mean_errors.append(np.mean(errors))

        sq_arousal_errors = [
            (arousal_net.activate(data_set[i][0][n]) - data_set[i][1])**2
            for i in range(len(data_set))
        ]
        sq_valence_errors = [
            (valence_net.activate(data_set[i][0][n]) - data_set[i][2])**2
            for i in range(len(data_set))
        ]
        errors = [
            sqrt(sq_arousal_errors[i] + sq_valence_errors[i])
            for i in range(len(sq_arousal_errors))
        ]
        mean_internal_errors.append(np.mean(errors))

    return arousal_net, valence_net, mean_errors, mean_internal_errors
Example no. 30
if __name__ == "__main__":

    t = loadImage('krests/krest1.png')

    net = buildNetwork(len(t), len(t), 1)
    dataset = SupervisedDataSet(len(t), 1)
    dataset.addSample(loadImage('krests/krest1.png'), 100)
    dataset.addSample(loadImage('krests/krest2.png'), 100)
    dataset.addSample(loadImage('krests/krest3.png'), 100)
    dataset.addSample(loadImage('krests/krest4.png'), 100)
    dataset.addSample(loadImage('krests/krest5.png'), 100)
    dataset.addSample(loadImage('nols/nol1.png'), -100)
    dataset.addSample(loadImage('nols/nol2.png'), -100)
    dataset.addSample(loadImage('nols/nol3.png'), -100)
    dataset.addSample(loadImage('nols/nol4.png'), -100)
    dataset.addSample(loadImage('nols/nol5.png'), -100)
    dataset.addSample(loadImage('nols/nol6.png'), -100)
    dataset.addSample(loadImage('nols/nol7.png'), -100)
    #dataset.addSample(loadImage('stable/bullet1.png'), 0)

    trainer = BackpropTrainer(net, dataset)
    error = 10
    iteration = 0
    while error > 0.001:
        error = trainer.train()
        iteration += 1
        print("Error on iteration {0} is {1}".format(iteration, error))

    print("\nResult for 'krest.png': ",
          net.activate(loadImage('krests/krest.png')))
    print("\nResult for 'nol.png': ", net.activate(loadImage('nols/nol.png')))