Example #1

Trains a 12-12-1 logistic feedforward network (with a bias unit) on data read from
finaldata.txt and finalout.txt, using CyBrain's batch Trainer.
# Assumed module-level imports (not shown in the original snippet):
import numpy as np
import cybrain as cb

def trainNetwork():
    global nnet
    trainX = []
    trainY = []

    # Read the training inputs: one comma-separated row of floats per line.
    with open('finaldata.txt') as f:
        for line in f:
            trainX.append([float(d) for d in line.split(',') if d != '\n'])

    # Read the training targets; a target of 2 is remapped to 0 so every
    # label lies in {0, 1} for the logistic output neuron.
    with open('finalout.txt') as f:
        for line in f:
            data = []
            for d in line.split(','):
                if d != '\n':
                    data.append(0.0 if float(d) == 2 else float(d))
            trainY.append(data)

    # Convert to NumPy arrays for the trainer.
    trainX, trainY = np.array(trainX), np.array(trainY)

    # CREATE NETWORK
    nnet = cb.Network()

    # CREATE LAYERS: 12 linear inputs, 12 logistic hidden units,
    # 1 logistic output, plus a single bias unit.
    Lin = cb.Layer(12)
    Lhidden = cb.Layer(12, cb.LogisticNeuron)
    Lout = cb.Layer(1, cb.LogisticNeuron)
    bias = cb.Layer(1, cb.BiasUnit)

    # ADD LAYERS TO NETWORK
    nnet.addInputLayer(Lin)
    nnet.addLayer(Lhidden)
    nnet.addOutputLayer(Lout)
    nnet.addAutoInputLayer(bias)

    # CONNECT LAYERS (the bias unit feeds both the hidden and output layers)
    Lin.connectTo(Lhidden)
    Lhidden.connectTo(Lout)
    bias.connectTo(Lhidden)
    bias.connectTo(Lout)

    # CREATE BATCH TRAINER
    rate = 0.1
    batch = cb.Trainer(nnet, trainX, trainY, rate)

    # TRAIN for 100 epochs
    batch.epochs(100)
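
Once trained, the network can be evaluated on a single 12-feature row. A minimal usage
sketch, assuming the imports above and that activateWith(x, return_value=True) returns
the output activation as in Example #2; the all-zero sample row is a hypothetical
placeholder, not data from the original:

trainNetwork()

# Hypothetical input row; replace with a real 12-value row from finaldata.txt.
sample = [0.0] * 12
prediction = nnet.activateWith(sample, return_value=True)
print("{} ==> {}".format(sample, prediction))
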
Example #2

Builds a 2-2-1 logistic network with named neurons and a bias unit, trains it with the
batch Trainer for 10000 epochs, and prints the activation for each input pattern.
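
The extracted snippet starts mid-script, so the network object, training data, and
timing import are not shown. A minimal setup sketch, assuming the same import alias as
above; the XOR-style truth table for X and Y is a hypothetical placeholder, not data
from the original:

from time import time

import numpy as np
import cybrain as cb

nnet = cb.Network()

# Hypothetical placeholder data: XOR truth table (two inputs, one target).
X = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
Y = np.array([[0.], [1.], [1.], [0.]])
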
# CREATE LAYERS: 2 named linear inputs, 2 logistic hidden units,
# 1 logistic output, plus a single bias unit.
Lin = cb.Layer(2, names=['a', 'b'])
Lhidden = cb.Layer(2, cb.LogisticNeuron, names=['c', 'd'])
Lout = cb.Layer(1, cb.LogisticNeuron, names=['e'])
bias = cb.Layer(1, cb.BiasUnit, names=['bias'])

#ADD LAYERS TO NETWORK
nnet.addInputLayer(Lin)
nnet.addLayer(Lhidden)
nnet.addOutputLayer(Lout)
nnet.addAutoInputLayer(bias)

#CONNECT LAYERS
Lin.connectTo(Lhidden)
Lhidden.connectTo(Lout)
bias.connectTo(Lhidden)
bias.connectTo(Lout)

# CREATE BATCH TRAINER
rate = 0.1
batch = cb.Trainer(nnet, X, Y, rate)

# TRAIN (timing the run)
t1 = time()
batch.epochs(10000)
print("Time CyBrain {}".format(time() - t1))

# PRINT RESULTS: activate the trained network on each input pattern
for x in X:
    print("{} ==> {}".format(x, nnet.activateWith(x, return_value=True)))