Code Example #1
def getNetwork(trndata):
    n = RecurrentNetwork()
    n.addInputModule(LinearLayer(trndata.indim, name='in'))
    n.addModule(SigmoidLayer(100, name='hidden'))
    n.addOutputModule(LinearLayer(trndata.outdim, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
    n.addRecurrentConnection(
        FullConnection(n['hidden'], n['hidden'], name='c3'))
    n.sortModules()

    # fnn = buildNetwork( trndata.indim, 5, trndata.outdim, outclass=SoftmaxLayer )
    trainer = BackpropTrainer(n,
                              dataset=trndata,
                              momentum=0.1,
                              verbose=True,
                              weightdecay=0.01)

    # TODO: return network and trainer here. Make another function for training
    # for i in range(20):
    # trainer.trainEpochs(1)
    # trainer.trainUntilConvergence(maxEpochs=100)

    # trnresult = percentError( trainer.testOnClassData(),trndata['class'] )
    # tstresult = percentError( trainer.testOnClassData(dataset=tstdata ), tstdata['class'] )

    # print "epoch: %4d" % trainer.totalepochs, \
    # 	"  train error: %5.2f%%" % trnresult

    # out = fnn.activateOnDataset(tstdata)
    # out = out.argmax(axis=1)  # the highest output activation gives the class
    return (n, trainer)
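The TODO above defers training to a separate function. A minimal sketch of that companion function, reconstructed from the commented-out calls (percentError from pybrain.utilities, and the trndata/tstdata ClassificationDataSets, are assumptions taken from that commented code):

def trainNetwork(trainer, trndata, tstdata, epochs=20):
    # Hypothetical helper suggested by the TODO; not part of the original source.
    for _ in range(epochs):
        trainer.trainEpochs(1)
        trnresult = percentError(trainer.testOnClassData(), trndata['class'])
        tstresult = percentError(trainer.testOnClassData(dataset=tstdata), tstdata['class'])
        print("epoch: %4d  train error: %5.2f%%  test error: %5.2f%%"
              % (trainer.totalepochs, trnresult, tstresult))
    return trainer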
Code Example #2
File: main.py Project: magnastrazh/NEUCOGAR
def trained_cat_dog_RFCNN():
    n = RecurrentNetwork()

    d = get_cat_dog_trainset()
    input_size = d.getDimension('input')
    n.addInputModule(LinearLayer(input_size, name='in'))
    n.addModule(SigmoidLayer(input_size + 1500, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
    n.addRecurrentConnection(FullConnection(n['out'], n['hidden'], name='nmc'))
    n.sortModules()

    t = BackpropTrainer(n, d, learningrate=0.0001)  #, momentum=0.75)

    count = 0
    while True:
        globErr = t.train()
        print(globErr)
        count += 1
        if globErr < 0.01:
            break
        if count == 30:
            break

    exportCatDogRFCNN(n)
    return n
Code Example #3
File: main.py Project: magnastrazh/NEUCOGAR
def trainedRNN():
    n = RecurrentNetwork()

    n.addInputModule(LinearLayer(4, name='in'))
    n.addModule(SigmoidLayer(6, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))

    n.addRecurrentConnection(NMConnection(n['out'], n['out'], name='nmc'))
    # n.addRecurrentConnection(FullConnection(n['out'], n['hidden'], inSliceFrom = 0, inSliceTo = 1, outSliceFrom = 0, outSliceTo = 3))
    n.sortModules()

    draw_connections(n)
    d = getDatasetFromFile(root.path() + "/res/dataSet")
    t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
    t.trainOnDataset(d)

    count = 0
    while True:
        globErr = t.train()
        print(globErr)
        if globErr < 0.01:
            break
        count += 1
        if count == 50:
            return trainedRNN()
    # exportRNN(n)
    draw_connections(n)

    return n
Code Example #4
File: main.py Project: DianaShatunova/NEUCOGAR
def trainedRNN():
    n = RecurrentNetwork()

    n.addInputModule(LinearLayer(4, name='in'))
    n.addModule(SigmoidLayer(6, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))

    n.addRecurrentConnection(NMConnection(n['out'], n['out'], name='nmc'))
    # n.addRecurrentConnection(FullConnection(n['out'], n['hidden'], inSliceFrom = 0, inSliceTo = 1, outSliceFrom = 0, outSliceTo = 3))
    n.sortModules()

    draw_connections(n)
    d = getDatasetFromFile(root.path()+"/res/dataSet")
    t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
    t.trainOnDataset(d)

    count = 0
    while True:
        globErr = t.train()
        print(globErr)
        if globErr < 0.01:
            break
        count += 1
        if count == 50:
            return trainedRNN()
    # exportRNN(n)
    draw_connections(n)

    return n
Code Example #5
File: main.py Project: DianaShatunova/NEUCOGAR
def trained_cat_dog_RFCNN():
    n = RecurrentNetwork()

    d = get_cat_dog_trainset()
    input_size = d.getDimension('input')
    n.addInputModule(LinearLayer(input_size, name='in'))
    n.addModule(SigmoidLayer(input_size+1500, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
    n.addRecurrentConnection(FullConnection(n['out'], n['hidden'], name='nmc'))
    n.sortModules()

    t = BackpropTrainer(n, d, learningrate=0.0001)#, momentum=0.75)

    count = 0
    while True:
        globErr = t.train()
        print(globErr)
        count += 1
        if globErr < 0.01:
            break
        if count == 30:
            break

    exportCatDogRFCNN(n)
    return n
Code Example #6
File: brain.py Project: SlightlyCyborg/pybot
class MoveBrain:
    def __init__(self):
        self.n = RecurrentNetwork()
        inLayer = LinearLayer(8)
        hiddenLayer = SigmoidLayer(4)
        self.numInputs = 8
        outLayer = LinearLayer(4)
        self.n.addInputModule(inLayer)
        self.n.addModule(hiddenLayer)
        self.n.addOutputModule(outLayer)

        in_to_hidden = FullConnection(inLayer, hiddenLayer)
        hidden_to_out = FullConnection(hiddenLayer, outLayer)

        self.n.addConnection(in_to_hidden)
        self.n.addConnection(hidden_to_out)

        self.n.sortModules()
        self.ds = SupervisedDataSet(8, 4) 
        self.trainer = BackpropTrainer(self.n, self.ds)

    def run(self, inputs):  # 'self' was missing, making self.numInputs unreachable
        if inputs.size() == self.numInputs:
            self.n.activate(inputs)
        else:
            print("number of inputs does not match")

    def addRule(self,rule):
        self.ds.append(rule)

    def saveNetwork(self):
        fileObject = open('networks/avoidandfindv1', 'w')
        pickle.dump(self.n, fileObject)

        fileObject.close()
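A hypothetical usage sketch for this class (the rule values are invented; addSample on the underlying SupervisedDataSet is used since it is the standard pybrain call):

brain = MoveBrain()
# one (input, target) pair with 8 inputs and 4 outputs; values are made up
brain.ds.addSample((0, 0, 1, 0, 0, 0, 0, 1), (0, 1, 0, 0))
brain.trainer.train()  # one backprop pass over the dataset
brain.saveNetwork()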
Code Example #7
def trainFunc(params):
    iter, trainds, validds, input_size, hidden, func, eta, lmda, epochs = params
    print('Iter:', iter, 'Epochs:', epochs, 'Hidden_size:', hidden, 'Eta:',
          eta, 'Lamda:', lmda, 'Activation:', func)

    # Build network
    n = RecurrentNetwork()
    n.addInputModule(LinearLayer(input_size, name='in'))
    n.addModule(func(hidden, name='hidden'))
    n.addModule(LinearLayer(hidden, name='context'))
    n.addOutputModule(LinearLayer(1, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='in_to_hidden'))
    n.addConnection(FullConnection(n['hidden'], n['out'],
                                   name='hidden_to_out'))
    n.addRecurrentConnection(FullConnection(n['hidden'], n['context']))
    rnet = n
    rnet.sortModules()

    trainer = BackpropTrainer(n,
                              trainds,
                              learningrate=eta,
                              weightdecay=lmda,
                              momentum=0.1,
                              shuffle=False)
    trainer.trainEpochs(epochs)
    pred = np.nan_to_num(n.activateOnDataset(validds))
    validerr = eval.calc_RMSE(validds['target'], pred)
    varscore = explained_variance_score(validds['target'], pred)
    return validerr, varscore, n
Code Example #8
File: technicalsRRL.py Project: samstern/MSc-Project
def main():
    inData = createDataset()
    env = MarketEnvironment(inData)
    task = MaximizeReturnTask(env)
    numIn = min(env.worldState.shape)

    net = RecurrentNetwork()
    net.addInputModule(BiasUnit(name='bias'))
    #net.addOutputModule(TanhLayer(1, name='out'))
    net.addOutputModule(SignLayer(1, name='out'))
    net.addRecurrentConnection(FullConnection(net['out'], net['out'], name='c3'))
    net.addInputModule(LinearLayer(numIn, name='in'))
    net.addConnection(FullConnection(net['in'], net['out'], name='c1'))
    net.addConnection(FullConnection(net['bias'], net['out'], name='c2'))
    net.sortModules()
    # remove bias (set weight to 0)
    #initialParams=append(array([0.0]),net._params[1:])
    #net._setParameters(initialParams)
    #net._setParameters([0.0, -0.05861005, 1.64281513, 0.98302613])
    #net._setParameters([0., 1.77132063, 1.3843613, 4.73725269])
    #net._setParameters([0.0, -0.95173719, 1.92989266, 0.06837472])
    net._setParameters([0.0, 1.29560957, -1.14727503, -1.80005888, 0.66351325, 1.19240189])

    ts = env.ts
    learner = RRL(numIn + 2, ts)  # ENAC() #Q_LinFA(2,1)
    agent = LearningAgent(net, learner)
    exp = ContinuousExperiment(task, agent)

    print(net._params)
    exp.doInteractionsAndLearn(len(ts) - 1)
    print(net._params)

    outData = DataFrame(inData['RETURNS'] / 100)
    outData['ts'] = [i / 100 for i in ts]
    outData['cum_log_ts'] = cumsum([log(1 + i) for i in outData['ts']])

    outData['Action_Hist'] = env.actionHistory
    outData['trading rets'] = pE.calculateTradingReturn(outData['Action_Hist'], outData['ts'])
    outData['cum_log_rets'] = cumsum([log(1 + x) for x in outData['trading rets']])

    paramHist = learner.paramHistory
    plt.figure(0)
    for i in range(len(net._params)):
        plt.plot(paramHist[i])
    plt.draw()

    print(pE.percentOfOutperformedMonths(outData['trading rets'],outData['ts']))


    #ax1.plot(sign(actionHist),'r')
    plt.figure(1)
    outData['cum_log_ts'].plot(secondary_y=True)
    outData['cum_log_rets'].plot(secondary_y=True)
    outData['Action_Hist'].plot()
    plt.draw()
    plt.show()
Code Example #9
def runNeuralLearningCurveSimulation(dataTrain, dataTest, train_tfidf, test_tfidf, outFile):
    print('running neural learning curve')
    outFile.write('-------------------------------------\n')
    outFile.write('train==> %d, %d \n'%(train_tfidf.shape[0],train_tfidf.shape[1]))
    outFile.write('test==>  %d, %d \n'%(test_tfidf.shape[0],test_tfidf.shape[1]))
    
    trainDS = getDataSetFromTfidf(train_tfidf, dataTrain.target)
    testDS = getDataSetFromTfidf(test_tfidf, dataTest.target)
    
    print "Number of training patterns: ", len(trainDS)
    print "Input and output dimensions: ", trainDS.indim, trainDS.outdim
    print "First sample (input, target, class):"
    print len(trainDS['input'][0]), trainDS['target'][0], trainDS['class'][0]
    '''
    with SimpleTimer('time to train', outFile):
        net = buildNetwork(trainDS.indim, trainDS.indim/2, trainDS.indim/4, trainDS.indim/8, trainDS.indim/16, 2, hiddenclass=TanhLayer, outclass=SoftmaxLayer)
        trainer = BackpropTrainer( net, dataset=trainDS, momentum=0.1, verbose=True, weightdecay=0.01, batchlearning=True)
    '''
    net = RecurrentNetwork()
    net.addInputModule(LinearLayer(trainDS.indim, name='in'))
    net.addModule(SigmoidLayer(trainDS.indim // 2, name='hidden'))  # integer division: layer sizes must be ints
    net.addModule(SigmoidLayer(trainDS.indim // 4, name='hidden2'))
    net.addOutputModule(SoftmaxLayer(2, name='out'))
    net.addConnection(FullConnection(net['in'], net['hidden'], name='c1'))
    net.addConnection(FullConnection(net['hidden'], net['out'], name='c2'))
    net.addRecurrentConnection(FullConnection(net['hidden'], net['hidden'], name='c3'))
    net.addRecurrentConnection(FullConnection(net['hidden2'], net['hidden'], name='c4'))
    net.sortModules()
    trainer = BackpropTrainer( net, dataset=trainDS, momentum=0.01, verbose=True, weightdecay=0.01)
    
    outFile.write('%s \n' % (net.__str__()))
    epochs = 200
    with SimpleTimer('time to train %d epochs' % epochs, outFile):
        for i in range(epochs):
            trainer.trainEpochs(1)
            trnresult = percentError( trainer.testOnClassData(),
                                  trainDS['class'] )
            tstresult = percentError( trainer.testOnClassData(
               dataset=testDS ), testDS['class'] )
    
            print "epoch: %4d" % trainer.totalepochs, \
                  "  train error: %5.2f%%" % trnresult, \
                  "  test error: %5.2f%%" % tstresult
                  
    outFile.write('%5.2f , %5.2f \n' % (100.0-trnresult, 100.0-tstresult))
                  
    predicted = trainer.testOnClassData(dataset=testDS)
    results = predicted == testDS['class'].flatten()
    wrong = []
    for i in range(len(results)):
        if not results[i]:
            wrong.append(i)
    print('classifier got these wrong:')
    for i in wrong[:10]:
        print(dataTest.data[i], dataTest.target[i])
        outFile.write('%s %d \n' % (dataTest.data[i], dataTest.target[i]))
Code Example #10
def createRecurrent(inputSize, nHidden):
    n = RecurrentNetwork()
    n.addInputModule(LinearLayer(inputSize, name='in'))
    n.addModule(SigmoidLayer(nHidden, name='hidden'))
    n.addOutputModule(LinearLayer(1, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
    n.addRecurrentConnection(FullConnection(n['hidden'], n['hidden'], name='c3'))
    n.sortModules()
    return n
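A minimal usage sketch for the builder above, assuming the usual pybrain imports; recurrent nets train on sequential data, so a SequentialDataSet is used (the toy values are invented):

from pybrain.datasets import SequentialDataSet
from pybrain.supervised.trainers import BackpropTrainer

net = createRecurrent(inputSize=3, nHidden=8)
ds = SequentialDataSet(3, 1)
ds.newSequence()
for x, y in [((0, 0, 1), (0.2,)), ((0, 1, 0), (0.5,)), ((1, 0, 0), (0.9,))]:
    ds.addSample(x, y)
trainer = BackpropTrainer(net, ds)
print(trainer.train())  # one epoch of backprop through time; returns the error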
Code Example #11
def runNeuralSimulation(dataTrain, dataTest, train_tfidf, test_tfidf):
    outFile = open('neuralLog.txt','a')
    outFile.write('-------------------------------------\n')
    outFile.write('train==> %d, %d \n'%(train_tfidf.shape[0],train_tfidf.shape[1]))
    outFile.write('test==>  %d, %d \n'%(test_tfidf.shape[0],test_tfidf.shape[1]))
    
    trainDS = getDataSetFromTfidf(train_tfidf, dataTrain.target)
    testDS = getDataSetFromTfidf(test_tfidf, dataTest.target)
    
    print "Number of training patterns: ", len(trainDS)
    print "Input and output dimensions: ", trainDS.indim, trainDS.outdim
    print "First sample (input, target, class):"
    print len(trainDS['input'][0]), trainDS['target'][0], trainDS['class'][0]
    
#     with SimpleTimer('time to train', outFile):
#         net = buildNetwork(trainDS.indim, trainDS.indim/2, trainDS.indim/4, trainDS.indim/8, trainDS.indim/16, 2, hiddenclass=TanhLayer, outclass=SoftmaxLayer)
#         trainer = BackpropTrainer( net, dataset=trainDS, momentum=0.1, verbose=True, weightdecay=0.01, batchlearning=True)
    net = RecurrentNetwork()
    net.addInputModule(LinearLayer(trainDS.indim, name='in'))
    net.addModule(SigmoidLayer(trainDS.indim // 2, name='hidden'))  # integer division: layer sizes must be ints
    net.addModule(SigmoidLayer(trainDS.indim // 4, name='hidden2'))
    net.addOutputModule(SoftmaxLayer(2, name='out'))
    net.addConnection(FullConnection(net['in'], net['hidden'], name='c1'))
    net.addConnection(FullConnection(net['hidden'], net['out'], name='c2'))
    net.addRecurrentConnection(FullConnection(net['hidden'], net['hidden'], name='c3'))
    net.addRecurrentConnection(FullConnection(net['hidden2'], net['hidden'], name='c4'))
    net.sortModules()
    trainer = BackpropTrainer( net, dataset=trainDS, momentum=0.01, verbose=True, weightdecay=0.01)
    
    outFile.write('%s \n' % (net.__str__()))
    epochs = 2000
    with SimpleTimer('time to train %d epochs' % epochs, outFile):
        for i in range(epochs):
            trainer.trainEpochs(1)
            trnresult = percentError( trainer.testOnClassData(),
                                  trainDS['class'] )
            tstresult = percentError( trainer.testOnClassData(
               dataset=testDS ), testDS['class'] )
    
            print "epoch: %4d" % trainer.totalepochs, \
                  "  train error: %5.2f%%" % trnresult, \
                  "  test error: %5.2f%%" % tstresult
            outFile.write('%5.2f , %5.2f \n' % (100.0-trnresult, 100.0-tstresult))
                  
    predicted = trainer.testOnClassData(dataset=testDS)
    results = predicted == testDS['class'].flatten()
    wrong = []
    for i in range(len(results)):
        if not results[i]:
            wrong.append(i)
    print('classifier got these wrong:')
    for i in wrong[:10]:
        print(dataTest.data[i], dataTest.target[i])
        outFile.write('%s %d \n' % (dataTest.data[i], dataTest.target[i]))
Code Example #12
def buildMinimalLSTMNetwork():
    N = RecurrentNetwork('simpleLstmNet')
    i = LinearLayer(4, name='i')
    h = LSTMLayer(1, peepholes=True, name='lstm')
    o = LinearLayer(1, name='o')
    N.addInputModule(i)
    N.addModule(h)
    N.addOutputModule(o)
    N.addConnection(IdentityConnection(i, h))
    N.addConnection(IdentityConnection(h, o))
    N.sortModules()
    return N
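A short sketch showing the statefulness of the recurrent LSTM net built above (input values invented):

net = buildMinimalLSTMNetwork()
for _ in range(3):
    # outputs typically drift between identical inputs as the LSTM cell state builds up
    print(net.activate([1, 0, 0, 0]))
net.reset()  # clears the recurrent state between sequences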
Code Example #13
File: ben_svm.py Project: bau227/rnn
def build_rec(inp, hid, out):
    n = RecurrentNetwork()
    n.addInputModule(LinearLayer(inp, name='in'))
    n.addModule(TanhLayer(hid, name='hidden'))
    n.addOutputModule(SoftmaxLayer(out, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
    n.addRecurrentConnection(FullConnection(n['hidden'], n['hidden'], name='c3'))
    n.sortModules()
    #n.randomize()

    return n
Code Example #14
def buildMinimalMDLSTMNetwork():
    N = RecurrentNetwork('simpleMdLstmNet')
    i = LinearLayer(4, name = 'i')
    h = MDLSTMLayer(1, peepholes = True, name = 'mdlstm')
    o = LinearLayer(1, name = 'o')
    N.addInputModule(i)
    N.addModule(h)
    N.addOutputModule(o)
    N.addConnection(IdentityConnection(i, h, outSliceTo = 4))
    N.addRecurrentConnection(IdentityConnection(h, h, outSliceFrom = 4, inSliceFrom = 1))
    N.addConnection(IdentityConnection(h, o, inSliceTo = 1))
    N.sortModules()
    return N
Code Example #15
File: tasks.py Project: claymcleod/dnn-trainer
def build_rnn(input_size, output_size, layers):
    net = RecurrentNetwork()
    layers_list = ["in"]
    net.addInputModule(LinearLayer(input_size, name="in"))
    for i in range(0, layers):
        net.addModule(ReluLayer(input_size, name="hidden"+str(i)))
        layers_list.append("hidden"+str(i))
    net.addOutputModule(TanhLayer(output_size, name="out"))
    layers_list.append("out")

    for i in range(0, len(layers_list)-1):
        net.addConnection(FullConnection(net[layers_list[i]], net[layers_list[i+1]]))

    net.sortModules()
    return net
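Note that despite the RecurrentNetwork container, build_rnn adds no recurrent connection, so the result behaves like a plain feed-forward stack. A hypothetical usage sketch (sizes invented):

net = build_rnn(input_size=8, output_size=2, layers=2)
print(net.activate([0] * 8))  # 2-dim output from the Tanh output layer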
Code Example #16
def buildMixedNestedNetwork():
    """ build a nested network with the inner one being a ffn and the outer one being recurrent. """
    N = RecurrentNetwork('outer')
    a = LinearLayer(1, name='a')
    b = LinearLayer(2, name='b')
    c = buildNetwork(2, 3, 1)
    c.name = 'inner'
    N.addInputModule(a)
    N.addModule(c)
    N.addOutputModule(b)
    N.addConnection(FullConnection(a, b))
    N.addConnection(FullConnection(b, c))
    N.addRecurrentConnection(FullConnection(c, c))
    N.sortModules()
    return N
Code Example #17
def buildToddNetwork(hiddenSize):
    net = RecurrentNetwork()
    inLayer = LinearLayer(sampleSize())
    hiddenLayer = SigmoidLayer(hiddenSize)
    outLayer = SigmoidLayer(outputSize())
    net.addInputModule(inLayer)
    net.addModule(hiddenLayer)
    net.addOutputModule(outLayer)
    inRecursive = WeightedPartialIdentityConnection(0.8, pitchCount+1, inLayer, inLayer)
    inToHidden = FullConnection(inLayer, hiddenLayer)
    hiddenToOut = FullConnection(hiddenLayer, outLayer)
    net.addRecurrentConnection(inRecursive)
    net.addConnection(inToHidden)
    net.addConnection(hiddenToOut)
    net.sortModules()
    return net
Code Example #18
File: honn.py Project: kamilsa/KAIProject
def buildSimpleLSTMNetwork(peepholes=False):
    N = RecurrentNetwork('simpleLstmNet')
    i = LinearLayer(100, name='i')
    h = LSTMLayer(10, peepholes=peepholes, name='lstm')
    o = LinearLayer(1, name='o')
    b = BiasUnit('bias')
    N.addModule(b)
    N.addOutputModule(o)
    N.addInputModule(i)
    N.addModule(h)
    N.addConnection(FullConnection(i, h, name='f1'))
    N.addConnection(FullConnection(b, h, name='f2'))
    N.addRecurrentConnection(FullConnection(h, h, name='r1'))
    N.addConnection(FullConnection(h, o, name='f3'))  # renamed from a duplicate 'r1'
    N.sortModules()
    return N
Code Example #19
File: NeuralNet.py Project: ncvc/Sentiment
def createRecurrentNet(historySize):
	net = RecurrentNetwork()

	# Create and add layers	
	net.addInputModule(LinearLayer(historySize * 2, name='in'))
	net.addModule(SigmoidLayer(5, name='hidden'))
	net.addOutputModule(LinearLayer(1, name='out'))

	# Create and add connections between the layers
	net.addConnection(FullConnection(net['in'], net['hidden'], name='c1'))
	net.addConnection(FullConnection(net['hidden'], net['out'], name='c2'))
	net.addRecurrentConnection(FullConnection(net['hidden'], net['hidden'], name='c3'))

	# Preps the net for use
	net.sortModules()

	return net
Code Example #20
def buildElmanNetwork(hiddenSize):
    net = RecurrentNetwork()
    inLayer = LinearLayer(sampleSize())
    hiddenLayer = SigmoidLayer(hiddenSize)
    outLayer = SigmoidLayer(outputSize())
    net.addInputModule(inLayer)
    net.addModule(hiddenLayer)
    net.addOutputModule(outLayer)
    hiddenRecursive = IdentityConnection(hiddenLayer, hiddenLayer)
    inToHidden = FullConnection(inLayer, hiddenLayer)
    hiddenToOut = FullConnection(hiddenLayer, outLayer)
    net.addRecurrentConnection(hiddenRecursive)
    net.addConnection(inToHidden)
    net.addConnection(hiddenToOut)
    net.sortModules()
    net.randomize()
    return net
Code Example #21
File: neuro_probb.py Project: reetesh11/trigram
def network(dataset, input_list):
    num_words = len(input_list)
    #dividing the dataset into training and testing data
    tstdata, trndata = dataset.splitWithProportion(0.25)

    #building the network
    net = RecurrentNetwork()
    input_layer1 = LinearLayer(num_words, name='input_layer1')
    input_layer2 = LinearLayer(num_words, name='input_layer2')
    hidden_layer = TanhLayer(num_words, name='hidden_layer')
    output_layer = SoftmaxLayer(num_words, name='output_layer')
    net.addInputModule(input_layer1)
    net.addInputModule(input_layer2)
    net.addModule(hidden_layer)
    net.addOutputModule(output_layer)
    net.addConnection(FullConnection(input_layer1,
                                     hidden_layer,
                                     name='in1_to_hidden'))
    net.addConnection(FullConnection(input_layer2, hidden_layer,
                                     name='in2_to_hidden'))
    net.addConnection(FullConnection(hidden_layer,
                                     output_layer,
                                     name='hidden_to_output'))
    net.addConnection(FullConnection(input_layer1,
                                     output_layer,
                                     name='in1_to_out'))
    net.addConnection(FullConnection(input_layer2,
                                     output_layer,
                                     name='in2_to_out'))
    net.sortModules()
    #backpropagation
    trainer = BackpropTrainer(net, dataset=trndata,
                              momentum=0.1,
                              verbose=True,
                              weightdecay=0.01)
    #error checking part
    for i in range(10):
        trainer.trainEpochs(1)
        trnresult = percentError(trainer.testOnClassData(), trndata['target'])
        tstresult = percentError(trainer.testOnClassData(dataset=tstdata),
                                 tstdata['target'])
        print "epoch: %4d" % trainer.totalepochs
        print "  train error: %5.10f%%" % trnresult
        print "  test error: %5.10f%%" % tstresult
    return net
Code Example #22
File: nn.py Project: electricFeel/StockSentimentNN
def buildNetwork(hidden_layer=3):
    #build the network
    #create the layers
    input_layer = LinearLayer(4)
    hidden_layer = SigmoidLayer(hidden_layer, name='hidden')
    output_layer = LinearLayer(2)
    net = RecurrentNetwork()

    net.addInputModule(input_layer)
    net.addModule(hidden_layer)
    net.addOutputModule(output_layer)

    in_to_hidden = FullConnection(input_layer, hidden_layer)
    hidden_to_out = FullConnection(hidden_layer, output_layer)
    net.addRecurrentConnection(FullConnection(net['hidden'], net['hidden']))

    net.addConnection(in_to_hidden)
    net.addConnection(hidden_to_out)

    net.sortModules()
    return net
Code Example #23
File: backpropanbncn.py Project: wsgan001/AI
def testTraining():
    # the AnBnCn dataset (sequential)
    d = AnBnCnDataSet()

    # build a recurrent network to be trained
    hsize = 2
    n = RecurrentNetwork()
    n.addModule(TanhLayer(hsize, name='h'))
    n.addModule(BiasUnit(name='bias'))
    n.addOutputModule(LinearLayer(1, name='out'))
    n.addConnection(FullConnection(n['bias'], n['h']))
    n.addConnection(FullConnection(n['h'], n['out']))
    n.addRecurrentConnection(FullConnection(n['h'], n['h']))
    n.sortModules()

    # initialize the backprop trainer and train
    t = BackpropTrainer(n, learningrate=0.1, momentum=0.0, verbose=True)
    t.trainOnDataset(d, 200)

    # the resulting weights are in the network:
    print('Final weights:', n.params)
Code Example #24
File: backpropanbncn.py Project: HKou/pybrain
def testTraining():
    # the AnBnCn dataset (sequential)
    d = AnBnCnDataSet()
    
    # build a recurrent network to be trained
    hsize = 2
    n = RecurrentNetwork()
    n.addModule(TanhLayer(hsize, name = 'h'))
    n.addModule(BiasUnit(name = 'bias'))
    n.addOutputModule(LinearLayer(1, name = 'out'))
    n.addConnection(FullConnection(n['bias'], n['h']))
    n.addConnection(FullConnection(n['h'], n['out']))
    n.addRecurrentConnection(FullConnection(n['h'], n['h']))
    n.sortModules()

    # initialize the backprop trainer and train
    t = BackpropTrainer(n, learningrate = 0.1, momentum = 0.0, verbose = True)
    t.trainOnDataset(d, 200)
    
    # the resulting weights are in the network:
    print('Final weights:', n.params)
Code Example #25
File: main.py Project: DianaShatunova/NEUCOGAR
def trainedRFCNN():
    n = RecurrentNetwork()

    n.addInputModule(LinearLayer(4, name='in'))
    n.addModule(SigmoidLayer(6, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))

    n.addRecurrentConnection(FullConnection(n['out'], n['hidden'], name='nmc'))

    n.sortModules()

    draw_connections(n)
    # d = generateTraininqgData()
    d = getDatasetFromFile(root.path()+"/res/dataSet")
    t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
    t.trainOnDataset(d)
    # FIXME: I'm not sure the recurrent ANN is going to converge
    # so just training for fixed number of epochs

    count = 0
    while True:
        globErr = t.train()
        print(globErr)
        if globErr < 0.01:
            break
        count = count + 1
        if (count == 100):
            return trainedRFCNN()

    # for i in range(100):
    #     print t.train()


    exportRFCNN(n)
    draw_connections(n)

    return n
Code Example #26
File: main.py Project: magnastrazh/NEUCOGAR
def trainedRFCNN():
    n = RecurrentNetwork()

    n.addInputModule(LinearLayer(4, name='in'))
    n.addModule(SigmoidLayer(6, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))

    n.addRecurrentConnection(FullConnection(n['out'], n['hidden'], name='nmc'))

    n.sortModules()

    draw_connections(n)
    # d = generateTraininqgData()
    d = getDatasetFromFile(root.path() + "/res/dataSet")
    t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
    t.trainOnDataset(d)
    # FIXME: I'm not sure the recurrent ANN is going to converge
    # so just training for fixed number of epochs

    count = 0
    while True:
        globErr = t.train()
        print(globErr)
        if globErr < 0.01:
            break
        count = count + 1
        if (count == 100):
            return trainedRFCNN()

    # for i in range(100):
    #     print t.train()

    exportRFCNN(n)
    draw_connections(n)

    return n
Code Example #27
def construct_network(input_len, output_len, hidden_nodes, is_elman=True):
    n = RecurrentNetwork()
    n.addInputModule(LinearLayer(input_len, name="i"))
    n.addModule(BiasUnit("b"))
    n.addModule(SigmoidLayer(hidden_nodes, name="h"))
    n.addOutputModule(LinearLayer(output_len, name="o"))

    n.addConnection(FullConnection(n["i"], n["h"]))
    n.addConnection(FullConnection(n["b"], n["h"]))
    n.addConnection(FullConnection(n["b"], n["o"]))
    n.addConnection(FullConnection(n["h"], n["o"]))

    if is_elman:
        # Elman (hidden->hidden)
        n.addRecurrentConnection(FullConnection(n["h"], n["h"]))
    else:
        # Jordan (out->hidden)
        n.addRecurrentConnection(FullConnection(n["o"], n["h"]))

    n.sortModules()
    n.reset()

    return n
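A quick sketch building both variants (sizes invented); the two differ only in where the recurrent edge attaches:

elman = construct_network(3, 1, 5, is_elman=True)    # hidden -> hidden
jordan = construct_network(3, 1, 5, is_elman=False)  # out -> hidden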
Code Example #28
File: autoenc.py Project: TheMarex/autoenc
def buildNetwork(N):
    dimension = WINDOW_SIZE
    inLayer = LinearLayer(dimension)
    hiddenLayer = SigmoidLayer(N)
    outLayer = LinearLayer(dimension)
    # bias disabled, too much over training
    #bias = BiasUnit(name='bias')
    in_to_hidden = FullConnection(inLayer, hiddenLayer)
    hidden_to_out = FullConnection(hiddenLayer, outLayer)
    #bias_to_out = FullConnection(bias, outLayer)
    #bias_to_hidden = FullConnection(bias, hiddenLayer)

    net = RecurrentNetwork()
    #net.addModule(bias)
    net.addInputModule(inLayer)
    net.addModule(hiddenLayer)
    net.addOutputModule(outLayer)
    net.addConnection(in_to_hidden)
    net.addConnection(hidden_to_out)
    net.addRecurrentConnection(FullConnection(hiddenLayer, hiddenLayer))
    #net.addConnection(bias_to_hidden)
    #net.addConnection(bias_to_out)
    net.sortModules()
    return net
Code Example #29
File: rnn_demo.py Project: yukoba/rnn_demo
def construct_network(hidden_nodes, is_elman=True):
    n = RecurrentNetwork()
    n.addInputModule(LinearLayer(4, name="i"))
    n.addModule(BiasUnit("b"))
    n.addModule(ReluLayer(hidden_nodes, name="h"))
    n.addOutputModule(LinearLayer(4, name="o"))

    n.addConnection(FullConnection(n["i"], n["h"]))
    n.addConnection(FullConnection(n["b"], n["h"]))
    n.addConnection(FullConnection(n["b"], n["o"]))
    n.addConnection(FullConnection(n["h"], n["o"]))

    if is_elman:
        # Elman (hidden->hidden)
        n.addRecurrentConnection(FullConnection(n["h"], n["h"]))
    else:
        # Jordan (out->hidden)
        n.addRecurrentConnection(FullConnection(n["o"], n["h"]))

    n.sortModules()
    n.stdParams = 0.03
    n.randomize()

    return n
Code Example #30
def getModel(dept, hidden_size, input_size, target_size, online=False):

	file_name = output_file_path + 'nn_dept' + str(dept) + '_epoch' + str(epochs)
	
	if online == True:
		try:
			fileObject = open(file_name + '_model', 'r')
			n = pickle.load(fileObject)
			fileObject.close()
			return n
		
		except IOError:
			print "There is no nn object for dept", dept, "exits, So a new model is built."
			pass

	n = RecurrentNetwork()

	n.addInputModule(LinearLayer(input_size, name='in'))
	n.addModule(BiasUnit('bias'))
	for i in range(0, num_hidden_layer+1):
		hidden_name = 'hidden'+str(i)
		n.addModule(SigmoidLayer(hidden_size, name=hidden_name))
	n.addOutputModule(LinearLayer(target_size, name='out'))

	n.addConnection(FullConnection(n['in'], n['hidden0'], name='c1'))
	next_hidden = 'hidden0'

	for i in range(0,num_hidden_layer ):
		current_hidden = 'hidden'+str(i)
		next_hidden = 'hidden'+str(i+1)
		n.addConnection(FullConnection(n[current_hidden], n[next_hidden], name='c'+str(i+2)))

	n.addConnection(FullConnection(n[next_hidden], n['out'], name='c'+str(num_hidden_layer+2)))
	n.addConnection(FullConnection(n['bias'], n['hidden0'], name='c'+str(num_hidden_layer+7)))

	n.sortModules()

	return n
Code Example #31
def _CreateRecurentNN():
    net = RecurrentNetwork()
    net.addInputModule(LinearLayer(4, name='in'))
    net.addModule(BiasUnit(name='hidden_bias'))
    net.addModule(TanhLayer(13, name='hidden'))
    #net.addModule(BiasUnit(name='out_bias'))
    net.addOutputModule(SoftmaxLayer(2, name='out_class'))
    #net.addOutputModule(LinearLayer(1, name='out_predict'))
    #net.addConnection(FullConnection(net['out_bias'], net['out_predict']))
    net.addConnection(FullConnection(net['hidden_bias'], net['hidden']))
    net.addConnection(FullConnection(net['in'], net['hidden'], name='fc1'))
    net.addConnection(FullConnection(net['hidden'], net['out_class'], name='fc2'))
    #net.addConnection(FullConnection(net['hidden'], net['out_predict'], name='fc3'))
    net.addRecurrentConnection(FullConnection(net['hidden'], net['hidden'], name='rc3'))
    net.sortModules()
    return net
Code Example #32
File: network.py Project: jmoles/trail-runner
    def createJeffersonStyleNetwork(in_count=2,
                                    hidden_count=5,
                                    output_count=4,
                                    recurrent=True,
                                    in_to_out_connect=True,
                                    name=None):
        """
        Creates a Jefferson-esque neural network for trail problem.


        Returns:
            pybrain.network. The neural network.

        """

        if recurrent:
            ret_net = RecurrentNetwork(name=name)
        else:
            ret_net = FeedForwardNetwork(name=name)

        in_layer = LinearLayer(in_count, name="food")
        hidden_layer = SigmoidLayer(hidden_count, name="hidden")
        output_layer = LinearLayer(output_count, name="move")

        ret_net.addInputModule(in_layer)
        ret_net.addModule(hidden_layer)
        ret_net.addOutputModule(output_layer)

        in_to_hidden = FullConnection(in_layer, hidden_layer)
        hidden_to_out = FullConnection(hidden_layer, output_layer)

        ret_net.addConnection(in_to_hidden)
        ret_net.addConnection(hidden_to_out)

        if in_to_out_connect:
            in_to_out = FullConnection(in_layer, output_layer)
            ret_net.addConnection(in_to_out)

        if recurrent:
            hidden_to_hidden = FullConnection(hidden_layer, hidden_layer)
            ret_net.addRecurrentConnection(hidden_to_hidden)

        ret_net.sortModules()

        return ret_net
Code Example #33
File: parityrnn.py Project: wsgan001/AI
def buildParityNet():
    net = RecurrentNetwork()
    net.addInputModule(LinearLayer(1, name = 'i'))
    net.addModule(TanhLayer(2, name = 'h'))
    net.addModule(BiasUnit('bias'))
    net.addOutputModule(TanhLayer(1, name = 'o'))
    net.addConnection(FullConnection(net['i'], net['h']))
    net.addConnection(FullConnection(net['bias'], net['h']))
    net.addConnection(FullConnection(net['bias'], net['o']))
    net.addConnection(FullConnection(net['h'], net['o']))
    net.addRecurrentConnection(FullConnection(net['o'], net['h']))
    net.sortModules()

    p = net.params
    p[:] = [-0.5, -1.5, 1, 1, -1, 1, 1, -1, 1]
    p *= 10.

    return net
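A usage sketch, assuming the ±1 input encoding used by pybrain's parity examples (the exact convention is an assumption):

net = buildParityNet()
net.reset()
for bit in [1, -1, 1, 1, -1]:
    # the output should track the running parity of the stream
    print(net.activate([bit]))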
Code Example #34
File: network.py Project: jmoles/trail-runner
    def createJeffersonMDLNetwork(mdl_length=2,
                                  hidden_count=5,
                                  output_count=4,
                                  in_to_out_connect=True,
                                  name=None):

        ret_net = RecurrentNetwork(name=name)

        # Add some components of the neural network.
        hidden_layer = SigmoidLayer(hidden_count, name="hidden")
        output_layer = LinearLayer(output_count, name="move")

        ret_net.addModule(hidden_layer)
        ret_net.addOutputModule(output_layer)

        ret_net.addConnection(
            FullConnection(hidden_layer,
                           output_layer,
                           name="Hidden to Move Layer"))

        mdl_prev = ()

        for idx in range(0, mdl_length):
            # Create the layers
            food_layer = LinearLayer(2, name="Food {0}".format(idx))
            mdl_layer = LinearLayer(2, name="MDL Layer {0}".format(idx))

            # Add to network
            ret_net.addModule(food_layer)
            if idx == 0:
                ret_net.addInputModule(mdl_layer)
            else:
                ret_net.addModule(mdl_layer)
                # Add delay line connection.
                ret_net.addRecurrentConnection(
                    FullConnection(mdl_prev,
                                   mdl_layer,
                                   name="Recurrent DL {0} to DL {1}".format(
                                       idx - 1, idx)))

            # Add connections for
            # - Delay line to NN.
            # - NN to Hidden.
            # - NN to Out (if desired).
            ret_net.addConnection(
                FullConnection(mdl_layer,
                               food_layer,
                               name="DL {0} to Food {0}".format(idx)))
            ret_net.addConnection(
                FullConnection(food_layer,
                               hidden_layer,
                               name="Food {0} to Hidden".format(idx)))
            if in_to_out_connect:
                ret_net.addConnection(
                    FullConnection(food_layer,
                                   output_layer,
                                   name="Food {0} to Output".format(idx)))

            mdl_prev = mdl_layer

        ret_net.sortModules()

        return ret_net
Code Example #35
# fill it
for i in range(len(dataset)):
    DS.appendLinked(dataset.values[i], [tgt.values[i]])

# split 70% for training, 30% for testing
train_set, test_set = DS.splitWithProportion(.7)

# build our recurrent network with 10 hidden neurodes, one recurrent
# connection, using tanh activation functions
net = RecurrentNetwork()
hidden_neurodes = 10
net.addInputModule(LinearLayer(len(train_set["input"][0]), name="in"))
net.addModule(TanhLayer(hidden_neurodes, name="hidden1"))
net.addOutputModule(LinearLayer(len(train_set["target"][0]), name="out"))
net.addConnection(FullConnection(net["in"], net["hidden1"], name="c1"))
net.addConnection(FullConnection(net["hidden1"], net["out"], name="c2"))
net.addRecurrentConnection(
    FullConnection(net["out"], net["hidden1"], name="cout"))
net.sortModules()
net.randomize()

# train for 30 epochs (overkill) using the rprop- training algorithm
trainer = RPropMinusTrainer(net, dataset=train_set, verbose=True)
trainer.trainOnDataset(train_set, 30)

# test on training set
predictions_train = np.array(
    [net.activate(train_set["input"][i])[0] for i in range(len(train_set))])
plt.plot(train_set["target"], c="k")
plt.plot(predictions_train, c="r")
Code Example #36
def buildNonGravityNet(recurrent=False):
    if recurrent:
        net = RecurrentNetwork()
    else:
        net = FeedForwardNetwork()
    l1 = LinearLayer(2)
    l2 = LinearLayer(3)
    s1 = SigmoidLayer(2)
    l3 = LinearLayer(1)
    net.addInputModule(l1)
    net.addModule(l2)
    net.addModule(s1)
    net.addOutputModule(l3)
    net.addConnection(IdentityConnection(l1, l2, outSliceFrom=1))
    net.addConnection(IdentityConnection(l1, l2, outSliceTo=2))
    net.addConnection(IdentityConnection(l2, l3, inSliceFrom=2))
    net.addConnection(IdentityConnection(l2, l3, inSliceTo=1))
    net.addConnection(IdentityConnection(l1, s1))
    net.addConnection(IdentityConnection(l2, s1, inSliceFrom=1))
    net.addConnection(IdentityConnection(s1, l3, inSliceFrom=1))
    if recurrent:
        net.addRecurrentConnection(IdentityConnection(s1, l1))
        net.addRecurrentConnection(
            IdentityConnection(l2, l2, inSliceFrom=1, outSliceTo=2))
    net.sortModules()
    return net
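A brief comparison sketch; the sliced IdentityConnections route individual units rather than whole layers:

ff = buildNonGravityNet(recurrent=False)
rec = buildNonGravityNet(recurrent=True)
print(ff.activate([1.0, 2.0]))   # feed-forward: same output on every call
print(rec.activate([1.0, 2.0]))  # recurrent: state persists across calls until reset()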
Code Example #37
hidden_layer = LSTMLayer(5, name="hidden_layer")
out_layer = SoftmaxLayer(vec_engine.word_vec_dim, name="out_layer")

# Connecting between layers. And a special connection from out to hidden, that is the recurrent connection
conn_in_to_hid = FullConnection(in_layer, hidden_layer, name="in_to_hidden")
conn_hid_to_out = FullConnection(hidden_layer, out_layer, name="hidden_to_out")
recurrent_connection = FullConnection(hidden_layer,
                                      hidden_layer,
                                      name="recurrent")

# Putting everything together.
net.addInputModule(in_layer)
net.addModule(hidden_layer)
net.addOutputModule(out_layer)

net.addConnection(conn_in_to_hid)
net.addConnection(conn_hid_to_out)
net.addRecurrentConnection(recurrent_connection)

net.sortModules()

# Since our preprocessor_engine does stuff and writes output to output.txt,
# neural_engine takes its input from it
input_file = open('output.txt', 'r')

# each line in output.txt is a preprocessed token.
# Read line by line and remove endline character
input_tokens = input_file.readlines()
input_tokens = [t.strip() for t in input_tokens]
input_file.close()
Code Example #38
num_coeff = 26
max_freq = 8000
min_freq = 0
melArray = np.linspace(FEXT.freqToMel(min_freq), FEXT.freqToMel(max_freq),
                       num_coeff + 2)
freqArray = FEXT.melToFreq(melArray)  # 'ferqArray' typo fixed, and below
freqArray_bin = np.floor(513 * freqArray / 16000)
centralPoints = freqArray_bin[1:21]
freqbank = np.zeros((26, 257))

LSTMre = RecurrentNetwork()

LSTMre.addInputModule(LinearLayer(39, name='input'))
LSTMre.addModule(LSTMLayer(50, name='LSTM_hidden'))
LSTMre.addOutputModule(SoftmaxLayer(5, name='out'))
LSTMre.addConnection(
    FullConnection(LSTMre['input'], LSTMre['LSTM_hidden'], name='c1'))
LSTMre.addConnection(
    FullConnection(LSTMre['LSTM_hidden'], LSTMre['out'], name='c2'))
LSTMre.addRecurrentConnection(
    FullConnection(LSTMre['LSTM_hidden'], LSTMre['LSTM_hidden'], name='c3'))
LSTMre.sortModules()
ds = SupervisedDataSet(39, 5)

#ser.

for i in range(1, 27):
    start, center, stop = int(freqArray_bin[i - 1]), int(
        freqArray_bin[i]), int(freqArray_bin[i + 1])
    temp = np.zeros(257)
    ascending = np.linspace(0, 1, center - start + 1)
    descending = np.linspace(1, 0, stop - center + 1)
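The excerpt cuts off inside the filterbank loop. A plausible completion of the triangular mel filter construction (an assumption, not recovered from the source):

    # hypothetical completion: place the two ramps into filter row i-1
    temp[start:center + 1] = ascending
    temp[center:stop + 1] = descending
    freqbank[i - 1] = temp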
Code Example #39
from pybrain.structure import RecurrentNetwork
from pybrain.structure import FullConnection, LinearLayer, LSTMLayer
from parsemusic import ds
import random
print(ds)

layerCount = 10

net = RecurrentNetwork()
net.addInputModule(LinearLayer(10, name='in'))
for x in range(layerCount):
    net.addModule(LSTMLayer(20, name='hidden' + str(x)))
net.addOutputModule(LinearLayer(10, name='out'))
net.addConnection(FullConnection(net['in'], net['hidden0'], name='cIn'))  # was 'hidden1', which left hidden0 without input
for x in range(layerCount - 1):
    net.addConnection(
        FullConnection(net[('hidden' + str(x))],
                       net['hidden' + str(x + 1)],
                       name=('c' + str(x + 1))))
net.addConnection(
    FullConnection(net['hidden' + str(layerCount - 1)],
                   net['out'],
                   name='cOut'))
net.sortModules()
from pybrain.supervised import RPropMinusTrainer
trainer = RPropMinusTrainer(net, dataset=ds)

epochcount = 0
while True:
    startingnote = random.choice(range(1, 17))
    startingnote2 = random.choice(range(1, 17))
Code Example #40
def exec_algo(xml_file, output_location):
    rootObj = ml.parse(xml_file)
    file_name = rootObj.MachineLearning.prediction.datafile
    file = open(file_name)
    var_input = rootObj.MachineLearning.prediction.input
    var_output = rootObj.MachineLearning.prediction.output
    var_classes = rootObj.MachineLearning.prediction.classes

    DS = ClassificationDataSet(var_input, var_output, nb_classes=var_classes)
    #DS1=ClassificationDataSet(13,1,nb_classes=10)

    for line in file.readlines():
        data = [float(x) for x in line.strip().split(',') if x != '']
        inp = tuple(data[:var_input])
        output = tuple(data[var_input:])
        DS.addSample(inp, output)

    tstdata, trndata = DS.splitWithProportion(0)
    #trndatatest,tstdatatest=DS1.splitWithProportion(0)

    trdata = ClassificationDataSet(trndata.indim, 1, nb_classes=10)
    #tsdata=ClassificationDataSet(DS1.indim,1,nb_classes=10)
    #tsdata1=ClassificationDataSet(DS1.indim,1,nb_classes=10)

    for i in range(trndata.getLength()):
        if (trndata.getSample(i)[1][0] != 100):
            trdata.addSample(trndata.getSample(i)[0], trndata.getSample(i)[1])

    trdata._convertToOneOfMany()
    #tsdata._convertToOneOfMany()
    #tsdata1._convertToOneOfMany()
    print "%d" % (trdata.getLength())

    rnn = RecurrentNetwork()
    inputLayer = LinearLayer(trdata.indim)

    hiddenLayer = rootObj.MachineLearning.prediction.algorithm.RecurrentNeuralNetwork.hiddenLayerActivation
    hiddenNeurons = rootObj.MachineLearning.prediction.algorithm.RecurrentNeuralNetwork.hiddenNeurons

    if hiddenLayer == 'Sigmoid':
        hiddenLayer = SigmoidLayer(hiddenNeurons)
    elif hiddenLayer == 'Softmax':
        hiddenLayer = SoftmaxLayer(hiddenNeurons)
    else:
        hiddenLayer = LinearLayer(hiddenNeurons)

    outputLayer = rootObj.MachineLearning.prediction.algorithm.RecurrentNeuralNetwork.outputLayerActivation

    if outputLayer == 'Sigmoid':
        outputLayer = SigmoidLayer(trdata.outdim)
    elif outputLayer == 'Softmax':
        outputLayer = SoftmaxLayer(trdata.outdim)
    else:
        outputLayer = LinearLayer(trdata.outdim)

    rnn.addInputModule(inputLayer)
    rnn.addModule(hiddenLayer)
    rnn.addOutputModule(outputLayer)
    rnn_type = rootObj.MachineLearning.prediction.algorithm.RecurrentNeuralNetwork.RNN_Type
    in_to_hidden = FullConnection(inputLayer, hiddenLayer)
    hidden_to_outputLayer = FullConnection(hiddenLayer, outputLayer)
    rnn.addConnection(in_to_hidden)
    rnn.addConnection(hidden_to_outputLayer)

    if rnn_type == 'Elman':
        hidden_to_hidden = FullConnection(hiddenLayer, hiddenLayer, name='c3')
        rnn.addRecurrentConnection(hidden_to_hidden)
    #hidden_to_hidden=FullConnection(hiddenLayer,hiddenLayer, name='c3')

    if rnn_type == 'Jordan':
        output_to_hidden = FullConnection(outputLayer, hiddenLayer, name='c3')
        rnn.addRecurrentConnection(output_to_hidden)

    #rnn.addRecurrentConnection(hidden_to_hidden)
    momentum = rootObj.MachineLearning.prediction.algorithm.RecurrentNeuralNetwork.momentum
    weightdecay = rootObj.MachineLearning.prediction.algorithm.RecurrentNeuralNetwork.learningRate
    rnn.sortModules()
    trainer = BackpropTrainer(rnn,
                              dataset=trdata,
                              momentum=0.1,
                              verbose=True,
                              weightdecay=0.01)
    trainer.train()
    result = (percentError(trainer.testOnClassData(dataset=trdata),
                           trdata['class']))
    #result1=percentError(trainer.testOnClassData(dataset=tsdata1),tsdata1['class'])

    print('%f \n' % (100 - result))
    #print ('%f \n') % (100-result1)

    ts = time.time()
    directory = output_location + sep + str(int(ts))
    makedirs(directory)
    fileObject = open(
        output_location + sep + str(int(ts)) + sep + 'pybrain_RNN', 'w')
    pickle.dump(trainer, fileObject)
    pickle.dump(rnn, fileObject)
    fileObject.close()
Code Example #41
import itertools
from pybrain.structure import RecurrentNetwork, LinearLayer, SigmoidLayer, FullConnection
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.datasets import SupervisedDataSet

john = 1
bill = 2
sue = 3
mary = 4
love = 10
see = 11

ds = SupervisedDataSet(3, 1)  # samples below have three inputs: (verb, a, b)

for verb in [love, see]:
    for a, b in itertools.combinations([john, bill, sue, mary], 2):
        ds.addSample((verb, a, b), (1, ))

n = RecurrentNetwork()
n.addInputModule(LinearLayer(3, name='in'))
n.addModule(SigmoidLayer(3, name='hidden'))
n.addOutputModule(LinearLayer(1, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
n.addRecurrentConnection(FullConnection(n['hidden'], n['hidden'], name='c3'))
n.sortModules()  # required before the network can be trained or activated

trainer = BackpropTrainer(n, ds)
trainer.train()
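
A quick sanity check of the trained network (a sketch; the triple follows the integer encoding defined above, and every sample was given the constant target 1):

n.reset()  # clear the recurrent state before a fresh activation
print n.activate((love, john, mary))  # should move towards the target value 1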
Code example #52
File: networks.py  Project: Boblogic07/pybrain
""" The former are the last slice of the latter. """

print n.params[-3:] == hidden2out.params

""" Ok, after having covered the basics, let's move on to some additional concepts.
First of all, we encourage you to name all modules, or connections you create, because that gives you
more readable printouts, and a very concise way of accessing them.

We now build an equivalent network to the one before, but with a more concise syntax:
"""
n2 = RecurrentNetwork(name='net2')
n2.addInputModule(LinearLayer(2, name='in'))
n2.addModule(SigmoidLayer(3, name='h'))
n2.addOutputModule(LinearLayer(1, name='out'))
n2.addConnection(FullConnection(n2['in'], n2['h'], name='c1'))
n2.addConnection(FullConnection(n2['h'], n2['out'], name='c2'))
n2.sortModules()

""" Printouts look more concise and readable: """
print n2

""" There is an even quicker way to build networks though, as long as their structure is nothing
more fancy than a stack of fully connected layers: """

n3 = buildNetwork(2, 3, 1, bias=False)

""" Recurrent networks are working in the same way, except that the recurrent connections
need to be explicitly declared upon construction.

We can modify our existing network 'net2' and add a recurrent connection on the hidden layer: """
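
The scraped excerpt ends before the code it announces; a minimal sketch of that step, reusing the layer names of 'net2' from above:

n2.addRecurrentConnection(FullConnection(n2['h'], n2['h'], name='rec'))
n2.sortModules()  # re-sort after changing the topology
print n2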
Code example #53
File: train.py  Project: krishna23444/Projects
n.addInputModule(input1)
n.addModule(hidden1)
n.addModule(hidden2)
n.addModule(hidden3)
n.addModule(output1)
n.addOutputModule(output2)

conn1 = FullConnection(input1, hidden1)
conn2 = FullConnection(input1, hidden2)
conn3 = FullConnection(hidden1, hidden3)
conn4 = FullConnection(hidden2, hidden3)
conn5 = FullConnection(hidden3, output1)
conn6 = FullConnection(output1, output2)

n.addConnection(conn1)
n.addConnection(conn2)
n.addConnection(conn3)
n.addConnection(conn4)
n.addConnection(conn5)
n.addConnection(conn6)

n.sortModules()

trainer = BackpropTrainer(n, dataset=train, momentum=0.1, learningrate=0.02, verbose=True)

#trainer.trainUntilConvergence()
#NetworkWriter.writeToFile(n, 'g2p_10.xml')
trainer.trainEpochs(500)
print 'Percent Error on Test dataset: ', percentError(trainer.testOnClassData(dataset=test), test['target'])
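
The commented-out NetworkWriter line above hints at persisting the result; a short sketch of saving and restoring the trained network with PyBrain's XML tools (file name taken from that comment):

from pybrain.tools.customxml.networkwriter import NetworkWriter
from pybrain.tools.customxml.networkreader import NetworkReader

NetworkWriter.writeToFile(n, 'g2p_10.xml')         # serialise the trained network
n_restored = NetworkReader.readFrom('g2p_10.xml')  # rebuild it later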
Code example #54
File: episodicSnP.py  Project: samstern/MSc-Project
from pybrain.rl.environments.timeseries.timeseries import MonthlySnPEnvironment
# assumed location: MaximizeReturnTask (used below) ships alongside the timeseries environments in this project
from pybrain.rl.environments.timeseries.maximizereturntask import MaximizeReturnTask
from pybrain.rl.learners.directsearch.rrl import RRL

from pybrain.structure import RecurrentNetwork
from pybrain.structure import LinearLayer, SigmoidLayer, TanhLayer, BiasUnit
from pybrain.structure import FullConnection
from pybrain.rl.agents import LearningAgent
from pybrain.rl.experiments import EpisodicExperiment

from numpy import sign, round
from matplotlib import pyplot

net = RecurrentNetwork()
# Single linear layer with bias unit feeding a single tanh layer; the linear weights are what gets optimised
net.addInputModule(BiasUnit(name='bias'))
net.addOutputModule(TanhLayer(1, name='out'))
net.addRecurrentConnection(FullConnection(net['out'], net['out'], name='c3'))
net.addInputModule(LinearLayer(1, name='in'))
net.addConnection(FullConnection(net['in'], net['out'], name='c1'))
net.addConnection(FullConnection(net['bias'], net['out'], name='c2'))
net.sortModules()
net._setParameters([-8.79227886e-02, -8.29319017e+02, 1.25946474e+00])
print(net._params)
env = MonthlySnPEnvironment()
task = MaximizeReturnTask(env)
learner = RRL()  # alternatives: ENAC() or Q_LinFA(2, 1)
agent = LearningAgent(net, learner)
exp = EpisodicExperiment(task, agent)

exp.doEpisodes(10)
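
After the episodes have run, the network inside the agent carries the adapted weights; a sketch of reading off the learned trading signal, assuming the standard Environment.getSensors() interface (the +1/-1 long/short convention is an assumption, and presumably why sign is imported above):

obs = env.getSensors()
net.reset()
action = net.activate(obs)
print sign(action)  # +1 = long, -1 = short (assumed convention)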
Code example #55
print('  LEARNINGRATE:', LEARNINGRATE)
print('  MOMENTUM:', MOMENTUM)
print('====================================')
print('====================================')

# Prepare recurrent network
net = RecurrentNetwork()

# Add layers
net.addInputModule(LinearLayer(N * N, name='in'))
for layer in range(1, HIDDENLAYERS + 1):
    net.addModule(SigmoidLayer(N * N, name='hidden' + str(layer)))
net.addOutputModule(TanhLayer(N * N, name='out'))

# Add connections between layers
net.addConnection(FullConnection(net['in'], net['hidden1']))
for layer in range(1, HIDDENLAYERS):
    net.addConnection(
        FullConnection(net['hidden' + str(layer)],
                       net['hidden' + str(layer + 1)]))
net.addConnection(FullConnection(net['hidden' + str(HIDDENLAYERS)],
                                 net['out']))
net.addRecurrentConnection(
    FullConnection(net['hidden' + str(HIDDENLAYERS)], net['hidden1']))
net.sortModules()

# Trainer
trainer = BackpropTrainer(net,
                          dataset=trainingData,
                          learningrate=LEARNINGRATE,
                          momentum=MOMENTUM)
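
The scrape cuts this example off at the trainer, so no training loop survives; a minimal sketch under the assumption that one train() call per epoch is intended (EPOCHS is a hypothetical constant in the spirit of the configuration names above):

EPOCHS = 100  # hypothetical epoch budget
for epoch in range(EPOCHS):
    error = trainer.train()  # one full pass over trainingData
    print 'epoch %d: error %f' % (epoch, error)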
Code example #56
File: pyb_ann_const.py  Project: pcolo/regret
DS = SupervisedDataSet(4, 1)
for i in range(Y.size):
    DS.addSample((X[i][0], X[i][1], X[i][2], X[i][3]), (float(Y[i]),))

## ----------------------- ANN ---------------------------- ##

from pybrain.structure import RecurrentNetwork
n = RecurrentNetwork()

from pybrain.structure import LinearLayer, SigmoidLayer
from pybrain.structure import FullConnection

n.addInputModule(SigmoidLayer(4, name='in'))
n.addModule(SigmoidLayer(3, name='hidden'))
n.addOutputModule(LinearLayer(1, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))

n.sortModules() #initialisation


## ----------------------- Trainer ---------------------------- ##

from pybrain.supervised.trainers import BackpropTrainer

tstdata, trndata = DS.splitWithProportion(0.25)

# print len(tstdata)
# print len(trndata)

# train on the training split only, so that tstdata stays held out
trainer = BackpropTrainer(n, trndata, learningrate=0.1, momentum=0.5, weightdecay=0.0001)
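
The 25% split above only pays off if the held-out set is actually scored; a sketch using the trainer's testOnData, which returns the average error on a SupervisedDataSet (the epoch count is an assumption):

for epoch in range(50):  # hypothetical epoch budget
    trainer.train()
print 'train error:', trainer.testOnData(trndata)
print 'test error: ', trainer.testOnData(tstdata)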
Code example #57
File: RNNScript.py  Project: matthp/WorkExamples
conglomerateString = []

# Construct LSTM network
rnn = RecurrentNetwork()

inputSize = len(codeTable['a'].values)
outputSize = 4
hiddenSize = 10

rnn.addInputModule(LinearLayer(dim=inputSize, name='in'))
rnn.addModule(TanhLayer(dim=hiddenSize, name='in_proc'))
rnn.addModule(LSTMLayer(dim=hiddenSize, peepholes=True, name='hidden'))
rnn.addModule(TanhLayer(dim=hiddenSize, name='out_proc'))
rnn.addOutputModule(SoftmaxLayer(dim=outputSize, name='out'))

rnn.addConnection(FullConnection(rnn['in'], rnn['in_proc'], name='c1'))
rnn.addConnection(FullConnection(rnn['in_proc'], rnn['hidden'], name='c2'))
rnn.addRecurrentConnection(
    FullConnection(rnn['hidden'], rnn['hidden'], name='c3'))
rnn.addConnection(FullConnection(rnn['hidden'], rnn['out_proc'], name='c4'))
rnn.addConnection(FullConnection(rnn['out_proc'], rnn['out'], name='c5'))

rnn.sortModules()

# Construct dataset
trainingData = SequentialDataSet(inputSize, outputSize)

for index, row in df.iterrows():
    trainingData.newSequence()
    inputSequence = list((row.values)[0])