Code example #1
File: models.py  Project: mMaxy/ai-builder
    def buildNN(self, net, functions, inp, out):
        """Assemble a PyBrain network from a layer spec: `functions` holds
        keys into self.func, `inp`/`out` are the input/output layer sizes."""
        layers = []

        inLayer = self.func[functions[0]](inp)
        layers.append(inLayer)
        outLayer = self.func[functions[-1]](out)

        # hidden layers (each created with a single unit here)
        for neural in range(1, len(net) - 1):
            layers.append(self.func[functions[neural]](1))
        layers.append(outLayer)

        connections, recConnections = self.fillConnections(net, [], [0], layers)
        # a recurrent container is only needed if fillConnections actually
        # produced recurrent connections
        if len(recConnections) == 0:
            n = FeedForwardNetwork()
        else:
            n = RecurrentNetwork()
        n.addInputModule(inLayer)
        for layer in range(1, len(layers) - 1):
            n.addModule(layers[layer])
        n.addOutputModule(outLayer)

        for con in connections:
            n.addConnection(con)
        for rcon in recConnections:
            n.addRecurrentConnection(rcon)
        n.sortModules()
        return n
Code example #2
def getNetwork(trndata):
    n = RecurrentNetwork()
    n.addInputModule(LinearLayer(trndata.indim, name='in'))
    n.addModule(SigmoidLayer(100, name='hidden'))
    n.addOutputModule(LinearLayer(trndata.outdim, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
    n.addRecurrentConnection(
        FullConnection(n['hidden'], n['hidden'], name='c3'))
    n.sortModules()

    # fnn = buildNetwork( trndata.indim, 5, trndata.outdim, outclass=SoftmaxLayer )
    trainer = BackpropTrainer(n,
                              dataset=trndata,
                              momentum=0.1,
                              verbose=True,
                              weightdecay=0.01)

    # TODO: return network and trainer here. Make another function for training
    # for i in range(20):
    # trainer.trainEpochs(1)
    # trainer.trainUntilConvergence(maxEpochs=100)

    # trnresult = percentError( trainer.testOnClassData(),trndata['class'] )
    # tstresult = percentError( trainer.testOnClassData(dataset=tstdata ), tstdata['class'] )

    # print "epoch: %4d" % trainer.totalepochs, \
    # 	"  train error: %5.2f%%" % trnresult

    # out = fnn.activateOnDataset(tstdata)
    # out = out.argmax(axis=1)  # the highest output activation gives the class
    return (n, trainer)
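
The TODO above asks for a separate training function. A minimal sketch of what that could look like, assuming the (network, trainer) pair returned by getNetwork plus a test set with a 'class' field; the function name and epoch count are illustrative, and percentError comes from pybrain.utilities:

from pybrain.utilities import percentError

def trainNetwork(n, trainer, trndata, tstdata, epochs=20):
    # train one epoch at a time so classification error can be reported
    for _ in range(epochs):
        trainer.trainEpochs(1)
        trnresult = percentError(trainer.testOnClassData(), trndata['class'])
        tstresult = percentError(trainer.testOnClassData(dataset=tstdata),
                                 tstdata['class'])
        print("epoch: %4d  train error: %5.2f%%  test error: %5.2f%%"
              % (trainer.totalepochs, trnresult, tstresult))
    return n
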
Code example #3
def buildNonGravityNet(recurrent=False):
    if recurrent:
        net = RecurrentNetwork()
    else:
        net = FeedForwardNetwork()
    l1 = LinearLayer(2)
    l2 = LinearLayer(3)
    s1 = SigmoidLayer(2)
    l3 = LinearLayer(1)
    net.addInputModule(l1)
    net.addModule(l2)
    net.addModule(s1)
    net.addOutputModule(l3)
    net.addConnection(IdentityConnection(l1, l2, outSliceFrom=1))
    net.addConnection(IdentityConnection(l1, l2, outSliceTo=2))
    net.addConnection(IdentityConnection(l2, l3, inSliceFrom=2))
    net.addConnection(IdentityConnection(l2, l3, inSliceTo=1))
    net.addConnection(IdentityConnection(l1, s1))
    net.addConnection(IdentityConnection(l2, s1, inSliceFrom=1))
    net.addConnection(IdentityConnection(s1, l3, inSliceFrom=1))
    if recurrent:
        net.addRecurrentConnection(IdentityConnection(s1, l1))
        net.addRecurrentConnection(
            IdentityConnection(l2, l2, inSliceFrom=1, outSliceTo=2))
    net.sortModules()
    return net
Code example #4
def trainFunc(params):
    iter, trainds, validds, input_size, hidden, func, eta, lmda, epochs = params
    print('Iter:', iter, 'Epochs:', epochs, 'Hidden_size:', hidden, 'Eta:',
          eta, 'Lamda:', lmda, 'Activation:', func)

    # Build network
    n = RecurrentNetwork()
    n.addInputModule(LinearLayer(input_size, name='in'))
    n.addModule(func(hidden, name='hidden'))
    n.addModule(LinearLayer(hidden, name='context'))
    n.addOutputModule(LinearLayer(1, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='in_to_hidden'))
    n.addConnection(FullConnection(n['hidden'], n['out'],
                                   name='hidden_to_out'))
    # the 'context' layer only receives a copy of the hidden state; it has no
    # forward path to the output
    n.addRecurrentConnection(FullConnection(n['hidden'], n['context']))
    n.sortModules()

    trainer = BackpropTrainer(n,
                              trainds,
                              learningrate=eta,
                              weightdecay=lmda,
                              momentum=0.1,
                              shuffle=False)
    trainer.trainEpochs(epochs)
    pred = np.nan_to_num(n.activateOnDataset(validds))
    validerr = eval.calc_RMSE(validds['target'], pred)
    varscore = explained_variance_score(validds['target'], pred)
    return validerr, varscore, n
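
Because trainFunc takes a single packed tuple, it drops straight into multiprocessing.Pool.map for a hyperparameter sweep. A hypothetical invocation, assuming trainds/validds datasets already exist; the sizes, rates, and grid values are illustrative:

from multiprocessing import Pool
from pybrain.structure import TanhLayer

# one parameter tuple per worker: (iter, trainds, validds, input_size,
# hidden, func, eta, lmda, epochs)
param_grid = [(i, trainds, validds, 10, h, TanhLayer, 0.01, 0.001, 50)
              for i, h in enumerate([5, 10, 20])]

pool = Pool()
results = pool.map(trainFunc, param_grid)
pool.close()

# keep the network with the lowest validation RMSE
best_err, best_var, best_net = min(results, key=lambda r: r[0])
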
Code example #5
File: main.py  Project: magnastrazh/NEUCOGAR
def trainedRNN():
    n = RecurrentNetwork()

    n.addInputModule(LinearLayer(4, name='in'))
    n.addModule(SigmoidLayer(6, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))

    n.addRecurrentConnection(NMConnection(n['out'], n['out'], name='nmc'))
    # n.addRecurrentConnection(FullConnection(n['out'], n['hidden'], inSliceFrom = 0, inSliceTo = 1, outSliceFrom = 0, outSliceTo = 3))
    n.sortModules()

    draw_connections(n)
    d = getDatasetFromFile(root.path() + "/res/dataSet")
    t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
    t.trainOnDataset(d)

    count = 0
    while True:
        globErr = t.train()
        print(globErr)
        if globErr < 0.01:
            break
        count += 1
        if count == 50:
            # not converging: restart with freshly initialized weights
            return trainedRNN()
    # exportRNN(n)
    draw_connections(n)

    return n
Code example #6
File: main.py  Project: magnastrazh/NEUCOGAR
def trained_cat_dog_RFCNN():
    n = RecurrentNetwork()

    d = get_cat_dog_trainset()
    input_size = d.getDimension('input')
    n.addInputModule(LinearLayer(input_size, name='in'))
    n.addModule(SigmoidLayer(input_size + 1500, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
    n.addRecurrentConnection(FullConnection(n['out'], n['hidden'], name='nmc'))
    n.sortModules()

    t = BackpropTrainer(n, d, learningrate=0.0001)  #, momentum=0.75)

    count = 0
    while True:
        globErr = t.train()
        print(globErr)
        count += 1
        if globErr < 0.01:
            break
        if count == 30:
            break

    exportCatDogRFCNN(n)
    return n
Code example #7
def runNeuralSimulation(dataTrain, dataTest, train_tfidf, test_tfidf):
    outFile = open('neuralLog.txt','a')
    outFile.write('-------------------------------------\n')
    outFile.write('train==> %d, %d \n'%(train_tfidf.shape[0],train_tfidf.shape[1]))
    outFile.write('test==>  %d, %d \n'%(test_tfidf.shape[0],test_tfidf.shape[1]))
    
    trainDS = getDataSetFromTfidf(train_tfidf, dataTrain.target)
    testDS = getDataSetFromTfidf(test_tfidf, dataTest.target)
    
    print "Number of training patterns: ", len(trainDS)
    print "Input and output dimensions: ", trainDS.indim, trainDS.outdim
    print "First sample (input, target, class):"
    print len(trainDS['input'][0]), trainDS['target'][0], trainDS['class'][0]
    
#     with SimpleTimer('time to train', outFile):
#         net = buildNetwork(trainDS.indim, trainDS.indim/2, trainDS.indim/4, trainDS.indim/8, trainDS.indim/16, 2, hiddenclass=TanhLayer, outclass=SoftmaxLayer)
#         trainer = BackpropTrainer( net, dataset=trainDS, momentum=0.1, verbose=True, weightdecay=0.01, batchlearning=True)
    net = RecurrentNetwork()
    net.addInputModule(LinearLayer(trainDS.indim, name='in'))
    net.addModule(SigmoidLayer(trainDS.indim // 2, name='hidden'))
    net.addModule(SigmoidLayer(trainDS.indim // 4, name='hidden2'))
    net.addOutputModule(SoftmaxLayer(2, name='out'))
    net.addConnection(FullConnection(net['in'], net['hidden'], name='c1'))
    net.addConnection(FullConnection(net['hidden'], net['out'], name='c2'))
    net.addRecurrentConnection(FullConnection(net['hidden'], net['hidden'], name='c3'))
    # note: 'hidden2' has no forward path; it only feeds 'hidden' through
    # this recurrent connection
    net.addRecurrentConnection(FullConnection(net['hidden2'], net['hidden'], name='c4'))
    net.sortModules()
    trainer = BackpropTrainer(net, dataset=trainDS, momentum=0.01, verbose=True, weightdecay=0.01)
    
    outFile.write('%s \n' % (net.__str__()))
    epochs = 2000
    with SimpleTimer('time to train %d epochs' % epochs, outFile):
        for i in range(epochs):
            trainer.trainEpochs(1)
            trnresult = percentError(trainer.testOnClassData(),
                                     trainDS['class'])
            tstresult = percentError(trainer.testOnClassData(dataset=testDS),
                                     testDS['class'])
    
            print "epoch: %4d" % trainer.totalepochs, \
                  "  train error: %5.2f%%" % trnresult, \
                  "  test error: %5.2f%%" % tstresult
            outFile.write('%5.2f , %5.2f \n' % (100.0-trnresult, 100.0-tstresult))
                  
    predicted = trainer.testOnClassData(dataset=testDS)
    results = predicted == testDS['class'].flatten()
    wrong = []
    for i in range(len(results)):
        if not results[i]:
            wrong.append(i)
    print('classifier got these wrong:')
    for i in wrong[:10]:
        print(dataTest.data[i], dataTest.target[i])
        outFile.write('%s %d \n' % (dataTest.data[i], dataTest.target[i]))
Code example #8
def buildMinimalLSTMNetwork():
    N = RecurrentNetwork('simpleLstmNet')
    i = LinearLayer(4, name='i')
    h = LSTMLayer(1, peepholes=True, name='lstm')
    o = LinearLayer(1, name='o')
    N.addInputModule(i)
    N.addModule(h)
    N.addOutputModule(o)
    N.addConnection(IdentityConnection(i, h))
    N.addConnection(IdentityConnection(h, o))
    N.sortModules()
    return N
Code example #9
def buildMinimalMDLSTMNetwork():
    N = RecurrentNetwork('simpleMdLstmNet')
    i = LinearLayer(4, name='i')
    h = MDLSTMLayer(1, peepholes=True, name='mdlstm')
    o = LinearLayer(1, name='o')
    N.addInputModule(i)
    N.addModule(h)
    N.addOutputModule(o)
    N.addConnection(IdentityConnection(i, h, outSliceTo=4))
    N.addRecurrentConnection(IdentityConnection(h, h, outSliceFrom=4, inSliceFrom=1))
    N.addConnection(IdentityConnection(h, o, inSliceTo=1))
    N.sortModules()
    return N
Code example #10
File: tako.py  Project: Reedy-C/garden
 def __init__(self, dire, x, y, genome):
     self.direction = dire
     self.x = x
     self.y = y
     self.genome = genome
     self.hunger = 150
     self.boredom = 150
     self.pain = 0
     self.last_hunger = 150
     self.last_boredom = 150
     self.last_pain = 0
     self.last_obj = None
     #self.net = FeedForwardNetwork()
     self.net = RecurrentNetwork()
     self.net.sequential = False
Code example #11
def buildMixedNestedNetwork():
    """ build a nested network with the inner one being a ffn and the outer one being recurrent. """
    N = RecurrentNetwork('outer')
    a = LinearLayer(1, name='a')
    b = LinearLayer(2, name='b')
    c = buildNetwork(2, 3, 1)
    c.name = 'inner'
    N.addInputModule(a)
    N.addModule(c)
    N.addOutputModule(b)
    N.addConnection(FullConnection(a, b))
    N.addConnection(FullConnection(b, c))
    N.addRecurrentConnection(FullConnection(c, c))
    N.sortModules()
    return N
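
A nested network activates like any other module. A quick illustrative usage sketch of the builder above; the inner net is addressable by the name set on it:

net = buildMixedNestedNetwork()
print(net['inner'])           # the nested feed-forward net, looked up by name
print(net.activate([0.5]))    # one input unit -> two output units
print(net.activate([0.5]))    # differs: the recurrent inner->inner connection carries state
net.reset()                   # clear the recurrent buffers before a new sequence
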
Code example #12
def buildSimpleLSTMNetwork(peepholes=False):
    N = RecurrentNetwork('simpleLstmNet')
    i = LinearLayer(100, name='i')
    h = LSTMLayer(10, peepholes=peepholes, name='lstm')
    o = LinearLayer(1, name='o')
    b = BiasUnit('bias')
    N.addModule(b)
    N.addOutputModule(o)
    N.addInputModule(i)
    N.addModule(h)
    N.addConnection(FullConnection(i, h, name='f1'))
    N.addConnection(FullConnection(b, h, name='f2'))
    N.addRecurrentConnection(FullConnection(h, h, name='r1'))
    N.addConnection(FullConnection(h, o, name='f3'))  # forward connection; distinct name from the recurrent 'r1'
    N.sortModules()
    return N
Code example #13
File: network.py  Project: jmoles/trail-runner
    def createJeffersonStyleNetwork(in_count=2,
                                    hidden_count=5,
                                    output_count=4,
                                    recurrent=True,
                                    in_to_out_connect=True,
                                    name=None):
        """
        Creates a Jefferson-esque neural network for trail problem.


        Returns:
            pybrain.network. The neural network.

        """

        if recurrent:
            ret_net = RecurrentNetwork(name=name)
        else:
            ret_net = FeedForwardNetwork(name=name)

        in_layer = LinearLayer(in_count, name="food")
        hidden_layer = SigmoidLayer(hidden_count, name="hidden")
        output_layer = LinearLayer(output_count, name="move")

        ret_net.addInputModule(in_layer)
        ret_net.addModule(hidden_layer)
        ret_net.addOutputModule(output_layer)

        in_to_hidden = FullConnection(in_layer, hidden_layer)
        hidden_to_out = FullConnection(hidden_layer, output_layer)

        ret_net.addConnection(in_to_hidden)
        ret_net.addConnection(hidden_to_out)

        if in_to_out_connect:
            in_to_out = FullConnection(in_layer, output_layer)
            ret_net.addConnection(in_to_out)

        if recurrent:
            hidden_to_hidden = FullConnection(hidden_layer, hidden_layer)
            ret_net.addRecurrentConnection(hidden_to_hidden)

        ret_net.sortModules()

        return ret_net
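
The def above takes no self argument, so it is presumably a staticmethod on this project's network class. A hedged usage sketch, treating it as a plain function; all argument values are illustrative:

rec_net = createJeffersonStyleNetwork(in_count=2, hidden_count=5,
                                      output_count=4, recurrent=True)
ff_net = createJeffersonStyleNetwork(recurrent=False)
print(len(rec_net.params), len(ff_net.params))  # the recurrent variant adds hidden->hidden weights
print(rec_net.activate([1.0, 0.0]))             # 2 "food" inputs -> 4 "move" outputs
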
Code example #14
File: parityrnn.py  Project: wsgan001/AI
def buildParityNet():
    net = RecurrentNetwork()
    net.addInputModule(LinearLayer(1, name='i'))
    net.addModule(TanhLayer(2, name='h'))
    net.addModule(BiasUnit('bias'))
    net.addOutputModule(TanhLayer(1, name='o'))
    net.addConnection(FullConnection(net['i'], net['h']))
    net.addConnection(FullConnection(net['bias'], net['h']))
    net.addConnection(FullConnection(net['bias'], net['o']))
    net.addConnection(FullConnection(net['h'], net['o']))
    net.addRecurrentConnection(FullConnection(net['o'], net['h']))
    net.sortModules()

    # hand-picked weights that solve the parity task, scaled up to
    # saturate the tanh units
    p = net.params
    p[:] = [-0.5, -1.5, 1, 1, -1, 1, 1, -1, 1]
    p *= 10.

    return net
Code example #15
def network(dataset, input_list):
    num_words = len(input_list)
    #dividing the dataset into training and testing data
    tstdata, trndata = dataset.splitWithProportion(0.25)

    #building the network
    net = RecurrentNetwork()
    input_layer1 = LinearLayer(num_words, name='input_layer1')
    input_layer2 = LinearLayer(num_words, name='input_layer2')
    hidden_layer = TanhLayer(num_words, name='hidden_layer')
    output_layer = SoftmaxLayer(num_words, name='output_layer')
    net.addInputModule(input_layer1)
    net.addInputModule(input_layer2)
    net.addModule(hidden_layer)
    net.addOutputModule(output_layer)
    net.addConnection(
        FullConnection(input_layer1, hidden_layer, name='in1_to_hidden'))
    net.addConnection(
        FullConnection(input_layer2, hidden_layer, name='in2_to_hidden'))
    net.addConnection(
        FullConnection(hidden_layer, output_layer, name='hidden_to_output'))
    net.addConnection(
        FullConnection(input_layer1, output_layer, name='in1_to_out'))
    net.addConnection(
        FullConnection(input_layer2, output_layer, name='in2_to_out'))
    net.sortModules()
    #backpropagation
    trainer = BackpropTrainer(net,
                              dataset=trndata,
                              momentum=0.1,
                              verbose=True,
                              weightdecay=0.01)
    #error checking part
    for i in range(10):
        trainer.trainEpochs(1)
        trnresult = percentError(trainer.testOnClassData(), trndata['target'])
        tstresult = percentError(trainer.testOnClassData(dataset=tstdata),
                                 tstdata['target'])
        print "epoch: %4d" % trainer.totalepochs
        print "  train error: %5.10f%%" % trnresult
        print "  test error: %5.10f%%" % tstresult
    return net
Code example #16
File: backpropanbncn.py  Project: wsgan001/AI
def testTraining():
    # the AnBnCn dataset (sequential)
    d = AnBnCnDataSet()

    # build a recurrent network to be trained
    hsize = 2
    n = RecurrentNetwork()
    n.addModule(TanhLayer(hsize, name='h'))
    n.addModule(BiasUnit(name='bias'))
    n.addOutputModule(LinearLayer(1, name='out'))
    n.addConnection(FullConnection(n['bias'], n['h']))
    n.addConnection(FullConnection(n['h'], n['out']))
    n.addRecurrentConnection(FullConnection(n['h'], n['h']))
    n.sortModules()

    # initialize the backprop trainer and train
    t = BackpropTrainer(n, learningrate=0.1, momentum=0.0, verbose=True)
    t.trainOnDataset(d, 200)

    # the resulting weights are in the network:
    print('Final weights:', n.params)
Code example #17
def buildNetwork(hidden_size=3):
    # build the network
    # create the layers
    input_layer = LinearLayer(4)
    hidden_layer = SigmoidLayer(hidden_size, name='hidden')
    output_layer = LinearLayer(2)
    net = RecurrentNetwork()

    net.addInputModule(input_layer)
    net.addModule(hidden_layer)
    net.addOutputModule(output_layer)

    in_to_hidden = FullConnection(input_layer, hidden_layer)
    hidden_to_out = FullConnection(hidden_layer, output_layer)
    net.addRecurrentConnection(FullConnection(net['hidden'], net['hidden']))

    net.addConnection(in_to_hidden)
    net.addConnection(hidden_to_out)

    net.sortModules()
    return net
Code example #18
File: main.py  Project: magnastrazh/NEUCOGAR
def trainedRFCNN():
    n = RecurrentNetwork()

    n.addInputModule(LinearLayer(4, name='in'))
    n.addModule(SigmoidLayer(6, name='hidden'))
    n.addOutputModule(LinearLayer(2, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
    n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))

    n.addRecurrentConnection(FullConnection(n['out'], n['hidden'], name='nmc'))

    n.sortModules()

    draw_connections(n)
    # d = generateTraininqgData()
    d = getDatasetFromFile(root.path() + "/res/dataSet")
    t = BackpropTrainer(n, d, learningrate=0.001, momentum=0.75)
    t.trainOnDataset(d)
    # FIXME: I'm not sure the recurrent ANN is going to converge
    # so just training for fixed number of epochs

    count = 0
    while True:
        globErr = t.train()
        print(globErr)
        if globErr < 0.01:
            break
        count += 1
        if count == 100:
            # not converging: restart with freshly initialized weights
            return trainedRFCNN()

    # for i in range(100):
    #     print t.train()

    exportRFCNN(n)
    draw_connections(n)

    return n
Code example #19
def getModel(dept, hidden_size, input_size, target_size, online=False):

	file_name = output_file_path + 'nn_dept' + str(dept) + '_epoch' + str(epochs)

	if online:
		try:
			fileObject = open(file_name + '_model', 'rb')  # binary mode for pickle
			n = pickle.load(fileObject)
			fileObject.close()
			return n

		except IOError:
			print("No nn object for dept", dept, "exists, so a new model is built.")

	n = RecurrentNetwork()

	n.addInputModule(LinearLayer(input_size, name='in'))
	n.addModule(BiasUnit('bias'))
	for i in range(0, num_hidden_layer+1):
		hidden_name = 'hidden'+str(i)
		n.addModule(SigmoidLayer(hidden_size, name=hidden_name))
	n.addOutputModule(LinearLayer(target_size, name='out'))

	n.addConnection(FullConnection(n['in'], n['hidden0'], name='c1'))
	next_hidden = 'hidden0'

	for i in range(0,num_hidden_layer ):
		current_hidden = 'hidden'+str(i)
		next_hidden = 'hidden'+str(i+1)
		n.addConnection(FullConnection(n[current_hidden], n[next_hidden], name='c'+str(i+2)))

	n.addConnection(FullConnection(n[next_hidden], n['out'], name='c'+str(num_hidden_layer+2)))
	n.addConnection(FullConnection(n['bias'], n['hidden0'], name='c'+str(num_hidden_layer+7)))

	n.sortModules()

	return n
Code example #20
def construct_network(input_len, output_len, hidden_nodes, is_elman=True):
    n = RecurrentNetwork()
    n.addInputModule(LinearLayer(input_len, name="i"))
    n.addModule(BiasUnit("b"))
    n.addModule(SigmoidLayer(hidden_nodes, name="h"))
    n.addOutputModule(LinearLayer(output_len, name="o"))

    n.addConnection(FullConnection(n["i"], n["h"]))
    n.addConnection(FullConnection(n["b"], n["h"]))
    n.addConnection(FullConnection(n["b"], n["o"]))
    n.addConnection(FullConnection(n["h"], n["o"]))

    if is_elman:
        # Elman (hidden->hidden)
        n.addRecurrentConnection(FullConnection(n["h"], n["h"]))
    else:
        # Jordan (out->hidden)
        n.addRecurrentConnection(FullConnection(n["o"], n["h"]))

    n.sortModules()
    n.reset()

    return n
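
A usage sketch for construct_network, contrasting the two topologies named in the comments above; the dimensions are illustrative:

elman = construct_network(3, 1, hidden_nodes=5, is_elman=True)
jordan = construct_network(3, 1, hidden_nodes=5, is_elman=False)

# recurrent nets are stateful: identical inputs give different outputs
# until the buffers are cleared with reset()
x = [0.1, 0.2, 0.3]
print(elman.activate(x))
print(elman.activate(x))  # now includes the previous hidden state
elman.reset()             # start a new sequence
print(elman.activate(x))  # matches the first call again
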
Code example #21
File: autoenc.py  Project: TheMarex/autoenc
def buildNetwork(N):
    dimension = WINDOW_SIZE
    inLayer = LinearLayer(dimension)
    hiddenLayer = SigmoidLayer(N)
    outLayer = LinearLayer(dimension)
    # bias disabled; it caused too much overfitting
    #bias = BiasUnit(name='bias')
    in_to_hidden = FullConnection(inLayer, hiddenLayer)
    hidden_to_out = FullConnection(hiddenLayer, outLayer)
    #bias_to_out = FullConnection(bias, outLayer)
    #bias_to_hidden = FullConnection(bias, hiddenLayer)

    net = RecurrentNetwork()
    #net.addModule(bias)
    net.addInputModule(inLayer)
    net.addModule(hiddenLayer)
    net.addOutputModule(outLayer)
    net.addConnection(in_to_hidden)
    net.addConnection(hidden_to_out)
    net.addRecurrentConnection(FullConnection(hiddenLayer, hiddenLayer))
    #net.addConnection(bias_to_hidden)
    #net.addConnection(bias_to_out)
    net.sortModules()
    return net
Code example #22
#!/usr/bin/python
# description and step-by-step assembly of a RecurrentNetwork

from pybrain.structure import RecurrentNetwork
# network containers: FeedForwardNetwork and RecurrentNetwork

from pybrain.structure import LinearLayer, SigmoidLayer, TanhLayer
# modules: BiasUnit, GaussianLayer, LinearLayer, LSTMLayer, MDLSTMLayer, SigmoidLayer, SoftmaxLayer, StateDependentLayer, TanhLayer

from pybrain.structure import FullConnection
# connectors

n = RecurrentNetwork(name='red de prueba1')  # n = the network

# add the input module to the network, with 2 inputs
n.addInputModule(LinearLayer(2, name='entradas'))
# add the hidden module to the network, with 3 hidden units
n.addModule(SigmoidLayer(3, name='ocultas'))
#n.addModule(TanhLayer(3, name='ocultas'))  # alternative: 3 tanh hidden units
# add the output module to the network, with 1 output
n.addOutputModule(LinearLayer(1, name='salidas'))

# information flow between the layers
# connect the input layer to the hidden layer
n.addConnection(FullConnection(n['entradas'], n['ocultas'], name='con1'))
# connect the hidden layer to the output layer
n.addConnection(FullConnection(n['ocultas'], n['salidas'], name='con2'))
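
The walkthrough stops before the steps that make the network recurrent and usable. A hedged completion, continuing the same objects; the connection name is illustrative:

# add the recurrent connection (it looks back one timestep) and finalize
n.addRecurrentConnection(
    FullConnection(n['ocultas'], n['ocultas'], name='con3'))
n.sortModules()  # required before the network can be activated

print(n)                   # named modules make this printout readable
print(n.activate([1, 2]))  # 2 inputs -> 1 output
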
Code example #23
import numpy as np
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure import (RecurrentNetwork, LinearLayer, LSTMLayer,
                               SoftmaxLayer, FullConnection)
# FEXT: this project's feature-extraction helper (mel/frequency conversion),
# assumed to be imported elsewhere

duration = 1

num_coeff = 26
max_freq = 8000
min_freq = 0
melArray = np.linspace(FEXT.freqToMel(min_freq), FEXT.freqToMel(max_freq),
                       num_coeff + 2)
freqArray = FEXT.melToFreq(melArray)
freqArray_bin = np.floor(513 * freqArray / 16000)
centralPoints = freqArray_bin[1:21]
freqbank = np.zeros((26, 257))

LSTMre = RecurrentNetwork()

LSTMre.addInputModule(LinearLayer(39, name='input'))
LSTMre.addModule(LSTMLayer(50, name='LSTM_hidden'))
LSTMre.addOutputModule(SoftmaxLayer(5, name='out'))
LSTMre.addConnection(
    FullConnection(LSTMre['input'], LSTMre['LSTM_hidden'], name='c1'))
LSTMre.addConnection(
    FullConnection(LSTMre['LSTM_hidden'], LSTMre['out'], name='c2'))
LSTMre.addRecurrentConnection(
    FullConnection(LSTMre['LSTM_hidden'], LSTMre['LSTM_hidden'], name='c3'))
LSTMre.sortModules()
ds = SupervisedDataSet(39, 5)

Code example #24
def exec_algo(xml_file, output_location):
    rootObj = ml.parse(xml_file)
    file_name = rootObj.MachineLearning.prediction.datafile
    file = open(file_name)
    var_input = rootObj.MachineLearning.prediction.input
    var_output = rootObj.MachineLearning.prediction.output
    var_classes = rootObj.MachineLearning.prediction.classes

    DS = ClassificationDataSet(var_input, var_output, nb_classes=var_classes)
    #DS1=ClassificationDataSet(13,1,nb_classes=10)

    for line in file.readlines():
        data = [float(x) for x in line.strip().split(',') if x != '']
        inp = tuple(data[:var_input])
        output = tuple(data[var_input:])
        DS.addSample(inp, output)

    tstdata, trndata = DS.splitWithProportion(0)
    #trndatatest,tstdatatest=DS1.splitWithProportion(0)

    trdata = ClassificationDataSet(trndata.indim, 1, nb_classes=10)
    #tsdata=ClassificationDataSet(DS1.indim,1,nb_classes=10)
    #tsdata1=ClassificationDataSet(DS1.indim,1,nb_classes=10)

    for i in range(trndata.getLength()):
        if (trndata.getSample(i)[1][0] != 100):
            trdata.addSample(trndata.getSample(i)[0], trndata.getSample(i)[1])

    trdata._convertToOneOfMany()
    #tsdata._convertToOneOfMany()
    #tsdata1._convertToOneOfMany()
    print "%d" % (trdata.getLength())

    rnn = RecurrentNetwork()
    inputLayer = LinearLayer(trdata.indim)

    hiddenLayer = rootObj.MachineLearning.prediction.algorithm.RecurrentNeuralNetwork.hiddenLayerActivation
    hiddenNeurons = rootObj.MachineLearning.prediction.algorithm.RecurrentNeuralNetwork.hiddenNeurons

    if hiddenLayer == 'Sigmoid':
        hiddenLayer = SigmoidLayer(hiddenNeurons)
    elif hiddenLayer == 'Softmax':
        hiddenLayer = SoftmaxLayer(hiddenNeurons)
    else:
        hiddenLayer = LinearLayer(hiddenNeurons)

    outputLayer = rootObj.MachineLearning.prediction.algorithm.RecurrentNeuralNetwork.outputLayerActivation

    if outputLayer == 'Sigmoid':
        outputLayer = SigmoidLayer(trdata.outdim)
    elif outputLayer == 'Softmax':
        outputLayer = SoftmaxLayer(trdata.outdim)
    else:
        outputLayer = LinearLayer(trdata.outdim)

    rnn.addInputModule(inputLayer)
    rnn.addModule(hiddenLayer)
    rnn.addOutputModule(outputLayer)
    rnn_type = rootObj.MachineLearning.prediction.algorithm.RecurrentNeuralNetwork.RNN_Type
    in_to_hidden = FullConnection(inputLayer, hiddenLayer)
    hidden_to_outputLayer = FullConnection(hiddenLayer, outputLayer)
    rnn.addConnection(in_to_hidden)
    rnn.addConnection(hidden_to_outputLayer)

    if rnn_type == 'Elman':
        hidden_to_hidden = FullConnection(hiddenLayer, hiddenLayer, name='c3')
        rnn.addRecurrentConnection(hidden_to_hidden)
    #hidden_to_hidden=FullConnection(hiddenLayer,hiddenLayer, name='c3')

    if rnn_type == 'Jordan':
        output_to_hidden = FullConnection(outputLayer, hiddenLayer, name='c3')
        rnn.addRecurrentConnection(output_to_hidden)

    #rnn.addRecurrentConnection(hidden_to_hidden)
    momentum = rootObj.MachineLearning.prediction.algorithm.RecurrentNeuralNetwork.momentum
    weightdecay = rootObj.MachineLearning.prediction.algorithm.RecurrentNeuralNetwork.learningRate
    rnn.sortModules()
    trainer = BackpropTrainer(rnn,
                              dataset=trdata,
                              momentum=0.1,
                              verbose=True,
                              weightdecay=0.01)
    trainer.train()
    result = (percentError(trainer.testOnClassData(dataset=trdata),
                           trdata['class']))
    #result1=percentError(trainer.testOnClassData(dataset=tsdata1),tsdata1['class'])

    print('%f \n' % (100 - result))
    #print('%f \n' % (100 - result1))

    ts = time.time()
    directory = output_location + sep + str(int(ts))
    makedirs(directory)
    fileObject = open(
        output_location + sep + str(int(ts)) + sep + 'pybrain_RNN', 'wb')  # binary mode for pickle
    pickle.dump(trainer, fileObject)
    pickle.dump(rnn, fileObject)
    fileObject.close()
Code example #25
 def __init__(self, trained_net=None):
     if trained_net is None:
         self.net = RecurrentNetwork()
         self.init_network(self.net)
     else:
         self.net = trained_net
Code example #26
""" Each connection stores its own weights; the network's params field gathers
all weights of the network at once. """

print(hidden2out.params)
print(n.params)

""" The former are the last slice of the latter. """

print(n.params[-3:] == hidden2out.params)

""" Ok, after having covered the basics, let's move on to some additional concepts.
First of all, we encourage you to name all modules, or connections you create, because that gives you
more readable printouts, and a very concise way of accessing them.

We now build an equivalent network to the one before, but with a more concise syntax:
"""
n2 = RecurrentNetwork(name='net2')
n2.addInputModule(LinearLayer(2, name='in'))
n2.addModule(SigmoidLayer(3, name='h'))
n2.addOutputModule(LinearLayer(1, name='out'))
n2.addConnection(FullConnection(n2['in'], n2['h'], name='c1'))
n2.addConnection(FullConnection(n2['h'], n2['out'], name='c2'))
n2.sortModules()

""" Printouts look more concise and readable: """
print(n2)

""" There is an even quicker way to build networks though, as long as their structure is nothing
more fancy than a stack of fully connected layers: """

n3 = buildNetwork(2, 3, 1, bias=False)
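
buildNetwork returns a network that is already sorted, so (illustratively) it can be activated right away:

print(n3.activate([2, 3]))  # no explicit sortModules() call needed
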
Code example #27
 def equivalence_recurrent(self, builder):
     _net = pybrainbridge._RecurrentNetwork()
     builder(_net)
     net = RecurrentNetwork()
     builder(net)
     super(TestNetworkEquivalence, self).equivalence_recurrent(net, _net)
Code example #28
File: network.py  Project: jmoles/trail-runner
    def createJeffersonMDLNetwork(mdl_length=2,
                                  hidden_count=5,
                                  output_count=4,
                                  in_to_out_connect=True,
                                  name=None):

        ret_net = RecurrentNetwork(name=name)

        # Add some components of the neural network.
        hidden_layer = SigmoidLayer(hidden_count, name="hidden")
        output_layer = LinearLayer(output_count, name="move")

        ret_net.addModule(hidden_layer)
        ret_net.addOutputModule(output_layer)

        ret_net.addConnection(
            FullConnection(hidden_layer,
                           output_layer,
                           name="Hidden to Move Layer"))

        mdl_prev = ()  # replaced by the previous delay-line layer after the first pass

        for idx in range(0, mdl_length):
            # Create the layers
            food_layer = LinearLayer(2, name="Food {0}".format(idx))
            mdl_layer = LinearLayer(2, name="MDL Layer {0}".format(idx))

            # Add to network
            ret_net.addModule(food_layer)
            if idx == 0:
                ret_net.addInputModule(mdl_layer)
            else:
                ret_net.addModule(mdl_layer)
                # Add delay line connection.
                ret_net.addRecurrentConnection(
                    FullConnection(mdl_prev,
                                   mdl_layer,
                                   name="Recurrent DL {0} to DL {1}".format(
                                       idx - 1, idx)))

            # Add connections for
            # - Delay line to NN.
            # - NN to Hidden.
            # - NN to Out (if desired).
            ret_net.addConnection(
                FullConnection(mdl_layer,
                               food_layer,
                               name="DL {0} to Food {0}".format(idx)))
            ret_net.addConnection(
                FullConnection(food_layer,
                               hidden_layer,
                               name="Food {0} to Hidden".format(idx)))
            if in_to_out_connect:
                ret_net.addConnection(
                    FullConnection(food_layer,
                                   output_layer,
                                   name="Food {0} to Output".format(idx)))

            mdl_prev = mdl_layer

        ret_net.sortModules()

        return ret_net
Code example #29
n.addOutputModule(outLayer)

#   Full Connection class - add connections/synapses

in_to_hidden = FullConnection(inLayer, hiddenLayer)
hidden_to_out = FullConnection(hiddenLayer, outLayer)
n.addConnection(in_to_hidden)
n.addConnection(hidden_to_out)
# makes our MLP usable
n.sortModules()
print(n.activate([1, 2]))

print(n)

# Recurrent connections - they look back in time one timestep.
n = RecurrentNetwork()
n.addInputModule(LinearLayer(2, name='in'))
n.addModule(SigmoidLayer(3, name='hidden'))
n.addOutputModule(LinearLayer(1, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))

n.sortModules()
print(n.activate((2, 2)))
print(n.activate((2, 2)))  # differs: the previous hidden state feeds back in
print(n.activate((2, 2)))
n.reset()                  # clear the recurrent history
print(n.activate((2, 2)))  # matches the very first activation again

#######################################
#########   Classification with feed forward networks