Code example #1
    def buildLSTMNetwork(self):
        # create network and modules
        net = RecurrentNetwork()
        inp = LinearLayer(self.n_input, name="Input")
        h1 = LSTMLayer(3, name='LSTM')
        h2 = SigmoidLayer(10, name='sigm')
        outp = LinearLayer(self.numActions, name='output')
        # add modules
        net.addOutputModule(outp)
        net.addInputModule(inp)
        net.addModule(h1)
        net.addModule(h2)
        # create connections from input
        net.addConnection(FullConnection(inp, h1, name="input_LSTM"))
        net.addConnection(FullConnection(inp, h2, name="input_sigm"))
        # create connections from LSTM
        net.addConnection(FullConnection(h1, h2, name="LSTM_sigm"))

        # add whichever recurrent connections
        net.addRecurrentConnection(FullConnection(h1, h1, name='LSTM_rec'))
        net.addRecurrentConnection(FullConnection(h2, h1,
                                                  name='sigm_LSTM_rec'))
        # create connections to output
        net.addConnection(FullConnection(h1, outp, name="LSTM_outp"))
        net.addConnection(FullConnection(h2, outp, name="sigm_outp"))

        # finish up
        net.sortModules()
        net.randomize()
        self.printModules(net)
        # one accumulator per network parameter
        self.e = [0.0] * len(net.params)
        # for each action, accumulate a gradient over all parameters
        self.accumulated_gradients = [[0.0] * len(net.params)
                                      for _ in range(self.numActions)]
        return net
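Since the builder returns a RecurrentNetwork, callers must reset it between episodes or the LSTM state leaks across sequences. A minimal usage sketch (`agent` and `episode` are hypothetical names):

net = agent.buildLSTMNetwork()       # `agent`: an instance of the class above
net.reset()                          # clear LSTM/recurrent state
for observation in episode:          # `episode`: iterable of n_input vectors
    action_values = net.activate(observation)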
Code example #2
def trainFunc(params):
    (iteration, trainds, validds, input_size, hidden, func, eta, lmda,
     epochs) = params
    print('Iter:', iteration, 'Epochs:', epochs, 'Hidden_size:', hidden,
          'Eta:', eta, 'Lambda:', lmda, 'Activation:', func)

    # Build network
    n = RecurrentNetwork()
    n.addInputModule(LinearLayer(input_size, name='in'))
    n.addModule(func(hidden, name='hidden'))
    n.addModule(LinearLayer(hidden, name='context'))
    n.addOutputModule(LinearLayer(1, name='out'))
    n.addConnection(FullConnection(n['in'], n['hidden'], name='in_to_hidden'))
    n.addConnection(FullConnection(n['hidden'], n['out'],
                                   name='hidden_to_out'))
    # Elman-style context: the hidden state is copied into 'context' each
    # step; without a forward connection back into 'hidden', the context
    # layer would have no effect on the output
    n.addRecurrentConnection(FullConnection(n['hidden'], n['context']))
    n.addConnection(FullConnection(n['context'], n['hidden'],
                                   name='context_to_hidden'))
    n.sortModules()

    trainer = BackpropTrainer(n,
                              trainds,
                              learningrate=eta,
                              weightdecay=lmda,
                              momentum=0.1,
                              shuffle=False)
    trainer.trainEpochs(epochs)
    pred = np.nan_to_num(n.activateOnDataset(validds))
    validerr = eval.calc_RMSE(validds['target'], pred)
    varscore = explained_variance_score(validds['target'], pred)
    return validerr, varscore, n
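trainFunc packs its nine hyperparameters into a single tuple, which makes it easy to fan out over multiprocessing.Pool.map. A hypothetical call, assuming numpy is imported as np and that the project-specific eval.calc_RMSE and sklearn's explained_variance_score are available:

from pybrain.datasets import SupervisedDataSet
from pybrain.structure import TanhLayer

trainds = SupervisedDataSet(3, 1)
validds = SupervisedDataSet(3, 1)
for ds in (trainds, validds):
    for _ in range(16):
        x = np.random.rand(3)
        ds.addSample(x, [x.sum()])   # toy regression target

params = (0, trainds, validds, 3, 20, TanhLayer, 0.01, 0.0001, 50)
validerr, varscore, net = trainFunc(params)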
Code example #3
File: ML.py Project: pombredanne/authorid
def anntrain(xdata, ydata):
    # one input unit per feature, a single scalar output
    ds = SupervisedDataSet(len(xdata[0]), 1)
    for i, row in enumerate(xdata):
        ds.addSample(row, ydata[i])

    net = FeedForwardNetwork()
    inp = LinearLayer(len(xdata[0]))
    h1 = SigmoidLayer(1)
    outp = LinearLayer(1)
    net.addOutputModule(outp)
    net.addInputModule(inp)
    net.addModule(h1)

    net.addConnection(FullConnection(inp, h1))
    net.addConnection(FullConnection(h1, outp))

    net.sortModules()

    trainer = BackpropTrainer(net, ds)
    trainer.trainOnDataset(ds, 40)   # 40 epochs of backprop
    trainer.testOnData()             # error on the training data (discarded)
    return net
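A hypothetical call with toy data (reusing the PyBrain imports the function depends on):

xdata = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
ydata = [0.0, 1.0, 1.0, 0.0]
net = anntrain(xdata, ydata)
print net.activate([1.0, 0.0])   # one-element output array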
Code example #4
def createNetwork():
    # create network and layers
    net = FeedForwardNetwork()
    in_layer = LinearLayer(16)
    hid1_layer = SigmoidLayer(20)
    hid2_layer = SigmoidLayer(20)
    out_layer = SigmoidLayer(2)

    # add layers to network
    net.addInputModule(in_layer)
    net.addModule(hid1_layer)
    net.addModule(hid2_layer)
    net.addOutputModule(out_layer)

    # create connections between layers
    in_to_hid1 = FullConnection(in_layer, hid1_layer)
    hid1_to_hid2 = FullConnection(hid1_layer, hid2_layer)
    hid2_to_out = FullConnection(hid2_layer, out_layer)

    # add connections to network
    net.addConnection(in_to_hid1)
    net.addConnection(hid1_to_hid2)
    net.addConnection(hid2_to_out)

    # sort modules
    net.sortModules()

    return net
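A quick smoke test of the builder (input values arbitrary):

net = createNetwork()
out = net.activate([0.5] * 16)   # two sigmoid outputs, each in (0, 1)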
Code example #5
def runNeuralSimulation(dataTrain, dataTest, train_tfidf, test_tfidf):
    outFile = open('neuralLog.txt','a')
    outFile.write('-------------------------------------\n')
    outFile.write('train==> %d, %d \n'%(train_tfidf.shape[0],train_tfidf.shape[1]))
    outFile.write('test==>  %d, %d \n'%(test_tfidf.shape[0],test_tfidf.shape[1]))
    
    trainDS = getDataSetFromTfidf(train_tfidf, dataTrain.target)
    testDS = getDataSetFromTfidf(test_tfidf, dataTest.target)
    
    print "Number of training patterns: ", len(trainDS)
    print "Input and output dimensions: ", trainDS.indim, trainDS.outdim
    print "First sample (input, target, class):"
    print len(trainDS['input'][0]), trainDS['target'][0], trainDS['class'][0]
    
#     with SimpleTimer('time to train', outFile):
#         net = buildNetwork(trainDS.indim, trainDS.indim/2, trainDS.indim/4, trainDS.indim/8, trainDS.indim/16, 2, hiddenclass=TanhLayer, outclass=SoftmaxLayer)
#         trainer = BackpropTrainer( net, dataset=trainDS, momentum=0.1, verbose=True, weightdecay=0.01, batchlearning=True)
    net = RecurrentNetwork()
    net.addInputModule(LinearLayer(trainDS.indim, name='in'))
    net.addModule(SigmoidLayer(trainDS.indim/2, name='hidden'))
    net.addModule(SigmoidLayer(trainDS.indim/4, name='hidden2'))
    net.addOutputModule(SoftmaxLayer(2, name='out'))
    net.addConnection(FullConnection(net['in'], net['hidden'], name='c1'))
    # give 'hidden2' a feed-forward input; without one it only ever emits a
    # constant, so the recurrent c4 would act as a fixed bias on 'hidden'
    net.addConnection(FullConnection(net['hidden'], net['hidden2'], name='c1b'))
    net.addConnection(FullConnection(net['hidden'], net['out'], name='c2'))
    net.addRecurrentConnection(FullConnection(net['hidden'], net['hidden'], name='c3'))
    net.addRecurrentConnection(FullConnection(net['hidden2'], net['hidden'], name='c4'))
    net.sortModules()
    trainer = BackpropTrainer(net, dataset=trainDS, momentum=0.01,
                              verbose=True, weightdecay=0.01)
    
    outFile.write('%s \n' % (net.__str__()))
    epochs = 2000
    with SimpleTimer('time to train %d epochs' % epochs, outFile):
        for i in range(epochs):
            trainer.trainEpochs(1)
            trnresult = percentError(trainer.testOnClassData(),
                                     trainDS['class'])
            tstresult = percentError(trainer.testOnClassData(dataset=testDS),
                                     testDS['class'])
    
            print "epoch: %4d" % trainer.totalepochs, \
                  "  train error: %5.2f%%" % trnresult, \
                  "  test error: %5.2f%%" % tstresult
            outFile.write('%5.2f , %5.2f \n' % (100.0-trnresult, 100.0-tstresult))
                  
    predicted = trainer.testOnClassData(dataset=testDS)
    results = predicted == testDS['class'].flatten()
    wrong = []
    for i in range(len(results)):
        if not results[i]:
            wrong.append(i)
    print 'classifier got these wrong:'
    for i in wrong[:10]:
        print dataTest.data[i], dataTest.target[i]
        outFile.write('%s %d \n' % (dataTest.data[i], dataTest.target[i]))
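The helper getDataSetFromTfidf is not shown in this snippet. A plausible sketch, assuming a scipy sparse tf-idf matrix and integer class labels:

from pybrain.datasets import ClassificationDataSet

def getDataSetFromTfidf(tfidf, target):
    # hypothetical reconstruction: one sample per document row
    ds = ClassificationDataSet(tfidf.shape[1], 1, nb_classes=2)
    for i in range(tfidf.shape[0]):
        ds.addSample(tfidf[i].toarray().ravel(), [target[i]])
    ds._convertToOneOfMany()   # the Softmax output expects one-of-many targets
    return ds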
Code example #6
    def buildTDnetwork(self):
        # create network and modules
        net = FeedForwardNetwork()
        inp = LinearLayer(self.n_input, name="Input")
        h1 = SigmoidLayer(10, name='sigm')
        outp = LinearLayer(1, name='output')
        # add modules
        net.addOutputModule(outp)
        net.addInputModule(inp)
        net.addModule(h1)
        # create connections from input (names fixed: the hidden layer here
        # is a sigmoid, not an LSTM as in the recurrent builder above)
        net.addConnection(FullConnection(inp, h1, name="input_sigm"))

        # create connections to output
        net.addConnection(FullConnection(h1, outp, name="sigm_outp"))

        # finish up
        net.sortModules()
        net.randomize()

        return net
Code example #7
def addSubNet(nn, prefix, indim, xdim, outdim):
    # Adds a recurrent sub-network whose modules are namespaced by `prefix`.
    # The parent network `nn` must already contain a BiasUnit named 'b'.
    p = prefix + '_'

    nn.addModule(LinearLayer(indim, name=p + 'in'))
    nn.addModule(LinearLayer(outdim, name=p + 'out'))

    # four sigmoid stages; f0's units are split between the external input
    # (first `indim` slots) and the recurrent state (remaining `xdim` slots)
    nn.addModule(SigmoidLayer(indim + xdim, name=p + 'f0'))
    nn.addModule(SigmoidLayer(indim + xdim, name=p + 'f1'))
    nn.addModule(SigmoidLayer(indim + xdim, name=p + 'f2'))
    nn.addModule(SigmoidLayer(indim + xdim, name=p + 'f3'))
    nn.addModule(SigmoidLayer(xdim, name=p + 'x'))

    # external input feeds only the first `indim` units of f0
    nn.addConnection(
        FullConnection(nn[p + 'in'], nn[p + 'f0'], outSliceTo=indim))

    # feed-forward chain f0 -> f1 -> f2 -> f3 -> x
    nn.addConnection(
        FullConnection(nn[p + 'f0'], nn[p + 'f1'], name=p + 'f0~f1'))
    nn.addConnection(
        FullConnection(nn[p + 'f1'], nn[p + 'f2'], name=p + 'f1~f2'))
    nn.addConnection(
        FullConnection(nn[p + 'f2'], nn[p + 'f3'], name=p + 'f2~f3'))
    nn.addConnection(
        FullConnection(nn[p + 'f3'], nn[p + 'x'], name=p + 'f3~x'))

    # the state layer x re-enters f0 on the next timestep, in the
    # remaining `xdim` units after the external input
    nn.addRecurrentConnection(
        FullConnection(nn[p + 'x'], nn[p + 'f0'], outSliceFrom=indim))

    # connect x to out one unit at a time, giving each output its own
    # single weight from the matching state unit (requires xdim >= outdim)
    for i in range(outdim):
        nn.addConnection(
            FullConnection(nn[p + 'x'],
                           nn[p + 'out'],
                           inSliceFrom=i,
                           inSliceTo=i + 1,
                           outSliceFrom=i,
                           outSliceTo=i + 1))
    nn.addConnection(FullConnection(nn['b'], nn[p + 'out']))
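A hypothetical call; the parent network must hold the shared bias before any sub-net is added:

from pybrain.structure import RecurrentNetwork, BiasUnit

nn = RecurrentNetwork()
nn.addModule(BiasUnit(name='b'))    # the 'b' unit addSubNet wires to each out layer
addSubNet(nn, 'ctrl', indim=4, xdim=8, outdim=3)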
Code example #8
import pickle


# saves an individual to a pickle file (function name assumed, mirroring
# the loader below)
def save_best_individual(individual):
    with open('best_individual.pkl', 'wb') as output:
        # save individual
        pickle.dump(individual, output, pickle.HIGHEST_PROTOCOL)


# loads an individual from a pickle file
def load_best_individual():
    with open('best_individual.pkl', 'rb') as inp:
        # load individual
        individual = pickle.load(inp)
    return individual


# create network and layers
net = FeedForwardNetwork()
in_layer = LinearLayer(16)
hid1_layer = SigmoidLayer(20)
hid2_layer = SigmoidLayer(20)
out_layer = SigmoidLayer(2)

# add layers to network
net.addInputModule(in_layer)
net.addModule(hid1_layer)
net.addModule(hid2_layer)
net.addOutputModule(out_layer)

# create connections between layers
in_to_hid1 = FullConnection(in_layer, hid1_layer)
hid1_to_hid2 = FullConnection(hid1_layer, hid2_layer)
hid2_to_out = FullConnection(hid2_layer, out_layer)
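A round trip of the pickle helpers above (any picklable object will do; the saver's name follows the reconstruction above):

best = {'genome': [0.1, 0.2, 0.3]}   # hypothetical individual
save_best_individual(best)
restored = load_best_individual()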
Code example #9
def buildNetwork(*layers, **options):
    """Build arbitrary deep networks.
    
    `layers` should be a list or tuple of integers, that indicate how many 
    neurons the layers shoudl have. `bias` and `outputbias` are flags to 
    indicate wether the network should have the corresponding biases; both
    default to True.
        
    To adjust the classes for the layers use the `hiddenclass` and  `outclass`
    parameters, which expect a subclass of NeuronLayer.
    
    If the `recurrent` flag is set, a RecurrentNetwork will be created, 
    otherwise a FeedForwardNetwork.
    
    If the `fast` flag is set, faster arac networks will be used instead of the 
    pybrain implementations."""
    # options
    opt = {
        'bias': True,
        'hiddenclass': SigmoidLayer,
        'outclass': LinearLayer,
        'outputbias': True,
        'peepholes': False,
        'recurrent': False,
        'fast': False,
    }
    for key in options:
        if key not in opt.keys():
            raise NetworkError('buildNetwork unknown option: %s' % key)
        opt[key] = options[key]

    if len(layers) < 2:
        raise NetworkError(
            'buildNetwork needs 2 arguments for input and output layers at least.'
        )

    # Bind the right class to the Network name
    network_map = {
        (False, False): FeedForwardNetwork,
        (True, False): RecurrentNetwork,
    }
    try:
        network_map[(False, True)] = _FeedForwardNetwork
        network_map[(True, True)] = _RecurrentNetwork
    except NameError:
        if opt['fast']:
            raise NetworkError("No fast networks available.")
    if opt['hiddenclass'].sequential or opt['outclass'].sequential:
        if not opt['recurrent']:
            # CHECKME: a warning here?
            opt['recurrent'] = True
    Network = network_map[opt['recurrent'], opt['fast']]
    n = Network()
    # linear input layer
    n.addInputModule(LinearLayer(layers[0], name='in'))
    # output layer of type 'outclass'
    n.addOutputModule(opt['outclass'](layers[-1], name='out'))
    if opt['bias']:
        # add bias module and connection to out module, if desired
        n.addModule(BiasUnit(name='bias'))
        if opt['outputbias']:
            n.addConnection(FullConnection(n['bias'], n['out']))
    # arbitrary number of hidden layers of type 'hiddenclass'
    for i, num in enumerate(layers[1:-1]):
        layername = 'hidden%i' % i
        n.addModule(opt['hiddenclass'](num, name=layername))
        if opt['bias']:
            # also connect all the layers with the bias
            n.addConnection(FullConnection(n['bias'], n[layername]))
    # connections between hidden layers
    for i in range(len(layers) - 3):
        n.addConnection(
            FullConnection(n['hidden%i' % i], n['hidden%i' % (i + 1)]))
    # other connections
    if len(layers) == 2:
        # flat network, connection from in to out
        n.addConnection(FullConnection(n['in'], n['out']))
    else:
        # network with hidden layer(s), connections from in to first hidden and last hidden to out
        n.addConnection(FullConnection(n['in'], n['hidden0']))
        n.addConnection(
            FullConnection(n['hidden%i' % (len(layers) - 3)], n['out']))

    # recurrent connections
    if issubclass(opt['hiddenclass'], LSTMLayer):
        if len(layers) > 3:
            errorexit(
                "LSTM networks with > 1 hidden layers are not supported!")
        n.addRecurrentConnection(FullConnection(n['hidden0'], n['hidden0']))

    n.sortModules()
    return n
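Typical use of this shortcut, mirroring PyBrain's documented API:

from pybrain.structure import TanhLayer

# 2 inputs, one hidden layer of 3 tanh units, 1 linear output
net = buildNetwork(2, 3, 1, hiddenclass=TanhLayer)
out = net.activate([0.5, -0.5])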
Code example #10

def seqStr(s):
    return SeqGenerator().nextStr(s)


import pybrain
import pybrain.tools.shortcuts as bs
from pybrain.structure.modules import BiasUnit, SigmoidLayer, LinearLayer, LSTMLayer, SoftmaxLayer
import pybrain.structure.networks as bn
import pybrain.structure.connections as bc
import pybrain.datasets.sequential as bd

print "preparing network ...",
nn = bn.RecurrentNetwork()
nn.addInputModule(LinearLayer(9, name="in"))
nn.addModule(LSTMLayer(6, name="hidden"))
nn.addOutputModule(LinearLayer(2, name="out"))
nn.addConnection(bc.FullConnection(nn["in"], nn["hidden"], name="c1"))
nn.addConnection(bc.FullConnection(nn["hidden"], nn["out"], name="c2"))
nn.addRecurrentConnection(
    bc.FullConnection(nn["hidden"], nn["hidden"], name="c3"))
nn.sortModules()
print "done"

import random


def getRandomSeq(seqlen, ratevarlimit=0.2):
    s = ""
    count = 0
Code example #11
File: ffann.py Project: mattgabor/waldo-ffnn
for n in xrange(0, trainingDataTemp.getLength()):
    trainingData.addSample(
        trainingDataTemp.getSample(n)[0],
        trainingDataTemp.getSample(n)[1])

# reencode outputs, necessary for training accurately
testingData._convertToOneOfMany()
trainingData._convertToOneOfMany()

##### BUILD ANN #####
# build feed-forward multi-layer perceptron ANN
fnn = FeedForwardNetwork()

# create layers: 64*64*3 input nodes (one per channel of a 64x64 RGB image),
# half as many hidden nodes, 2 output nodes, plus a bias unit
bias = BiasUnit(name='bias unit')
input_layer = LinearLayer(64 * 64 * 3, name='input layer')
hidden_layer = SigmoidLayer(64 * 64 * 3 / 2, name='hidden layer')
output_layer = SigmoidLayer(2, name='output layer')

# create connections with full connectivity between layers
bias_to_hidden = FullConnection(bias, hidden_layer, name='bias-hid')
bias_to_output = FullConnection(bias, output_layer, name='bias-out')
input_to_hidden = FullConnection(input_layer, hidden_layer, name='in-hid')
hidden_to_output = FullConnection(hidden_layer, output_layer, name='hid-out')

# add layers & connections to network
fnn.addModule(bias)
fnn.addInputModule(input_layer)
fnn.addModule(hidden_layer)
fnn.addOutputModule(output_layer)
fnn.addConnection(bias_to_hidden)
Code example #12
File: mdrnnlayer.py Project: ikyzmin/pybrain
    def __init__(self,
                 timedim,
                 shape,
                 hiddendim,
                 outsize,
                 blockshape=None,
                 name=None):
        """Initialize an MdrnnLayer.

        The dimensionality of the sequence - for example 2 for a
        picture or 3 for a video - is given by `timedim`, while the sidelengths
        along each dimension are given by the tuple `shape`.

        The layer will have `hiddendim` hidden units per swiping direction. The
        number of swiping directions is given by 2**timedim, which corresponds
        to one swipe from each corner to its opposing corner and back.

        To indicate how many outputs per timesteps are used, you have to specify
        `outsize`.

        In order to treat blocks of the input and not single voxels, you can
        also specify `blockshape`. For example the layer will then feed (2, 2)
        chunks into the network at each timestep which correspond to the (2, 2)
        rectangles that the input can be split into.
        """
        self.timedim = timedim
        self.shape = shape
        blockshape = tuple([1] * timedim) if blockshape is None else blockshape
        self.blockshape = blockshape
        self.hiddendim = hiddendim
        self.outsize = outsize
        self.indim = reduce(operator.mul, shape, 1)
        self.blocksize = reduce(operator.mul, blockshape, 1)
        self.sequenceLength = self.indim / self.blocksize
        self.outdim = self.sequenceLength * self.outsize

        self.bufferlist = [('cellStates',
                            self.sequenceLength * self.hiddendim)]

        Module.__init__(self, self.indim, self.outdim, name=name)

        # Amount of parameters that are required for the input to the hidden
        self.num_in_params = self.blocksize * self.hiddendim * (3 +
                                                                self.timedim)

        # Amount of parameters that are needed for the recurrent connections.
        # There is one of the parameter for every time dimension.
        self.num_rec_params = outsize * hiddendim * (3 + self.timedim)

        # Amount of parameters that are needed for the output.
        self.num_out_params = outsize * hiddendim

        # Amount of parameters that are needed from the bias to the hidden and
        # the output
        self.num_bias_params = (3 +
                                self.timedim) * self.hiddendim + self.outsize

        # Total number of parameters.
        self.num_params = sum(
            (self.num_in_params, self.timedim * self.num_rec_params,
             self.num_out_params, self.num_bias_params))

        ParameterContainer.__init__(self, self.num_params)

        # Some layers for internal use.
        self.hiddenlayer = MDLSTMLayer(self.hiddendim, self.timedim)

        # Every point in the sequence has timedim predecessors.
        self.predlayers = [LinearLayer(self.outsize) for _ in range(timedim)]

        # We need a single layer to hold the input. We will swipe a connection
        # over the corrects part of it, in order to feed the correct input in.
        self.inlayer = LinearLayer(self.indim)
        # Make some layers the same to save memory.
        self.inlayer.inputbuffer = self.inlayer.outputbuffer = self.inputbuffer

        # In order to allocate not too much memory, we just set the size of the
        # layer to 1 and correct it afterwards.
        self.outlayer = LinearLayer(self.outdim)
        self.outlayer.inputbuffer = self.outlayer.outputbuffer = self.outputbuffer

        self.bias = BiasUnit()
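A quick sanity check of the size bookkeeping above, for a hypothetical 2-D input: a 4x4 "picture" swept in (2, 2) blocks:

from functools import reduce
import operator

timedim, shape, blockshape = 2, (4, 4), (2, 2)
hiddendim, outsize = 5, 2

indim = reduce(operator.mul, shape, 1)            # 16 voxels in total
blocksize = reduce(operator.mul, blockshape, 1)   # 4 voxels per block
sequenceLength = indim // blocksize               # 4 timesteps per swipe
outdim = sequenceLength * outsize                 # 8 output values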
Code example #13
# initialize a pybrain dataset
DS = SupervisedDataSet(len(dataset.values[0]), np.size(tgt.values[0]))

# fill it
for i in xrange(len(dataset)):
    DS.appendLinked(dataset.values[i], [tgt.values[i]])

# split 70% for training, 30% for testing
train_set, test_set = DS.splitWithProportion(.7)

# build our recurrent network with 10 hidden neurodes, one recurrent
# connection, using tanh activation functions
net = RecurrentNetwork()
hidden_neurodes = 10
net.addInputModule(LinearLayer(len(train_set["input"][0]), name="in"))
net.addModule(TanhLayer(hidden_neurodes, name="hidden1"))
net.addOutputModule(LinearLayer(len(train_set["target"][0]), name="out"))
net.addConnection(FullConnection(net["in"], net["hidden1"], name="c1"))
net.addConnection(FullConnection(net["hidden1"], net["out"], name="c2"))
net.addRecurrentConnection(
    FullConnection(net["out"], net["hidden1"], name="cout"))
net.sortModules()
net.randomize()

# train for 30 epochs (overkill) using the rprop- training algorithm
trainer = RPropMinusTrainer(net, dataset=train_set, verbose=True)
trainer.trainOnDataset(train_set, 30)

# test on training set
predictions_train = np.array(
Code example #14
        yield freqs


import pybrain
import pybrain.tools.shortcuts as bs
from pybrain.structure.modules import BiasUnit, SigmoidLayer, LinearLayer, LSTMLayer, SoftmaxLayer
import pybrain.structure.networks as bn
import pybrain.structure.connections as bc
import pybrain.rl.learners.valuebased as bl
import pybrain.datasets.sequential as bd

MIDINOTENUM = 128

print "preparing network ...",
nn = bn.RecurrentNetwork()
nn_in_origaudio = LinearLayer(N_window / 2 + 1,
                              name="audioin")  # audio freqs input, mono signal
nn_out_midi = SigmoidLayer(MIDINOTENUM * 2, name="outmidi")

nn.addInputModule(nn_in_origaudio)
nn.addOutputModule(nn_out_midi)

#nn_hidden_in = LinearLayer(20, name="hidden-in")
#nn_hidden_mid = LSTMLayer(20, name="hidden-lstm")
#nn_hidden_out = LinearLayer(5, name="hidden-out")
nn_hidden_in = LSTMLayer(nn_out_midi.indim, name="hidden-in")
nn_hidden_out = SigmoidLayer(nn_out_midi.indim * 2, name="hidden-out")

nn.addModule(nn_hidden_in)
if nn_hidden_out is not nn_hidden_in: nn.addModule(nn_hidden_out)

Code example #15
		def __init__(self, parent, **kwargs):
			LinearLayer.__init__(self, self.dim, **kwargs)
			self.parent = parent
Code example #16
def main():
    results = []
    args = parse_args()

    # there's a bug in the _sparseness method in sklearn's nmf module that is
    # hit in some edge cases.  The value it computes isn't actually needed in
    # this case, so we can just ignore this divide by 0 error
    np.seterr(invalid="ignore")

    mtx = np.loadtxt(args.data_file, delimiter=',', skiprows=1)
    clabels = np.loadtxt(args.class_file, delimiter=',')

    print("Matrix is %d by %d and %f sparse" %
          (len(mtx), len(mtx[0]), Matrix.get_sparsity(mtx)))
    #print("clabels is %d by %d and %f sparse" % (len(clabels), len(clabels[0]), Matrix.get_sparsity(clabels)))
    #mtx = np.matrix.transpose(mtx)  # transpose to put samples into columns, genes into rows

    # create random class labels, replace with result of NMF
    #clabels = np.zeros(len(mtx))
    #for i in range(len(mtx)):
    # clabels[i] = random.randint(0, 3)
    clabels = np.matrix.transpose(clabels)

    print '-----------Logistic Regression-----------'
    t_lacc = 0
    for i in range(10):
        t_lacc = t_lacc + logistic_regression(mtx, clabels, True)

    print 'accuracy of logistic regression ', (t_lacc * 10)

    print '-----------ANN Computation----------'
    # prepare dataset for ANN
    ds = ClassificationDataSet(len(mtx[0]), 1,
                               nb_classes=5)  # replace with result of NMF
    for k in xrange(len(mtx)):
        ds.addSample(np.ravel(mtx[k]), clabels[k])

    # 10-fold cv
    t_error = 0
    t_acc = 0
    for i in range(10):
        # divide the data into training and test sets

        tstdata_temp, trndata_temp = ds.splitWithProportion(0.10)

        tstdata = ClassificationDataSet(len(mtx[0]), 1, nb_classes=5)
        for n in xrange(0, tstdata_temp.getLength()):
            tstdata.addSample(
                tstdata_temp.getSample(n)[0],
                tstdata_temp.getSample(n)[1])

        trndata = ClassificationDataSet(len(mtx[0]), 1, nb_classes=5)
        for n in xrange(0, trndata_temp.getLength()):
            trndata.addSample(
                trndata_temp.getSample(n)[0],
                trndata_temp.getSample(n)[1])

        trndata._convertToOneOfMany()
        tstdata._convertToOneOfMany()

        fnn = FeedForwardNetwork()
        inp = LinearLayer(trndata.indim)
        h1 = SigmoidLayer(10)
        h2 = TanhLayer(10)
        h3 = TanhLayer(10)
        h4 = TanhLayer(10)
        h5 = TanhLayer(10)
        outp = LinearLayer(trndata.outdim)
        #fnn = buildNetwork( trndata.indim, 10 , trndata.outdim, outclass=SoftmaxLayer )

        # add modules
        fnn.addOutputModule(outp)
        fnn.addInputModule(inp)
        fnn.addModule(h1)
        fnn.addModule(h2)
        fnn.addModule(h3)
        fnn.addModule(h4)
        fnn.addModule(h5)
        # create connections
        fnn.addConnection(FullConnection(inp, h1))
        fnn.addConnection(FullConnection(inp, h2))
        fnn.addConnection(FullConnection(inp, h3))
        fnn.addConnection(FullConnection(inp, h4))
        fnn.addConnection(FullConnection(inp, h5))
        fnn.addConnection(FullConnection(h1, h2))
        fnn.addConnection(FullConnection(h2, h3))
        fnn.addConnection(FullConnection(h3, h4))
        fnn.addConnection(FullConnection(h4, h5))

        fnn.addConnection(FullConnection(h5, outp))

        fnn.sortModules()

        trainer = BackpropTrainer(fnn,
                                  dataset=trndata,
                                  momentum=0.1,
                                  learningrate=0.01,
                                  verbose=True,
                                  weightdecay=0.01)

        #trainer.trainUntilConvergence()
        trainer.trainEpochs(5)

        t_error = t_error + percentError(
            trainer.testOnClassData(dataset=tstdata), tstdata['class'])

    print 'avg error ', (t_error / 10)
    print 'avg acc ', (100 - (t_error / 10))
Code example #17
def multigaussian(x, mean, stddev):
    """Returns value of uncorrelated Gaussians at given scalar point.
    x: scalar
    mean: vector
    stddev: vector
    """
    tmp = -0.5 * ((x - mean) / stddev)**2
    return np.exp(tmp) / (np.sqrt(2. * np.pi) * stddev)


if __name__ == '__main__':
    # build a network
    n = FeedForwardNetwork()
    # linear input layer
    n.addInputModule(LinearLayer(1, name='in'))
    # output layer of type 'outclass'
    N_GAUSSIANS = 3
    n.addOutputModule(MixtureDensityLayer(dim=1, name='out', mix=N_GAUSSIANS))
    # add bias module and connection to out module
    n.addModule(BiasUnit(name='bias'))
    n.addConnection(FullConnection(n['bias'], n['out']))

    # arbitrary number of hidden layers of type 'hiddenclass'
    n.addModule(SigmoidLayer(5, name='hidden'))
    n.addConnection(FullConnection(n['bias'], n['hidden']))

    # network with hidden layer(s), connections
    # from in to first hidden and last hidden to out
    n.addConnection(FullConnection(n['in'], n['hidden']))
    n.addConnection(FullConnection(n['hidden'], n['out']))
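The snippet stops before the network is finalized; to actually query the mixture density network (continuing the code above):

    n.sortModules()                 # finalize topology before activation
    params = n.activate([0.5])      # mixture parameters predicted at x = 0.5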
Code example #18
import pybrain
import pybrain.tools.shortcuts as bs
from pybrain.structure.modules import BiasUnit, SigmoidLayer, LinearLayer, LSTMLayer, SoftmaxLayer
import pybrain.structure.networks as bn
import pybrain.structure.connections as bc
import pybrain.rl.learners.valuebased as bl
import pybrain.supervised as bt
import pybrain.datasets.sequential as bd


MIDINOTENUM = 128

print "preparing network ...",
nn = bn.RecurrentNetwork()
nn_in_origaudio = LinearLayer(1, name="audioin") # audio input, mono signal
nn_in_sampleraudio = LinearLayer(1, name="sampleraudio") # audio from midi sampler
nn_in_curmidikeys = LinearLayer(MIDINOTENUM, name="curmidikeys")
nn_in_curmidikeyvels = LinearLayer(MIDINOTENUM, name="curmidikeyvels")
nn_out_midikeys = LinearLayer(MIDINOTENUM, name="outmidikeys")
nn_out_midikeyvels = LinearLayer(MIDINOTENUM, name="outmidikeyvels")
nn_hidden_in = LSTMLayer(6, name="hidden")
nn_hidden_out = nn_hidden_in

nn.addModule(nn_hidden_in)
if nn_hidden_out is not nn_hidden_in: nn.addModule(nn_hidden_out)

nn.addRecurrentConnection(bc.FullConnection(nn_hidden_out, nn_hidden_in, name="recurrent_conn"))


TicksPerSecond = 100
Code example #19
from pybrain.utilities import percentError
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure import FeedForwardNetwork, FullConnection
from pybrain.structure.modules import SoftmaxLayer, LinearLayer, SigmoidLayer, TanhLayer
from gamesteamsimportwithoutfcs import getMeTeamsAndGamesBitch
from gamesteamsimportwithoutfcs import attributelist


def Normalize(minmaxtuple, value):
    # min-max scale `value` into [0, 1]
    denom = minmaxtuple[1] - minmaxtuple[0]
    return (value - minmaxtuple[0]) / denom


n = FeedForwardNetwork()
inLayer = LinearLayer(len(attributelist) * 2)
hiddenLayer = SigmoidLayer(3)
outLayer = LinearLayer(1)

n.addInputModule(inLayer)
n.addModule(hiddenLayer)
n.addOutputModule(outLayer)

in_to_hidden = FullConnection(inLayer, hiddenLayer)
hidden_to_out = FullConnection(hiddenLayer, outLayer)

n.addConnection(in_to_hidden)
n.addConnection(hidden_to_out)

n.sortModules()
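With the network sorted, training can proceed with the BackpropTrainer imported above. A sketch, assuming a SupervisedDataSet whose dimensions match the layers:

from pybrain.datasets import SupervisedDataSet

ds = SupervisedDataSet(len(attributelist) * 2, 1)
# ds.addSample(features, [target]) for each game...

trainer = BackpropTrainer(n, ds, learningrate=0.01)
trainer.trainEpochs(10)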