Try writing it to an XML file, rereading it, and checking that it looks the same:

    >>> from pybrain.tests import xmlInvariance
    >>> xmlInvariance(n)
    Same representation
    Same function
    Same class

"""

__author__ = 'Tom Schaul, [email protected]'

from scipy import ones

from pybrain.structure.networks import NeuronDecomposableNetwork
from pybrain.tools.shortcuts import buildNetwork
from pybrain.tests import runModuleTestSuite


def buildDecomposableNetwork():
    """ three hidden neurons, with 2 in- and 2 outconnections each. """
    n = buildNetwork(2, 3, 2, bias = False)
    ndc = NeuronDecomposableNetwork.convertNormalNetwork(n)
    # set all the weights to 1
    ndc._setParameters(ones(12))
    return ndc

if __name__ == "__main__":
    runModuleTestSuite(__import__('__main__'))
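
As a rough usage sketch (assuming this test module is importable; nothing below is part of the original file), the helper above can be combined with the xmlInvariance check from the doctest at the top:

    from pybrain.tests import xmlInvariance

    ndc = buildDecomposableNetwork()   # all 12 weights set to 1
    xmlInvariance(ndc)                 # should report: Same representation / Same function / Same class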

Example #2
    >>> t = BackpropTrainer(n, learningrate = 0.01, momentum = 0.99, verbose = True)
    >>> t.trainOnDataset(ds, 4)
    Total error: 2.44696473875
    Total error: 1.97570498879
    Total error: 1.23940309483
    Total error: 0.546129967878
    >>> abs(n.params[10:15] - array([ -0.53868206, -0.54185834,  0.26726394, -1.90008234, -1.12114946])).round(5)
    array([ 0.,  0.,  0.,  0.,  0.])

Now the same for RPROP:

    >>> t = RPropMinusTrainer(n, verbose = True)
    >>> t.trainOnDataset(ds, 4)
    epoch      0  total error      0.16818   avg weight       0.92638
    epoch      1  total error      0.15007   avg weight       0.92202
    epoch      2  total error      0.15572   avg weight       0.92684
    epoch      3  total error      0.13036   avg weight       0.92604
    >>> abs(n.params[5:10] - array([ -0.19241111,  1.43404022,  0.23062397, -0.40105413,  0.62100109])).round(5)
    array([ 0.,  0.,  0.,  0.,  0.])

"""

__author__ = "Martin Felder, [email protected]"


from scipy import array
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.supervised.trainers.rprop import RPropMinusTrainer
from pybrain.tests import runModuleTestSuite

if __name__ == "__main__":
    runModuleTestSuite(__import__("__main__"))
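
The doctest above uses a network n and a dataset ds that are built in earlier, elided lines of the original test file. Purely as an illustration (layer sizes, samples, and names below are assumptions, not the original setup), a comparable run could be wired up like this:

    from pybrain.datasets import SupervisedDataSet
    from pybrain.tools.shortcuts import buildNetwork
    from pybrain.supervised.trainers import BackpropTrainer
    from pybrain.supervised.trainers.rprop import RPropMinusTrainer

    # assumed 2-3-1 network and XOR-style dataset, not the originals from the test
    n = buildNetwork(2, 3, 1)
    ds = SupervisedDataSet(2, 1)
    for inp, tgt in [((0, 0), (0,)), ((0, 1), (1,)), ((1, 0), (1,)), ((1, 1), (0,))]:
        ds.addSample(inp, tgt)

    t = BackpropTrainer(n, learningrate=0.01, momentum=0.99, verbose=True)
    t.trainOnDataset(ds, 4)       # prints one "Total error: ..." line per epoch

    t = RPropMinusTrainer(n, verbose=True)
    t.trainOnDataset(ds, 4)       # prints one "epoch ... total error ..." line per epoch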
Example #3

from pybrain.structure.modules import LinearLayer, MDLSTMLayer, BiasUnit
from pybrain.structure.connections import FullConnection, IdentityConnection
from pybrain.structure.networks import RecurrentNetwork
from pybrain.tests import runModuleTestSuite


def buildSimpleMDLSTMNetwork(peepholes=False):
    """ Build a recurrent network around a single one-dimensional MDLSTM cell. """
    N = RecurrentNetwork('simpleMDLstmNet')
    i = LinearLayer(1, name='i')
    dim = 1
    h = MDLSTMLayer(dim, peepholes=peepholes, name='MDlstm')
    o = LinearLayer(1, name='o')
    b = BiasUnit('bias')
    N.addModule(b)
    N.addOutputModule(o)
    N.addInputModule(i)
    N.addModule(h)
    N.addConnection(FullConnection(i, h, outSliceTo=4 * dim, name='f1'))
    N.addConnection(FullConnection(b, h, outSliceTo=4 * dim, name='f2'))
    N.addRecurrentConnection(
        FullConnection(h, h, inSliceTo=dim, outSliceTo=4 * dim, name='r1'))
    N.addRecurrentConnection(
        IdentityConnection(h,
                           h,
                           inSliceFrom=dim,
                           outSliceFrom=4 * dim,
                           name='rstate'))
    N.addConnection(FullConnection(h, o, inSliceTo=dim, name='f3'))
    N.sortModules()
    return N


if __name__ == "__main__":
    runModuleTestSuite(__import__('__main__'))
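
As a quick, hypothetical usage sketch (not part of the original test file), the network can be activated a few times to exercise the recurrent MDLSTM state:

    net = buildSimpleMDLSTMNetwork()
    net.reset()
    print(net.activate([0.2]))   # first time step
    print(net.activate([0.2]))   # second step differs because of the recurrent state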
Example #4
            self.w[:] = self.f
        if fullUpdate and self.hasLearnableConn():
            for c in self.conns:
                c.updateLearnParams()

    def _forwardImplementation(self, inbuf, outbuf):
        # external-input slice of the input buffer
        extin = inbuf[:self.dim]

        # update oscillator states and frequencies (and, if present, the
        # learnable connection matrices) with one rk4 integration step
        if self.hasLearnableConn():
            z, fr, conns = zfcrk4(self.t, self.dt, self, extin)
            for i in range(len(self.conns)):
                # self.conns[i].c[:] = limitC(np.reshape(conns[i], self.conns[i].c.shape), self.conns[i].roote)
                self.conns[i].c[:] = conns[i]
        else:
            z, fr = zfrk4(self.t, self.dt, self, extin)

        # clip frequencies to [fr_min, fr_max] and refresh the derived parameters
        self.fr[:] = np.minimum(np.maximum(fr, self.fr_min), self.fr_max)
        self.f[:] = self.fr / TWO_PI
        self.updateOscParams(self.offset % self.fupdate == 0)

        self.t += self.dt
        outbuf[:] = z

if __name__ == "__main__":
    from pybrain.tests import runModuleTestSuite
    import pygfnn.tests.unittests.structure.modules.test_simple_gfnn_network as test
    runModuleTestSuite(test)
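
The zfrk4 and zfcrk4 helpers above are pygfnn internals known here only from their call sites; their names suggest a classical fourth-order Runge-Kutta step. For orientation only, a generic, self-contained RK4 step for dy/dt = f(t, y) (hypothetical names, not pygfnn API) looks like this:

    import numpy as np

    def rk4_step(f, t, y, dt):
        """One classical fourth-order Runge-Kutta step for dy/dt = f(t, y)."""
        k1 = f(t, y)
        k2 = f(t + 0.5 * dt, y + 0.5 * dt * k1)
        k3 = f(t + 0.5 * dt, y + 0.5 * dt * k2)
        k4 = f(t + dt, y + dt * k3)
        return y + (dt / 6.0) * (k1 + 2.0 * k2 + 2.0 * k3 + k4)

    # example: exponential decay y' = -y, integrated for ten steps of dt = 0.1
    y = np.array([1.0])
    for step in range(10):
        y = rk4_step(lambda t, y: -y, step * 0.1, y, 0.1)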