# Esempio n. 1 (Example 1)
    def __init__(self,
                 input_dim=None,
                 output_dim=None,
                 dtype='float64',
                 prototype=None):
        """Construct a randomly initialized reservoir node.

        input_dim  -- number of input channels fed into the reservoir
        output_dim -- number of outputs, which is also the number of
                      neurons in the reservoir
        dtype      -- 'float64' selects a double-precision ESN, any
                      other value the single-precision variant
        prototype  -- an existing reservoir that is cloned with all of
                      its parameters
        """
        super(ReservoirNode, self).__init__(input_dim, output_dim, dtype)

        # choose the precision of the underlying aureservoir network
        esn_class = au.DoubleESN if dtype == 'float64' else au.SingleESN

        # either clone the given prototype or start from a fresh network
        if prototype:
            self.reservoir = esn_class(prototype)
        else:
            self.reservoir = esn_class()

        # override the network dimensions when given explicitly
        if input_dim:
            self.reservoir.setInputs(input_dim)
        if output_dim:
            self.reservoir.setSize(output_dim)

        # finally draw the random weights
        self.reservoir.init()
def setup_ESN_DS():
    """Build one delay&sum ESN for the simulation."""
    net = au.DoubleESN()
    net.setSize(100)  # 200 works a bit better
    net.setInputs(14)
    net.setOutputs(9)
    for param, value in ((au.ALPHA, 0.7),
                         (au.CONNECTIVITY, 0.2),
                         (au.IN_SCALE, 1.5),
                         (au.IN_CONNECTIVITY, 1.)):
        net.setInitParam(param, value)
    net.setOutputAct(au.ACT_TANH)
    net.trainnoise = 1e-5

    # delay-and-sum readout trained with the pseudo-inverse
    net.setSimAlgorithm(au.SIM_FILTER_DS)
    net.setTrainAlgorithm(au.TRAIN_DS_PI)
    net.setInitParam(au.DS_USE_CROSSCORR)
    net.maxdelay = 7
    net.setInitParam(au.DS_MAXDELAY, net.maxdelay)
    net.setInitParam(au.DS_EM_ITERATIONS, 1)
    net.ds = 1

    # pick randomly from the training examples
    net.randrange = 1
    return net
# Esempio n. 3 (Example 3)
def construct_au_esn(size=100,
                     conn=0.1,
                     ins=257,
                     outs=6,
                     washout=0,
                     datatype='float64'):
    """ Aureservoir ESN.
    """
    model = au.DoubleESN()
    model.setSize(size)
    #    model.setSize(400) # last output geht hier besser, mean auch
    #    model.setSize(1000) # bei last output 0 testerror
    model.setInputs(ins)
    model.setOutputs(outs)
    model.setInitParam(au.ALPHA, 0.2)
    #    model.setInitParam(au.ALPHA, 0.5)
    model.setInitParam(au.CONNECTIVITY, conn)
    model.setInitParam(au.IN_SCALE, 1)
    model.setInitParam(au.IN_CONNECTIVITY, 0.3)
    model.setOutputAct(au.ACT_TANH)
    model.setSimAlgorithm(au.SIM_LI)
    model.setInitParam(au.LEAKING_RATE, 0.2)
    #    model.setSimAlgorithm(au.SIM_FILTER_DS)
    #    model.setSimAlgorithm(au.SIM_SQUARE)   # bringt manchmal doch was !!!
    #    model.setTrainAlgorithm( au.TRAIN_DS_PI )
    #    model.setInitParam(au.DS_USE_CROSSCORR)
    #    model.maxdelay = 100
    #    model.setInitParam(au.DS_MAXDELAY, model.maxdelay)
    #    model.setInitParam(au.DS_EM_ITERATIONS, 1)
    #    model.setInitParam(au.EM_VERSION, 1)

    # make multiple ESNs
    multiple_esns = 0
    if multiple_esns:
        array_size = 20
        print "ArrayESN: Using", array_size, "reservoir"
        model = au.DoubleArrayESN(model, array_size)

    # has d+s readout (1) or arrayesn (2) ?
    model.ds = 0

    # additional properties
    model.type = 'ESN_au'
    model.dtype = datatype
    #    model.trainnoise = 1.e-5
    model.trainnoise = 0.
    model.testnoise = 0.
    model.randrange = 0
    model.washout = washout

    # init ESN
    model.init()

    return model
# Esempio n. 4 (Example 4)
def logistic_map_sfa():
    """Extract the slowly varying driving force of a logistic map."""
    # slowly varying driving force: a combination of three sine waves
    two_pi = mdp.numx.pi * 2
    # time axis: 1 s at a sample rate of 10 kHz
    t = mdp.numx.linspace(0, 1, 10000, endpoint=0)
    dforce = (mdp.numx.sin(two_pi * 5 * t) +
              mdp.numx.sin(two_pi * 11 * t) +
              mdp.numx.sin(two_pi * 13 * t))

    # input time series: variables on columns, observations on rows
    series = mdp.numx.zeros((10000, 1), 'd')
    series[0] = 0.6
    for i in range(1, 10000):
        series[i] = logistic_map(series[i - 1], 3.6 + 0.13 * dforce[i])

    # reservoir prototype
    prot = au.DoubleESN()
    prot.setInitParam(au.CONNECTIVITY, 0.1)
    prot.setInitParam(au.ALPHA, 0.9)
    prot.setSize(100)

    # reservoir expansion followed by SFA (3 slowest features)
    flow = mdp.Flow([ReservoirNode(1, 100, prototype=prot),
                     mdp.nodes.SFANode(output_dim=3)])

    flow.train(series)

    # execute the flow to obtain the slow features
    slow = flow(series)

    # rescale the driving force for visual comparison
    resc_dforce = (dforce - mdp.numx.mean(dforce, 0)) / mdp.numx.std(dforce, 0)

    pylab.plot(slow[:, 0])
    pylab.plot(resc_dforce)
    pylab.show()
# Esempio n. 5 (Example 5)
    def testWithIterator(self, level=1):
        """Check iterator-based, multi-chunk training against aureservoir."""
        # assemble the MDP version of the ESN
        res_node = ReservoirNode(self.inputs, self.size,
                                 dtype='float64', prototype=self.net)
        readout_node = LinearReadoutNode(self.size + self.inputs, self.outputs,
                                         ignore=self.washout)

        # initialize the reference network and copy its weights into the
        # node so both simulations really use the same reservoir
        self.net.init()
        res_node.reservoir = au.DoubleESN(self.net)

        # hierarchical network: reservoir states concatenated with the input
        joined = mdp.hinet.SameInputLayer(
            [res_node, IdentityNode(self.inputs, self.inputs)])
        flow = mdp.Flow([joined, readout_node])

        # train the reference aureservoir ESN in one go
        self.net.train(self.train_in.T.copy(), self.train_out.T.copy(),
                       self.washout)

        # train the MDP ESN from an iterator over several chunks
        stages = 5
        chunk_len = self.train_steps / stages
        chunks = [(self.train_in[k * chunk_len:(k + 1) * chunk_len, :],
                   self.train_out[k * chunk_len:(k + 1) * chunk_len, :])
                  for k in range(stages)]

        flow.train([None, DataIterator(chunks)])

        # the output weights of both implementations must agree
        assert_array_almost_equal(self.net.getWout().copy().T, flow[1].W)

        # simulate the reference model on the test data
        ref_out = np.zeros((self.outputs, self.test_steps))
        self.net.simulate(self.test_in.T.copy(), ref_out)

        # run the MDP model on the same data and compare the outputs
        assert_array_almost_equal(ref_out.T, flow(self.test_in))
# Esempio n. 6 (Example 6)
    def testMultipleTrainCalls(self, level=1):
        """Check that repeated train() calls match one-shot training."""
        # assemble the MDP version of the ESN
        res_node = ReservoirNode(self.inputs, self.size,
                                 dtype='float64', prototype=self.net)
        readout_node = LinearReadoutNode(self.size + self.inputs, self.outputs,
                                         ignore=self.washout)

        # initialize the reference network and copy its weights into the
        # node so both simulations really use the same reservoir
        self.net.init()
        res_node.reservoir = au.DoubleESN(self.net)

        # hierarchical network wrapped into a single FlowNode
        joined = mdp.hinet.SameInputLayer(
            [res_node, IdentityNode(self.inputs, self.inputs)])
        net_node = mdp.hinet.FlowNode(mdp.Flow([joined, readout_node]))

        # train the reference aureservoir ESN in one go
        self.net.train(self.train_in.T.copy(), self.train_out.T.copy(),
                       self.washout)

        # feed the MDP ESN the same data via several train() calls
        stages = 5
        chunk_len = self.train_steps / stages
        for k in range(stages):
            lo, hi = k * chunk_len, (k + 1) * chunk_len
            net_node.train(self.train_in[lo:hi, :], self.train_out[lo:hi, :])
        net_node.stop_training()

        # the output weights of both implementations must agree
        assert_array_almost_equal(self.net.getWout().copy().T,
                                  net_node._flow[1].W)

        # simulate the reference model on the test data
        ref_out = np.zeros((self.outputs, self.test_steps))
        self.net.simulate(self.test_in.T.copy(), ref_out)

        # run the MDP model on the same data and compare the outputs
        assert_array_almost_equal(ref_out.T, net_node(self.test_in))
# Esempio n. 7 (Example 7)
    def setUp(self):
        """Create a small reference ESN plus random train/test data."""
        # network dimensions; train_steps must stay divisible by 5
        # (the multi-stage tests split it into 5 equal chunks)
        self.size = 10
        self.inputs = 2
        self.outputs = 2
        self.train_steps = 50
        self.test_steps = 50
        self.washout = 8

        # reference aureservoir ESN used by all tests
        net = au.DoubleESN()
        net.setInputs(self.inputs)
        net.setOutputs(self.outputs)
        net.setSize(self.size)
        self.net = net

        # random input and target sequences for training and testing
        self.train_in = np.random.rand(self.train_steps, self.inputs)
        self.test_in = np.random.rand(self.test_steps, self.inputs)
        self.train_out = np.random.rand(self.train_steps, self.outputs)
def setup_ESN_jaeger_nr1():
    """Build the ESN of "Method Nr.1" from Jaeger's paper."""
    net = au.DoubleESN()
    net.setSize(100)
    net.setInputs(14)
    net.setOutputs(9)
    for param, value in ((au.ALPHA, 0.7),
                         (au.CONNECTIVITY, 0.2),
                         (au.IN_SCALE, 1.5),
                         (au.IN_CONNECTIVITY, 1.)):
        net.setInitParam(param, value)
    net.setOutputAct(au.ACT_TANH)
    net.trainnoise = 1.e-5
    net.ds = 0
    # pick randomly from the training examples
    net.randrange = 1
    return net
# Esempio n. 9 (Example 9)
    def testSimpleESN(self, level=1):
        """Compare the MDP node chain against a plain aureservoir ESN."""
        # assemble the MDP version of the ESN
        res_node = ReservoirNode(self.inputs, self.size,
                                 dtype='float64', prototype=self.net)
        readout_node = LinearReadoutNode(self.size + self.inputs, self.outputs,
                                         ignore=self.washout)

        # initialize the reference network and copy its weights into the
        # node so both simulations really use the same reservoir
        self.net.init()
        res_node.reservoir = au.DoubleESN(self.net)

        # hierarchical network: reservoir states concatenated with the input
        joined = mdp.hinet.SameInputLayer(
            [res_node, IdentityNode(self.inputs, self.inputs)])
        flow = mdp.Flow([joined, readout_node])

        # train the reference aureservoir ESN
        self.net.train(self.train_in.T.copy(), self.train_out.T.copy(),
                       self.washout)

        # train the MDP network (untrainable nodes get a None entry)
        flow.train([None, [(self.train_in, self.train_out)]])

        # the output weights of both implementations must agree
        assert_array_almost_equal(self.net.getWout().copy().T, flow[1].W)

        # simulate the reference model on the test data
        ref_out = np.zeros((self.outputs, self.test_steps))
        self.net.simulate(self.test_in.T.copy(), ref_out)

        # run the MDP model on the same data and compare the outputs
        assert_array_almost_equal(ref_out.T, flow(self.test_in))
# Esempio n. 10 (Example 10)
def reservoir_sfa(testsignal, size=100, conn=0.1):
    """Run a reservoir + PCA + SFA flow over *testsignal*.

    testsignal -- input signal, treated as a single column vector
    size       -- reservoir size
    conn       -- reservoir connectivity

    Returns the slow features of the trained flow (n_samples, 3).
    """
    # reshape to a column vector without mutating the caller's array:
    # the original assigned testsignal.shape in place, silently changing
    # the shape of the argument outside this function
    testsignal = testsignal.reshape(-1, 1)

    # reservoir prototype
    prot = au.DoubleESN()
    prot.setInitParam(au.CONNECTIVITY, conn)
    prot.setInitParam(au.ALPHA, 0.9)
    prot.setSize(size)

    # reservoir expansion, PCA reduction, then SFA
    # NOTE(review): PCANode(output_dim=300) needs at least 300 input
    # dimensions, i.e. size >= 300 -- confirm for the default size=100
    res_sfa = mdp.Flow([
        ReservoirNode(1, size, prototype=prot),
        mdp.nodes.PCANode(output_dim=300, svd=True),
        mdp.nodes.SFANode(output_dim=3)
    ])

    # train and execute the flow to get the slow features
    res_sfa.train(testsignal)
    slow = res_sfa(testsignal)

    return slow
# Esempio n. 11 (Example 11)
def setup_single_ESN():
    """Jaeger's "Method Nr.1" network, implemented with MDP nodes."""
    res_size = 100
    n_in, n_out = 14, 9

    # reservoir prototype
    prot = au.DoubleESN()
    prot.setSize(res_size)
    prot.setInputs(n_in)
    prot.setOutputs(n_out)
    for param, value in ((au.ALPHA, 0.7),
                         (au.CONNECTIVITY, 0.2),
                         (au.IN_SCALE, 1.5),
                         (au.IN_CONNECTIVITY, 1.)):
        prot.setInitParam(param, value)
    prot.setOutputAct(au.ACT_TANH)

    # reservoir node plus linear readout (pseudo-inverse training)
    reservoir = ReservoirNode(input_dim=n_in,
                              output_dim=res_size,
                              dtype='float64',
                              prototype=prot)
    readout = LinearReadoutNode(input_dim=res_size + n_in,
                                output_dim=n_out,
                                use_pi=1)

    # duplicate the input so the readout also sees it directly
    switchboard = mdp.hinet.Switchboard(n_in,
                                        connections=np.r_[range(n_in),
                                                          range(n_in)])
    reslayer = mdp.hinet.Layer([reservoir, IdentityNode(n_in, n_in)])
    model = mdp.Flow([switchboard, reslayer, readout])

    # extra attributes used by the experiment driver
    model.trainnoise = 1.e-5
    model.randrange = 1
    return model
# Esempio n. 12 (Example 12)
    def __init__(self, size=100, conn=1, inputs=1, outputs=1):
        """Set up random weights plus several sparse representations.

        size    -- number of reservoir neurons
        conn    -- reservoir connectivity (fraction of nonzero weights)
        inputs  -- number of input channels
        outputs -- number of output channels
        """
        self.N = size
        self.ins = inputs
        self.outs = outputs

        # dense random weight matrices in [-1, 1)
        self.Wout = np.random.rand(self.outs, self.N + self.ins) * 2 - 1
        self.W = np.random.rand(self.N, self.N) * 2 - 1
        self.Win = np.random.rand(self.N, self.ins) * 2 - 1
        self.x = np.zeros((self.N))

        # reference aureservoir network with the same dimensions
        self.aunet = au.DoubleESN()
        self.aunet.setSize(size)
        self.aunet.setInputs(inputs)
        self.aunet.setOutputs(outputs)
        self.aunet.setInitParam(au.ALPHA, 1.)
        self.aunet.setInitParam(au.CONNECTIVITY, conn)
        self.aunet.setInitParam(au.IN_CONNECTIVITY, 1.)
        self.aunet.init()
        self.aunet.setWout(self.Wout)
        self.aunet.post()

        # fetch the reservoir matrix from aureservoir and keep a scipy
        # CSR copy for sparse simulation
        self.aunet.getW(self.W)
        self.Wsp = sparse.csr_matrix(
            self.W)  # not efficient, use lil to construct matrix

        # build the pysparse matrix from the nonzero entries only,
        # instead of scanning all N*N entries of the dense matrix
        tmp = spmatrix.ll_mat(self.N, self.N, int(self.N * self.N * conn))
        nz_rows, nz_cols = np.nonzero(self.W)
        for i, j in zip(nz_rows, nz_cols):
            tmp[int(i), int(j)] = self.W[i, j]
        self.Wsp2 = tmp.to_csr()
# Esempio n. 13 (Example 13)
def run_jaeger_simulation():
    """Run the vowel benchmark with an ensemble of voting ESNs.

    Creates ``nExperts`` small leaky-integrator reservoirs cloned from one
    common model, trains each expert on the benchmark training data,
    computes every expert's output on all train and test examples, and
    shelves the individual outputs plus the targets to "vowelresults.dat".
    """
    nExperts = 500  # Number of voting experts
    #nExperts = 16      # for testing
    nInternalUnits = 4  # reservoir size per voter
    nInputUnits = 14  # number of input units
    nOutputUnits = 9  # number of output units, fixed at 9 for this task
    suppNr = 3  # called D in Jaegers paper (support points),
    # D segmented reservoir states per sample

    # make a reservoir model (the prototype every expert is cloned from)
    model = au.DoubleESN()
    model.setSize(nInternalUnits)
    model.setInputs(nInputUnits)
    model.setOutputs(nOutputUnits)
    model.setInitParam(au.ALPHA, 0.2)
    model.setInitParam(au.CONNECTIVITY, 1.)
    model.setInitParam(au.IN_SCALE, 1.5)
    model.setInitParam(au.IN_CONNECTIVITY, 1.)
    model.setOutputAct(au.ACT_TANH)
    # leaky integrator ESN
    model.setSimAlgorithm(au.SIM_LI)
    model.setInitParam(au.LEAKING_RATE, 0.2)
    model.post()

    # Initialization

    # per-expert outputs; NOTE(review): 270/370 look like the number of
    # train/test examples -- must match what load_data() returns
    trainOutsIndividual = np.zeros((270, nOutputUnits, nExperts))
    testOutsIndividual = np.zeros((370, nOutputUnits, nExperts))

    experts = []  # list with all reservoirs
    print "Initialization ..."
    for n in range(nExperts):
        # each expert is a clone of the prototype with its own random init
        newReservoir = au.DoubleESN(model)
        newReservoir.suppNr = suppNr
        experts.append(newReservoir)
        experts[n].init()  # init networks

    print "Load Benchmark Data ..."
    trainInputs, trainOutputs, testInputs, testOutputs, shifts = load_data()

    # shift trainOutputs into range [-0.9,0.9]
    for n in range(len(trainOutputs)):
        trainOutputs[n] = 1.8 * trainOutputs[n] - 0.9

    # Training
    print "Training ..."
    for n in range(nExperts):
        train_esn(trainInputs, trainOutputs, experts[n])

    # Testing
    print "Testing ..."

    # calculate output for each training example
    for n in range(nExperts):
        for i in range(270):
            trainOutsIndividual[i, :, n] = test_esn(trainInputs[i], experts[n])

        # now calc output for test examples
        for i in range(370):
            testOutsIndividual[i, :, n] = test_esn(testInputs[i], experts[n])

    # rescale signals into original scale (inverse of the shift above)
    trainOutsIndividual = (trainOutsIndividual + 0.9) / 1.8
    testOutsIndividual = (testOutsIndividual + 0.9) / 1.8

    # save data for analysis
    print "Writing results to disk ..."

    # get the right targets: row 0 of each example is used as the target
    # NOTE(review): assumes the target is constant per example -- confirm
    trainTargetsIndividual = np.zeros((270, nOutputUnits))
    testTargetsIndividual = np.zeros((370, nOutputUnits))
    for i in range(270):
        trainTargetsIndividual[i, :] = trainOutputs[i][0, :]
    for i in range(370):
        testTargetsIndividual[i, :] = testOutputs[i][0, :]
    # rescale training targets (they were shifted before training)
    trainTargetsIndividual = (trainTargetsIndividual + 0.9) / 1.8

    # write data to disk
    data = shelve.open("vowelresults.dat")
    data["trainOutsIndividual"] = trainOutsIndividual
    data["testOutsIndividual"] = testOutsIndividual
    data["trainTargetsIndividual"] = trainTargetsIndividual
    data["testTargetsIndividual"] = testTargetsIndividual
    data["nExperts"] = nExperts
    data["shifts"] = shifts
    data.close()

    print "... finished !"
# Esempio n. 14 (Example 14)
def hierarchical_reservoir(file="hierarchical_reservoir.dat"):
    #    signal = datasets.read_single_soundfile()
    signal, label = datasets.get_two_class_dataset(5)

    # normalize signal
    #    signal = signal / signal.max()

    # reservoir prototype
    prot = au.DoubleESN()
    prot.setInitParam(au.CONNECTIVITY, 0.2)
    prot.setInitParam(au.ALPHA, 0.8)
    prot.setSize(100)
    #    prot.setNoise( 1e-3 )
    #    prot.setReservoirAct( au.ACT_LINEAR )

    # Hierarchical Network with Reservoirs and SFA nodes
    layer1 = mdp.Flow([ReservoirNode(1, 100, 'float64', prot), \
                       mdp.nodes.SFANode(output_dim=3), \
#                       mdp.nodes.PCANode(output_dim=5,svd=True), \
                       ResampleNode(3, 0.3, window="hamming") ])
    #    prot.setSize(50)
    layer2 = mdp.Flow([ReservoirNode(3, 100, 'float64', prot), \
                       mdp.nodes.SFANode(output_dim=3), \
#                       mdp.nodes.PCANode(output_dim=5,svd=True), \
                       ResampleNode(3, 0.3, window="hamming") ])
    #    prot.setSize(50)
    layer3 = mdp.Flow([ReservoirNode(3, 100, 'float64', prot), \
#                       mdp.nodes.PCANode(output_dim=3,svd=True) ])

                       mdp.nodes.SFANode(output_dim=3),
                       ResampleNode(3, 0.3, window="hamming") ])
    layer4 = mdp.Flow([ReservoirNode(3, 100, 'float64', prot), \
#                       mdp.nodes.PCANode(output_dim=3,svd=True) ])

                       mdp.nodes.SFANode(output_dim=3),
                       ResampleNode(3, 0.3, window="hamming") ])
    layer5 = mdp.Flow([ReservoirNode(3, 100, 'float64', prot), \
#                       mdp.nodes.PCANode(output_dim=3,svd=True) ])

                       mdp.nodes.SFANode(output_dim=3),
                       ResampleNode(3, 0.3, window="hamming") ])
    layer6 = mdp.Flow([ReservoirNode(3, 100, 'float64', prot), \
#                       mdp.nodes.PCANode(output_dim=3,svd=True) ])

                       mdp.nodes.SFANode(output_dim=3),
                       ResampleNode(3, 0.3, window="hamming") ])

    # train and execute the layers
    layer1.train(signal)
    slow1 = layer1(signal)
    layer2.train(slow1)
    slow2 = layer2(slow1)
    layer3.train(slow2)
    slow3 = layer3(slow2)
    layer4.train(slow3)
    slow4 = layer4(slow3)
    layer5.train(slow4)
    slow5 = layer5(slow4)
    layer6.train(slow5)
    slow6 = layer6(slow5)

    print signal.shape, slow1.shape, slow2.shape, slow3.shape

    data = shelve.open(file)
    data["signal"] = signal
    data["label"] = label
    data["slow1"] = slow1
    data["slow2"] = slow2
    data["slow3"] = slow3
    data["slow4"] = slow4
    data["slow5"] = slow5
    data["slow6"] = slow6
    data["layers"] = 6
    data.close()
# Esempio n. 15 (Example 15)
def construct_mdp_esn(size=100,
                      conn=0.1,
                      ins=257,
                      outs=6,
                      washout=0,
                      svm=0,
                      svm_C=5,
                      lr=0.2,
                      in_scale=1.,
                      noise=0.,
                      datatype='float64'):
    """Build a simple standard leaky-integrator ESN as an MDP FlowNode.

    size     -- reservoir size
    conn     -- reservoir connectivity
    ins      -- number of inputs
    outs     -- number of outputs
    washout  -- washout steps ignored by the linear readout
    svm      -- 1 selects a linear SVM readout, otherwise linear readout
    svm_C    -- SVM regularization constant
    lr       -- leaking rate, also used for ALPHA
    in_scale -- input scaling
    noise    -- training noise, stored on the model
    datatype -- dtype for the reservoir node
    """
    outputs = outs
    proto = au.DoubleESN()
    proto.setSize(size)
    proto.setInputs(ins)
    proto.setOutputs(outputs)
    for param, value in ((au.ALPHA, lr),
                         (au.CONNECTIVITY, conn),
                         (au.IN_SCALE, in_scale),
                         (au.IN_CONNECTIVITY, 0.3)):
        proto.setInitParam(param, value)
    # leaky-integrator simulation
    proto.setSimAlgorithm(au.SIM_LI)
    proto.setInitParam(au.LEAKING_RATE, lr)

    # read the effective dimensions back from the prototype
    ins = proto.getInputs()
    size = proto.getSize()
    outs = proto.getOutputs()

    reservoir = ReservoirNode(ins, size, dtype=datatype, prototype=proto)
    if svm == 1:
        # L2-loss primal SVM readout with eps = 0.01
        # (faster if we have many timesteps)
        readout = BinaryLinearSVMNode((ins + size),
                                      outs,
                                      C=svm_C,
                                      solver_type=2,
                                      eps=0.01)
    else:
        readout = LinearReadoutNode((ins + size), outs, ignore_ind=washout)

    # duplicate the input so the readout also sees it directly
    con_array = np.r_[range(ins), range(ins)]
    switchboard = mdp.hinet.Switchboard(ins, connections=con_array)
    reslayer = mdp.hinet.Layer([reservoir, IdentityNode(ins, ins)])
    model = mdp.hinet.FlowNode(mdp.Flow([switchboard, reslayer, readout]))

    # bookkeeping attributes used by the experiment driver
    model.type = 'ESN_mdp'
    model.print_W = svm + 1  # print output weights after training
    model.dtype = datatype
    model.trainnoise = noise
    model.testnoise = 0.
    model.randrange = 0
    model.output_act = "tanh"
    model.reset_state = 1  # clear state for each example
    return model